mirror of https://github.com/encounter/nod-rs.git
Add conversion support & large refactor
This commit is contained in:
parent 374c6950b2
commit 3848edfe7b
@@ -95,42 +95,42 @@ jobs:
          target: x86_64-unknown-linux-musl
          name: linux-x86_64
          build: zigbuild
          features: asm
          features: openssl-vendored
        - platform: ubuntu-latest
          target: i686-unknown-linux-musl
          name: linux-i686
          build: zigbuild
          features: asm
          features: openssl-vendored
        - platform: ubuntu-latest
          target: aarch64-unknown-linux-musl
          name: linux-aarch64
          build: zigbuild
          features: nightly
          features: openssl-vendored
        - platform: windows-latest
          target: i686-pc-windows-msvc
          name: windows-x86
          build: build
          features: default
          features: openssl-vendored
        - platform: windows-latest
          target: x86_64-pc-windows-msvc
          name: windows-x86_64
          build: build
          features: default
          features: openssl-vendored
        - platform: windows-latest
          target: aarch64-pc-windows-msvc
          name: windows-arm64
          build: build
          features: nightly
          features: openssl-vendored
        - platform: macos-latest
          target: x86_64-apple-darwin
          name: macos-x86_64
          build: build
          features: asm
          features: openssl
        - platform: macos-latest
          target: aarch64-apple-darwin
          name: macos-arm64
          build: build
          features: nightly
          features: openssl
      fail-fast: false
    runs-on: ${{ matrix.platform }}
    steps:
File diff suppressed because it is too large (Load Diff)

Cargo.toml (10 changed lines)
@@ -2,6 +2,9 @@
members = ["nod", "nodtool"]
resolver = "2"

[profile.release]
debug = 1

[profile.release-lto]
inherits = "release"
lto = "fat"
@@ -16,3 +19,10 @@ authors = ["Luke Street <luke@street.dev>"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/encounter/nod"
keywords = ["gamecube", "wii", "iso", "wbfs", "rvz"]

[workspace.dependencies]
digest = { version = "0.11.0-pre.9", default-features = false }
md-5 = { version = "0.11.0-pre.4", default-features = false }
sha1 = { version = "0.11.0-pre.4", default-features = false }
tracing = "0.1"
zerocopy = { version = "0.8", features = ["alloc", "derive"] }
@@ -16,27 +16,42 @@ categories = ["command-line-utilities", "parser-implementations"]

[features]
default = ["compress-bzip2", "compress-lzma", "compress-zlib", "compress-zstd"]
asm = ["sha1/asm"]
compress-bzip2 = ["bzip2"]
compress-lzma = ["liblzma"]
compress-lzma = ["liblzma", "liblzma-sys"]
compress-zlib = ["adler", "miniz_oxide"]
compress-zstd = ["zstd"]
compress-zstd = ["zstd", "zstd-safe"]
openssl = ["dep:openssl"]
openssl-vendored = ["openssl", "openssl/vendored"]

[dependencies]
adler = { version = "1.0", optional = true }
aes = "0.8"
aes = "0.9.0-pre.2"
base16ct = "0.2"
bit-set = "0.8"
bytes = "1.8"
bzip2 = { version = "0.4", features = ["static"], optional = true }
cbc = "0.1"
digest = "0.10"
cbc = "0.2.0-pre.2"
crc32fast = "1.4"
crossbeam-channel = "0.5"
crossbeam-utils = "0.8"
digest = { workspace = true }
dyn-clone = "1.0"
encoding_rs = "0.8"
itertools = "0.13"
liblzma = { version = "0.3", features = ["static"], optional = true }
log = "0.4"
liblzma-sys = { version = "0.3", features = ["static"], optional = true }
lru = "0.12"
md-5 = { workspace = true }
memmap2 = "0.9"
miniz_oxide = { version = "0.8", optional = true }
openssl = { version = "0.10", optional = true }
rand = "0.8"
rayon = "1.10"
sha1 = "0.10"
thiserror = "1.0"
zerocopy = { version = "0.8", features = ["alloc", "derive"] }
zstd = { version = "0.13", optional = true }
sha1 = { workspace = true }
simple_moving_average = "1.0"
thiserror = "2.0"
tracing = { workspace = true }
xxhash-rust = { version = "0.8", features = ["xxh64"] }
zerocopy = { workspace = true }
zstd = { version = "0.13", optional = true, default-features = false }
zstd-safe = { version = "7.2", optional = true, default-features = false }
@@ -0,0 +1,827 @@
#![allow(missing_docs)] // TODO

use std::{
    io,
    io::{Read, Seek, Write},
    sync::Arc,
};

use tracing::debug;
use zerocopy::{FromZeros, IntoBytes};

use crate::{
    disc::{
        fst::{Fst, FstBuilder},
        DiscHeader, PartitionHeader, BI2_SIZE, BOOT_SIZE, GCN_MAGIC, MINI_DVD_SIZE, SECTOR_SIZE,
        WII_MAGIC,
    },
    read::DiscStream,
    util::{align_up_64, array_ref, array_ref_mut, lfg::LaggedFibonacci},
    Error, Result, ResultContext,
};

pub trait FileCallback: Clone + Send + Sync {
    fn read_file(&mut self, out: &mut [u8], name: &str, offset: u64) -> io::Result<()>;
}

#[derive(Debug, Clone)]
pub struct FileInfo {
    pub name: String,
    pub size: u64,
    pub offset: Option<u64>,
    pub alignment: Option<u32>,
}

pub struct GCPartitionBuilder {
    disc_header: Box<DiscHeader>,
    partition_header: Box<PartitionHeader>,
    user_files: Vec<FileInfo>,
    overrides: PartitionOverrides,
    junk_files: Vec<String>,
}

#[derive(Debug, Clone, PartialEq, Eq)]
pub enum WriteKind {
    File(String),
    Static(Arc<[u8]>, &'static str),
    Junk,
}

impl WriteKind {
    fn name(&self) -> &str {
        match self {
            WriteKind::File(name) => name,
            WriteKind::Static(_, name) => name,
            WriteKind::Junk => "[junk data]",
        }
    }
}

#[derive(Debug, Clone)]
pub struct WriteInfo {
    pub kind: WriteKind,
    pub size: u64,
    pub offset: u64,
}

pub struct GCPartitionWriter {
    write_info: Vec<WriteInfo>,
    disc_size: u64,
    disc_id: [u8; 4],
    disc_num: u8,
}

const BI2_OFFSET: u64 = BOOT_SIZE as u64;
const APPLOADER_OFFSET: u64 = BI2_OFFSET + BI2_SIZE as u64;

#[derive(Debug, Clone, Default)]
pub struct PartitionOverrides {
    pub game_id: Option<[u8; 6]>,
    pub game_title: Option<String>,
    pub disc_num: Option<u8>,
    pub disc_version: Option<u8>,
    pub audio_streaming: Option<bool>,
    pub audio_stream_buf_size: Option<u8>,
    pub junk_id: Option<[u8; 4]>,
    pub region: Option<u8>,
}

impl GCPartitionBuilder {
    pub fn new(is_wii: bool, overrides: PartitionOverrides) -> Self {
        let mut disc_header = DiscHeader::new_box_zeroed().unwrap();
        if is_wii {
            disc_header.gcn_magic = [0u8; 4];
            disc_header.wii_magic = WII_MAGIC;
        } else {
            disc_header.gcn_magic = GCN_MAGIC;
            disc_header.wii_magic = [0u8; 4];
        }
        Self {
            disc_header,
            partition_header: PartitionHeader::new_box_zeroed().unwrap(),
            user_files: Vec::new(),
            overrides,
            junk_files: Vec::new(),
        }
    }

    #[inline]
    pub fn set_disc_header(&mut self, disc_header: Box<DiscHeader>) {
        self.disc_header = disc_header;
    }

    #[inline]
    pub fn set_partition_header(&mut self, partition_header: Box<PartitionHeader>) {
        self.partition_header = partition_header;
    }

    pub fn add_file(&mut self, info: FileInfo) -> Result<()> {
        if let (Some(offset), Some(alignment)) = (info.offset, info.alignment) {
            if offset % alignment as u64 != 0 {
                return Err(Error::Other(format!(
                    "File {} offset {:#X} is not aligned to {}",
                    info.name, offset, alignment
                )));
            }
        }
        self.user_files.push(info);
        Ok(())
    }

    /// A junk file exists in the FST, but is excluded from the disc layout, so junk data will be
    /// written in its place.
    pub fn add_junk_file(&mut self, name: String) { self.junk_files.push(name); }

    pub fn build(
        &self,
        sys_file_callback: impl FnMut(&mut dyn Write, &str) -> io::Result<()>,
    ) -> Result<GCPartitionWriter> {
        let mut layout = GCPartitionLayout::new(self);
        layout.locate_sys_files(sys_file_callback)?;
        layout.apply_overrides(&self.overrides)?;
        let write_info = layout.layout_files()?;
        let disc_size = layout.partition_header.user_offset.get() as u64
            + layout.partition_header.user_size.get() as u64;
        let junk_id = layout.junk_id();
        Ok(GCPartitionWriter::new(write_info, disc_size, junk_id, self.disc_header.disc_num))
    }
}
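A usage sketch, not part of the diff: driving `GCPartitionBuilder` end to end, assuming the new module is exposed as `nod::build::gc`. The paths, sizes, and the `copy_file` helper are hypothetical; the callback contract is the one above (write the named file's bytes into the provided writer).

```rust
use std::{
    fs::File,
    io::{self, Write},
};

use nod::build::gc::{FileInfo, GCPartitionBuilder, PartitionOverrides};

// Hypothetical helper: stream a local file into the writer the builder hands us.
fn copy_file(out: &mut dyn Write, name: &str) -> io::Result<()> {
    io::copy(&mut File::open(name)?, out).map(|_| ())
}

fn build_iso() -> nod::Result<()> {
    let mut builder = GCPartitionBuilder::new(false, PartitionOverrides::default());
    // System files are recognized by name (or fixed offset); everything else is user data.
    for name in ["sys/boot.bin", "sys/bi2.bin", "sys/apploader.img", "sys/main.dol", "files/game.dat"] {
        let size = std::fs::metadata(name).expect("stat input").len();
        builder.add_file(FileInfo { name: name.to_string(), size, offset: None, alignment: None })?;
    }
    // Keep an FST entry for this file, but write junk data in its place.
    builder.add_junk_file("files/unused.dat".to_string());
    let writer = builder.build(copy_file)?;
    let mut out = File::create("out.iso").expect("create output");
    writer.write_to(&mut out, copy_file)
}
```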
struct GCPartitionLayout {
    disc_header: Box<DiscHeader>,
    partition_header: Box<PartitionHeader>,
    user_files: Vec<FileInfo>,
    apploader_file: Option<FileInfo>,
    dol_file: Option<FileInfo>,
    raw_fst: Option<Box<[u8]>>,
    raw_bi2: Option<Box<[u8]>>,
    junk_id: Option<[u8; 4]>,
    junk_files: Vec<String>,
}

impl GCPartitionLayout {
    fn new(builder: &GCPartitionBuilder) -> Self {
        GCPartitionLayout {
            disc_header: builder.disc_header.clone(),
            partition_header: builder.partition_header.clone(),
            user_files: builder.user_files.clone(),
            apploader_file: None,
            dol_file: None,
            raw_fst: None,
            raw_bi2: None,
            junk_id: builder.overrides.junk_id,
            junk_files: builder.junk_files.clone(),
        }
    }

    fn locate_sys_files(
        &mut self,
        mut file_callback: impl FnMut(&mut dyn Write, &str) -> io::Result<()>,
    ) -> Result<()> {
        let mut handled = vec![false; self.user_files.len()];

        // Locate fixed offset system files
        for (info, handled) in self.user_files.iter().zip(handled.iter_mut()) {
            if info.offset == Some(0) || info.name == "sys/boot.bin" {
                let mut data = Vec::with_capacity(BOOT_SIZE);
                file_callback(&mut data, &info.name)
                    .with_context(|| format!("Failed to read file {}", info.name))?;
                if data.len() != BOOT_SIZE {
                    return Err(Error::Other(format!(
                        "Boot file {} is {} bytes, expected {}",
                        info.name,
                        data.len(),
                        BOOT_SIZE
                    )));
                }
                self.disc_header.as_mut_bytes().copy_from_slice(&data[..size_of::<DiscHeader>()]);
                self.partition_header
                    .as_mut_bytes()
                    .copy_from_slice(&data[size_of::<DiscHeader>()..]);
                *handled = true;
                continue;
            }

            if info.offset == Some(BI2_OFFSET) || info.name == "sys/bi2.bin" {
                let mut data = Vec::with_capacity(BI2_SIZE);
                file_callback(&mut data, &info.name)
                    .with_context(|| format!("Failed to read file {}", info.name))?;
                if data.len() != BI2_SIZE {
                    return Err(Error::Other(format!(
                        "BI2 file {} is {} bytes, expected {}",
                        info.name,
                        data.len(),
                        BI2_SIZE
                    )));
                }
                self.raw_bi2 = Some(data.into_boxed_slice());
                *handled = true;
                continue;
            }

            if info.offset == Some(APPLOADER_OFFSET) || info.name == "sys/apploader.img" {
                self.apploader_file = Some(info.clone());
                *handled = true;
                continue;
            }
        }

        // Locate other system files
        let is_wii = self.disc_header.is_wii();
        for (info, handled) in self.user_files.iter().zip(handled.iter_mut()) {
            let dol_offset = self.partition_header.dol_offset(is_wii);
            if (dol_offset != 0 && info.offset == Some(dol_offset)) || info.name == "sys/main.dol" {
                let mut info = info.clone();
                if info.alignment.is_none() {
                    info.alignment = Some(128);
                }
                self.dol_file = Some(info);
                *handled = true; // TODO DOL in user data
                continue;
            }

            let fst_offset = self.partition_header.fst_offset(is_wii);
            if (fst_offset != 0 && info.offset == Some(fst_offset)) || info.name == "sys/fst.bin" {
                let mut data = Vec::with_capacity(info.size as usize);
                file_callback(&mut data, &info.name)
                    .with_context(|| format!("Failed to read file {}", info.name))?;
                if data.len() != info.size as usize {
                    return Err(Error::Other(format!(
                        "FST file {} is {} bytes, expected {}",
                        info.name,
                        data.len(),
                        info.size
                    )));
                }
                self.raw_fst = Some(data.into_boxed_slice());
                *handled = true;
                continue;
            }
        }

        // Remove handled files
        let mut iter = handled.iter();
        self.user_files.retain(|_| !iter.next().unwrap());
        Ok(())
    }

    fn apply_overrides(&mut self, overrides: &PartitionOverrides) -> Result<()> {
        if let Some(game_id) = overrides.game_id {
            self.disc_header.game_id.copy_from_slice(&game_id);
        }
        if let Some(game_title) = overrides.game_title.as_ref() {
            let max_size = self.disc_header.game_title.len() - 1; // nul terminator
            if game_title.len() > max_size {
                return Err(Error::Other(format!(
                    "Game title \"{}\" is too long ({} > {})",
                    game_title,
                    game_title.len(),
                    max_size
                )));
            }
            let len = game_title.len().min(max_size);
            self.disc_header.game_title[..len].copy_from_slice(&game_title.as_bytes()[..len]);
        }
        if let Some(disc_num) = overrides.disc_num {
            self.disc_header.disc_num = disc_num;
        }
        if let Some(disc_version) = overrides.disc_version {
            self.disc_header.disc_version = disc_version;
        }
        if let Some(audio_streaming) = overrides.audio_streaming {
            self.disc_header.audio_streaming = audio_streaming as u8;
        }
        if let Some(audio_stream_buf_size) = overrides.audio_stream_buf_size {
            self.disc_header.audio_stream_buf_size = audio_stream_buf_size;
        }
        let set_bi2 = self.raw_bi2.is_none() && overrides.region.is_some();
        let raw_bi2 = self.raw_bi2.get_or_insert_with(|| {
            <[u8]>::new_box_zeroed_with_elems(BI2_SIZE).expect("Failed to allocate BI2")
        });
        if set_bi2 {
            let region = overrides.region.unwrap_or(0xFF) as u32;
            *array_ref_mut![raw_bi2, 0x18, 4] = region.to_be_bytes();
        }
        Ok(())
    }

    fn can_use_orig_fst(&self) -> bool {
        if let Some(existing) = self.raw_fst.as_deref() {
            let Ok(existing_fst) = Fst::new(existing) else {
                return false;
            };
            for (_, node, path) in existing_fst.iter() {
                if node.is_dir() {
                    continue;
                }
                if !self.user_files.iter().any(|info| info.name == path)
                    && !self.junk_files.contains(&path)
                {
                    println!("FST file {} not found", path);
                    return false;
                }
            }
            println!("Using existing FST");
            return true;
        }
        false
    }

    fn calculate_fst_size(&self) -> Result<u64> {
        if self.can_use_orig_fst() {
            return Ok(self.raw_fst.as_deref().unwrap().len() as u64);
        }

        let mut file_names = Vec::with_capacity(self.user_files.len());
        for info in &self.user_files {
            file_names.push(info.name.as_str());
        }
        // file_names.sort_unstable();
        let is_wii = self.disc_header.is_wii();
        let mut builder = if let Some(existing) = self.raw_fst.as_deref() {
            let existing_fst = Fst::new(existing)?;
            FstBuilder::new_with_string_table(is_wii, Vec::from(existing_fst.string_table))?
        } else {
            FstBuilder::new(is_wii)
        };
        for name in file_names {
            builder.add_file(name, 0, 0);
        }
        let size = builder.byte_size() as u64;
        // if size != self.partition_header.fst_size(is_wii) {
        //     return Err(Error::Other(format!(
        //         "FST size {} != {}",
        //         size,
        //         self.partition_header.fst_size(is_wii)
        //     )));
        // }
        Ok(size)
    }

    fn generate_fst(&mut self, write_info: &[WriteInfo]) -> Result<Arc<[u8]>> {
        if self.can_use_orig_fst() {
            let fst_data = self.raw_fst.as_ref().unwrap().clone();
            // TODO update offsets and sizes
            // let node_count = Fst::new(fst_data.as_ref())?.nodes.len();
            // let string_base = node_count * size_of::<Node>();
            // let (node_buf, string_table) = fst_data.split_at_mut(string_base);
            // let nodes = <[Node]>::mut_from_bytes(node_buf).unwrap();
            return Ok(Arc::from(fst_data));
        }

        let files = write_info.to_vec();
        // files.sort_unstable_by(|a, b| a.name.cmp(&b.name));
        let is_wii = self.disc_header.is_wii();
        let mut builder = if let Some(existing) = self.raw_fst.as_deref() {
            let existing_fst = Fst::new(existing)?;
            FstBuilder::new_with_string_table(is_wii, Vec::from(existing_fst.string_table))?
        } else {
            FstBuilder::new(is_wii)
        };
        for info in files {
            if let WriteKind::File(name) = info.kind {
                builder.add_file(&name, info.offset, info.size as u32);
            }
        }
        let raw_fst = builder.finalize();
        if raw_fst.len() != self.partition_header.fst_size(is_wii) as usize {
            return Err(Error::Other(format!(
                "FST size mismatch: {} != {}",
                raw_fst.len(),
                self.partition_header.fst_size(is_wii)
            )));
        }
        Ok(Arc::from(raw_fst))
    }

    fn layout_system_data(&mut self, write_info: &mut Vec<WriteInfo>) -> Result<u64> {
        let mut last_offset = 0;

        let Some(apploader_file) = self.apploader_file.as_ref() else {
            return Err(Error::Other("Apploader not set".to_string()));
        };
        let Some(dol_file) = self.dol_file.as_ref() else {
            return Err(Error::Other("DOL not set".to_string()));
        };
        let Some(raw_bi2) = self.raw_bi2.as_ref() else {
            return Err(Error::Other("BI2 not set".to_string()));
        };
        // let Some(raw_fst) = self.raw_fst.as_ref() else {
        //     return Err(Error::Other("FST not set".to_string()));
        // };

        let mut boot = <[u8]>::new_box_zeroed_with_elems(BOOT_SIZE)?;
        boot[..size_of::<DiscHeader>()].copy_from_slice(self.disc_header.as_bytes());
        boot[size_of::<DiscHeader>()..].copy_from_slice(self.partition_header.as_bytes());
        write_info.push(WriteInfo {
            kind: WriteKind::Static(Arc::from(boot), "[BOOT]"),
            size: BOOT_SIZE as u64,
            offset: last_offset,
        });
        last_offset += BOOT_SIZE as u64;
        write_info.push(WriteInfo {
            kind: WriteKind::Static(Arc::from(raw_bi2.as_ref()), "[BI2]"),
            size: BI2_SIZE as u64,
            offset: last_offset,
        });
        last_offset += BI2_SIZE as u64;
        write_info.push(WriteInfo {
            kind: WriteKind::File(apploader_file.name.clone()),
            size: apploader_file.size,
            offset: last_offset,
        });
        last_offset += apploader_file.size;

        // Update DOL and FST offsets if not set
        let is_wii = self.disc_header.is_wii();
        let mut dol_offset = self.partition_header.dol_offset(is_wii);
        if dol_offset == 0 {
            dol_offset = align_up_64(last_offset, dol_file.alignment.unwrap() as u64);
            self.partition_header.set_dol_offset(dol_offset, is_wii);
        }
        let mut fst_offset = self.partition_header.fst_offset(is_wii);
        if fst_offset == 0 {
            // TODO handle DOL in user data
            fst_offset = align_up_64(dol_offset + dol_file.size, 128);
            self.partition_header.set_fst_offset(fst_offset, is_wii);
        }
        let fst_size = self.calculate_fst_size()?;
        self.partition_header.set_fst_size(fst_size, is_wii);
        if self.partition_header.fst_max_size(is_wii) < fst_size {
            self.partition_header.set_fst_max_size(fst_size, is_wii);
        }

        if dol_offset < fst_offset {
            write_info.push(WriteInfo {
                kind: WriteKind::File(dol_file.name.clone()),
                size: dol_file.size,
                offset: dol_offset,
            });
        } else {
            // DOL in user data
        }
        // write_info.push(WriteInfo {
        //     kind: WriteKind::Static(Arc::from(raw_fst.as_ref()), "[FST]"),
        //     size: fst_size,
        //     offset: fst_offset,
        // });

        Ok(fst_offset + fst_size)
    }

    fn layout_files(&mut self) -> Result<Vec<WriteInfo>> {
        let mut system_write_info = Vec::new();
        let mut write_info = Vec::with_capacity(self.user_files.len());
        let mut last_offset = self.layout_system_data(&mut system_write_info)?;

        // Layout user data
        let mut user_offset = self.partition_header.user_offset.get() as u64;
        if user_offset == 0 {
            user_offset = align_up_64(last_offset, SECTOR_SIZE as u64);
            self.partition_header.user_offset.set(user_offset as u32);
        } else if user_offset < last_offset {
            return Err(Error::Other(format!(
                "User offset {:#X} is before FST {:#X}",
                user_offset, last_offset
            )));
        }
        last_offset = user_offset;
        for info in &self.user_files {
            let offset = info
                .offset
                .unwrap_or_else(|| align_up_64(last_offset, info.alignment.unwrap_or(32) as u64));
            write_info.push(WriteInfo {
                kind: WriteKind::File(info.name.clone()),
                offset,
                size: info.size,
            });
            last_offset = offset + info.size;
        }

        // Generate FST from only user files
        let is_wii = self.disc_header.is_wii();
        let fst_data = self.generate_fst(&write_info)?;
        let fst_size = fst_data.len() as u64;
        write_info.push(WriteInfo {
            kind: WriteKind::Static(fst_data, "[FST]"),
            size: fst_size,
            offset: self.partition_header.fst_offset(is_wii),
        });
        // Add system files to write info
        write_info.extend(system_write_info);
        // Sort files by offset
        sort_files(&mut write_info)?;

        // Update user size if not set
        if self.partition_header.user_size.get() == 0 {
            let user_end = if self.disc_header.is_wii() {
                align_up_64(last_offset, SECTOR_SIZE as u64)
            } else {
                MINI_DVD_SIZE
            };
            self.partition_header.user_size.set((user_end - user_offset) as u32);
        }

        // Insert junk data
        let write_info = insert_junk_data(write_info, &self.partition_header);

        Ok(write_info)
    }

    fn junk_id(&self) -> [u8; 4] {
        self.junk_id.unwrap_or_else(|| *array_ref![self.disc_header.game_id, 0, 4])
    }
}

pub(crate) fn insert_junk_data(
    write_info: Vec<WriteInfo>,
    partition_header: &PartitionHeader,
) -> Vec<WriteInfo> {
    let mut new_write_info = Vec::with_capacity(write_info.len());

    let fst_end = partition_header.fst_offset(false) + partition_header.fst_size(false);
    let file_gap = find_file_gap(&write_info, fst_end);
    let mut last_file_end = 0;
    for info in write_info {
        if let WriteKind::File(..) | WriteKind::Static(..) = &info.kind {
            let aligned_end = gcm_align(last_file_end);
            if info.offset > aligned_end && last_file_end >= fst_end {
                // Junk data is aligned to 4 bytes with a 28 byte padding (aka `(n + 31) & !3`)
                // but a few cases don't have the 28 byte padding. Namely, the junk data after the
                // FST, and the junk data in between the inner and outer rim files. This attempts to
                // determine the correct alignment, but is not 100% accurate.
                let junk_start = if file_gap == Some(last_file_end) {
                    align_up_64(last_file_end, 4)
                } else {
                    aligned_end
                };
                new_write_info.push(WriteInfo {
                    kind: WriteKind::Junk,
                    size: info.offset - junk_start,
                    offset: junk_start,
                });
            }
            last_file_end = info.offset + info.size;
        }
        new_write_info.push(info);
    }
    let aligned_end = gcm_align(last_file_end);
    let user_end =
        partition_header.user_offset.get() as u64 + partition_header.user_size.get() as u64;
    if aligned_end < user_end && aligned_end >= fst_end {
        new_write_info.push(WriteInfo {
            kind: WriteKind::Junk,
            size: user_end - aligned_end,
            offset: aligned_end,
        });
    }

    new_write_info
}

impl GCPartitionWriter {
    fn new(write_info: Vec<WriteInfo>, disc_size: u64, disc_id: [u8; 4], disc_num: u8) -> Self {
        Self { write_info, disc_size, disc_id, disc_num }
    }

    pub fn write_to<W>(
        &self,
        out: &mut W,
        mut file_callback: impl FnMut(&mut dyn Write, &str) -> io::Result<()>,
    ) -> Result<()>
    where
        W: Write + ?Sized,
    {
        let mut out = WriteCursor { inner: out, position: 0 };
        let mut lfg = LaggedFibonacci::default();
        for info in &self.write_info {
            out.write_zeroes_until(info.offset).context("Writing padding")?;
            match &info.kind {
                WriteKind::File(name) => file_callback(&mut out, name)
                    .with_context(|| format!("Writing file {}", name))?,
                WriteKind::Static(data, name) => out.write_all(data).with_context(|| {
                    format!("Writing static data {} ({} bytes)", name, data.len())
                })?,
                WriteKind::Junk => {
                    lfg.write_sector_chunked(
                        &mut out,
                        info.size,
                        self.disc_id,
                        self.disc_num,
                        info.offset,
                    )
                    .with_context(|| {
                        format!(
                            "Writing junk data at {:X} -> {:X}",
                            info.offset,
                            info.offset + info.size
                        )
                    })?;
                }
            };
            if out.position != info.offset + info.size {
                return Err(Error::Other(format!(
                    "File {}: Wrote {} bytes, expected {}",
                    info.kind.name(),
                    out.position - info.offset,
                    info.size
                )));
            }
        }
        out.write_zeroes_until(self.disc_size).context("Writing end of file")?;
        out.flush().context("Flushing output")?;
        Ok(())
    }

    pub fn into_stream<Cb>(self, file_callback: Cb) -> Result<Box<dyn DiscStream>>
    where Cb: FileCallback + 'static {
        Ok(Box::new(GCPartitionStream::new(
            file_callback,
            Arc::from(self.write_info),
            self.disc_size,
            self.disc_id,
            self.disc_num,
        )))
    }
}
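A read-back sketch, not part of the diff: `into_stream` yields a lazy, seekable view of the same layout, synthesizing zero padding and junk data on demand. `DirCallback` is hypothetical, and this assumes `DiscStream` is `Read + Seek` (as its use here implies).

```rust
use std::io::{self, Read, Seek, SeekFrom};

use nod::build::gc::{FileCallback, GCPartitionWriter};

#[derive(Clone)]
struct DirCallback; // hypothetical: serves file contents by name from the working directory

impl FileCallback for DirCallback {
    fn read_file(&mut self, out: &mut [u8], name: &str, offset: u64) -> io::Result<()> {
        let mut file = std::fs::File::open(name)?;
        file.seek(SeekFrom::Start(offset))?;
        file.read_exact(out)
    }
}

fn read_header(writer: GCPartitionWriter) -> nod::Result<()> {
    let mut stream = writer.into_stream(DirCallback)?;
    let mut header = [0u8; 0x20];
    stream.read_exact(&mut header).expect("read disc header");
    Ok(())
}
```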
struct WriteCursor<W> {
    inner: W,
    position: u64,
}

impl<W> WriteCursor<W>
where W: Write
{
    fn write_zeroes_until(&mut self, until: u64) -> io::Result<()> {
        const ZEROES: [u8; 0x1000] = [0u8; 0x1000];
        let mut remaining = until.saturating_sub(self.position);
        while remaining > 0 {
            let write_len = remaining.min(ZEROES.len() as u64) as usize;
            let written = self.write(&ZEROES[..write_len])?;
            remaining -= written as u64;
        }
        Ok(())
    }
}

impl<W> Write for WriteCursor<W>
where W: Write
{
    #[inline]
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let len = self.inner.write(buf)?;
        self.position += len as u64;
        Ok(len)
    }

    #[inline]
    fn flush(&mut self) -> io::Result<()> { self.inner.flush() }
}

#[derive(Clone)]
pub(crate) struct GCPartitionStream<Cb> {
    file_callback: Cb,
    pos: u64,
    write_info: Arc<[WriteInfo]>,
    size: u64,
    disc_id: [u8; 4],
    disc_num: u8,
}

impl<Cb> GCPartitionStream<Cb> {
    pub fn new(
        file_callback: Cb,
        write_info: Arc<[WriteInfo]>,
        size: u64,
        disc_id: [u8; 4],
        disc_num: u8,
    ) -> Self {
        Self { file_callback, pos: 0, write_info, size, disc_id, disc_num }
    }

    #[inline]
    pub fn set_position(&mut self, pos: u64) { self.pos = pos; }

    #[inline]
    pub fn len(&self) -> u64 { self.size }
}

impl<Cb> Read for GCPartitionStream<Cb>
where Cb: FileCallback
{
    fn read(&mut self, out: &mut [u8]) -> io::Result<usize> {
        if self.pos >= self.size {
            // Out of bounds
            return Ok(0);
        }

        let end = (self.size - self.pos).min(out.len() as u64) as usize;
        let mut buf = &mut out[..end];
        let mut curr = self
            .write_info
            .binary_search_by_key(&self.pos, |i| i.offset)
            .unwrap_or_else(|idx| idx.saturating_sub(1));
        let mut pos = self.pos;
        let mut total = 0;
        while !buf.is_empty() {
            let Some(info) = self.write_info.get(curr) else {
                buf.fill(0);
                total += buf.len();
                break;
            };
            if pos > info.offset + info.size {
                curr += 1;
                continue;
            }
            let read = if pos < info.offset {
                let read = buf.len().min((info.offset - pos) as usize);
                buf[..read].fill(0);
                read
            } else {
                let read = buf.len().min((info.offset + info.size - pos) as usize);
                match &info.kind {
                    WriteKind::File(name) => {
                        self.file_callback.read_file(&mut buf[..read], name, pos - info.offset)?;
                    }
                    WriteKind::Static(data, _) => {
                        let offset = (pos - info.offset) as usize;
                        buf[..read].copy_from_slice(&data[offset..offset + read]);
                    }
                    WriteKind::Junk => {
                        let mut lfg = LaggedFibonacci::default();
                        lfg.fill_sector_chunked(&mut buf[..read], self.disc_id, self.disc_num, pos);
                    }
                }
                curr += 1;
                read
            };
            buf = &mut buf[read..];
            pos += read as u64;
            total += read;
        }

        Ok(total)
    }
}

impl<Cb> Seek for GCPartitionStream<Cb> {
    fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
        self.pos = match pos {
            io::SeekFrom::Start(pos) => pos,
            io::SeekFrom::End(v) => self.size.saturating_add_signed(v),
            io::SeekFrom::Current(v) => self.pos.saturating_add_signed(v),
        };
        Ok(self.pos)
    }
}

#[inline(always)]
fn gcm_align(n: u64) -> u64 { (n + 31) & !3 }
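Worked values for the alignment rule referenced in `insert_junk_data` above: `(n + 31) & !3` pads by 28 to 31 bytes to reach a 4-byte boundary at least 28 bytes away (for a 4-byte-aligned `n`, exactly `n + 28`). A test sketch:

```rust
#[cfg(test)]
mod gcm_align_tests {
    use super::gcm_align;

    #[test]
    fn pads_then_aligns() {
        assert_eq!(gcm_align(0x1000), 0x101C); // 0x1000 + 31 = 0x101F, & !3 -> 0x101C (n + 28)
        assert_eq!(gcm_align(0x1001), 0x1020); // 0x1001 + 31 = 0x1020, already 4-aligned
        assert_eq!(gcm_align(1), 0x20); // maximum pad: 31 bytes
    }
}
```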
fn sort_files(files: &mut [WriteInfo]) -> Result<()> {
    files.sort_unstable_by_key(|info| (info.offset, info.size));
    for i in 1..files.len() {
        let prev = &files[i - 1];
        let cur = &files[i];
        if cur.offset < prev.offset + prev.size {
            let name = match &cur.kind {
                WriteKind::File(name) => name.as_str(),
                WriteKind::Static(_, name) => name,
                WriteKind::Junk => "[junk data]",
            };
            let prev_name = match &prev.kind {
                WriteKind::File(name) => name.as_str(),
                WriteKind::Static(_, name) => name,
                WriteKind::Junk => "[junk data]",
            };
            return Err(Error::Other(format!(
                "File {} ({:#X}-{:#X}) overlaps with {} ({:#X}-{:#X})",
                name,
                cur.offset,
                cur.offset + cur.size,
                prev_name,
                prev.offset,
                prev.offset + prev.size
            )));
        }
    }
    Ok(())
}

/// Files can be located on the inner rim of the disc (closer to the center) or the outer rim
/// (closer to the edge). The inner rim is slower to read, so developers often configured certain
/// files to be located on the outer rim. This function attempts to find a gap in the file offsets
/// between the inner and outer rim, which we need to recreate junk data properly.
fn find_file_gap(file_infos: &[WriteInfo], fst_end: u64) -> Option<u64> {
    let mut last_offset = 0;
    for info in file_infos {
        if let WriteKind::File(..) | WriteKind::Static(..) = &info.kind {
            if last_offset > fst_end && info.offset > last_offset + SECTOR_SIZE as u64 {
                debug!("Found file gap at {:X} -> {:X}", last_offset, info.offset);
                return Some(last_offset);
            }
            last_offset = info.offset + info.size;
        }
    }
    None
}
@@ -0,0 +1,4 @@
//! Disc image building.

pub mod gc;
pub mod wii;
@@ -0,0 +1 @@
#![allow(missing_docs)] // TODO
@@ -0,0 +1,325 @@
//! Common types.

use std::{borrow::Cow, fmt, str::FromStr, sync::Arc};

use crate::{
    disc::{wii::WiiPartitionHeader, DiscHeader, PartitionHeader, SECTOR_SIZE},
    Error, Result,
};

/// SHA-1 hash bytes
pub type HashBytes = [u8; 20];

/// AES key bytes
pub type KeyBytes = [u8; 16];

/// Magic bytes
pub type MagicBytes = [u8; 4];

/// The disc file format.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum Format {
    /// ISO / GCM (GameCube master disc)
    #[default]
    Iso,
    /// CISO (Compact ISO)
    Ciso,
    /// GCZ
    Gcz,
    /// NFS (Wii U VC)
    Nfs,
    /// RVZ
    Rvz,
    /// WBFS
    Wbfs,
    /// WIA
    Wia,
    /// TGC
    Tgc,
}

impl Format {
    /// Returns the default block size for the disc format, if any.
    pub fn default_block_size(self) -> u32 {
        match self {
            Format::Ciso => crate::io::ciso::DEFAULT_BLOCK_SIZE,
            #[cfg(feature = "compress-zlib")]
            Format::Gcz => crate::io::gcz::DEFAULT_BLOCK_SIZE,
            Format::Rvz => crate::io::wia::RVZ_DEFAULT_CHUNK_SIZE,
            Format::Wbfs => crate::io::wbfs::DEFAULT_BLOCK_SIZE,
            Format::Wia => crate::io::wia::WIA_DEFAULT_CHUNK_SIZE,
            _ => 0,
        }
    }

    /// Returns the default compression algorithm for the disc format.
    pub fn default_compression(self) -> Compression {
        match self {
            #[cfg(feature = "compress-zlib")]
            Format::Gcz => crate::io::gcz::DEFAULT_COMPRESSION,
            Format::Rvz => crate::io::wia::RVZ_DEFAULT_COMPRESSION,
            Format::Wia => crate::io::wia::WIA_DEFAULT_COMPRESSION,
            _ => Compression::None,
        }
    }
}

impl fmt::Display for Format {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Format::Iso => write!(f, "ISO"),
            Format::Ciso => write!(f, "CISO"),
            Format::Gcz => write!(f, "GCZ"),
            Format::Nfs => write!(f, "NFS"),
            Format::Rvz => write!(f, "RVZ"),
            Format::Wbfs => write!(f, "WBFS"),
            Format::Wia => write!(f, "WIA"),
            Format::Tgc => write!(f, "TGC"),
        }
    }
}

/// The disc file format's compression algorithm.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum Compression {
    /// No compression
    #[default]
    None,
    /// BZIP2
    Bzip2(u8),
    /// Deflate (GCZ only)
    Deflate(u8),
    /// LZMA
    Lzma(u8),
    /// LZMA2
    Lzma2(u8),
    /// Zstandard
    Zstandard(i8),
}

impl Compression {
    /// Validates the compression level. Sets the default level if the level is 0.
    pub fn validate_level(&mut self) -> Result<()> {
        match self {
            Compression::Bzip2(level) => {
                if *level == 0 {
                    *level = 9;
                }
                if *level > 9 {
                    return Err(Error::Other(format!(
                        "Invalid BZIP2 compression level: {level} (expected 1-9)"
                    )));
                }
            }
            Compression::Deflate(level) => {
                if *level == 0 {
                    *level = 9;
                }
                if *level > 10 {
                    return Err(Error::Other(format!(
                        "Invalid Deflate compression level: {level} (expected 1-10)"
                    )));
                }
            }
            Compression::Lzma(level) => {
                if *level == 0 {
                    *level = 6;
                }
                if *level > 9 {
                    return Err(Error::Other(format!(
                        "Invalid LZMA compression level: {level} (expected 1-9)"
                    )));
                }
            }
            Compression::Lzma2(level) => {
                if *level == 0 {
                    *level = 6;
                }
                if *level > 9 {
                    return Err(Error::Other(format!(
                        "Invalid LZMA2 compression level: {level} (expected 1-9)"
                    )));
                }
            }
            Compression::Zstandard(level) => {
                if *level == 0 {
                    *level = 19;
                }
                if *level < -22 || *level > 22 {
                    return Err(Error::Other(format!(
                        "Invalid Zstandard compression level: {level} (expected -22 to 22)"
                    )));
                }
            }
            _ => {}
        }
        Ok(())
    }
}
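A sketch of the level-0-means-default contract (type paths assume these are reachable from the crate root):

```rust
use nod::Compression;

fn choose_level() -> nod::Result<Compression> {
    let mut level = Compression::Zstandard(0); // 0 selects the codec default
    level.validate_level()?; // becomes Zstandard(19)
    assert_eq!(level, Compression::Zstandard(19));

    let mut invalid = Compression::Bzip2(12); // BZIP2 accepts 1-9
    assert!(invalid.validate_level().is_err());
    Ok(level)
}
```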
impl fmt::Display for Compression {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Compression::None => write!(f, "None"),
            Compression::Bzip2(level) => {
                if *level == 0 {
                    write!(f, "BZIP2")
                } else {
                    write!(f, "BZIP2 ({level})")
                }
            }
            Compression::Deflate(level) => {
                if *level == 0 {
                    write!(f, "Deflate")
                } else {
                    write!(f, "Deflate ({level})")
                }
            }
            Compression::Lzma(level) => {
                if *level == 0 {
                    write!(f, "LZMA")
                } else {
                    write!(f, "LZMA ({level})")
                }
            }
            Compression::Lzma2(level) => {
                if *level == 0 {
                    write!(f, "LZMA2")
                } else {
                    write!(f, "LZMA2 ({level})")
                }
            }
            Compression::Zstandard(level) => {
                if *level == 0 {
                    write!(f, "Zstandard")
                } else {
                    write!(f, "Zstandard ({level})")
                }
            }
        }
    }
}

impl FromStr for Compression {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let (format, level) =
            if let Some((format, level_str)) = s.split_once(':').or_else(|| s.split_once('.')) {
                let level = level_str
                    .parse::<i32>()
                    .map_err(|_| format!("Failed to parse compression level: {level_str:?}"))?;
                (format, level)
            } else {
                (s, 0)
            };
        match format.to_ascii_lowercase().as_str() {
            "" | "none" => Ok(Compression::None),
            "bz2" | "bzip2" => Ok(Compression::Bzip2(level as u8)),
            "deflate" | "gz" | "gzip" => Ok(Compression::Deflate(level as u8)),
            "lzma" => Ok(Compression::Lzma(level as u8)),
            "lzma2" | "xz" => Ok(Compression::Lzma2(level as u8)),
            "zst" | "zstd" | "zstandard" => Ok(Compression::Zstandard(level as i8)),
            _ => Err(format!("Unknown compression type: {format:?}")),
        }
    }
}
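Parsing accepts `algo`, `algo:level`, or `algo.level`; a sketch (same path assumption as above):

```rust
use std::str::FromStr;

use nod::Compression;

fn parse_examples() {
    assert_eq!(Compression::from_str("zstd"), Ok(Compression::Zstandard(0)));
    assert_eq!(Compression::from_str("zstd:19"), Ok(Compression::Zstandard(19)));
    assert_eq!(Compression::from_str("bzip2.9"), Ok(Compression::Bzip2(9)));
    assert_eq!(Compression::from_str("none"), Ok(Compression::None));
    assert!(Compression::from_str("brotli").is_err()); // unknown codec
}
```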
/// The kind of disc partition.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum PartitionKind {
    /// Data partition.
    Data,
    /// Update partition.
    Update,
    /// Channel partition.
    Channel,
    /// Other partition kind.
    Other(u32),
}

impl fmt::Display for PartitionKind {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Data => write!(f, "Data"),
            Self::Update => write!(f, "Update"),
            Self::Channel => write!(f, "Channel"),
            Self::Other(v) => {
                let bytes = v.to_be_bytes();
                write!(f, "Other ({:08X}, {})", v, String::from_utf8_lossy(&bytes))
            }
        }
    }
}

impl PartitionKind {
    /// Returns the directory name for the partition kind.
    #[inline]
    pub fn dir_name(&self) -> Cow<str> {
        match self {
            Self::Data => Cow::Borrowed("DATA"),
            Self::Update => Cow::Borrowed("UPDATE"),
            Self::Channel => Cow::Borrowed("CHANNEL"),
            Self::Other(v) => {
                let bytes = v.to_be_bytes();
                Cow::Owned(format!("P-{}", String::from_utf8_lossy(&bytes)))
            }
        }
    }
}

impl From<u32> for PartitionKind {
    #[inline]
    fn from(v: u32) -> Self {
        match v {
            0 => Self::Data,
            1 => Self::Update,
            2 => Self::Channel,
            v => Self::Other(v),
        }
    }
}

/// Wii partition information.
#[derive(Debug, Clone)]
pub struct PartitionInfo {
    /// The partition index.
    pub index: usize,
    /// The kind of disc partition.
    pub kind: PartitionKind,
    /// The start sector of the partition.
    pub start_sector: u32,
    /// The start sector of the partition's (usually encrypted) data.
    pub data_start_sector: u32,
    /// The end sector of the partition's (usually encrypted) data.
    pub data_end_sector: u32,
    /// The AES key for the partition, also known as the "title key".
    pub key: KeyBytes,
    /// The Wii partition header.
    pub header: Arc<WiiPartitionHeader>,
    /// The disc header within the partition.
    pub disc_header: Arc<DiscHeader>,
    /// The partition header within the partition.
    pub partition_header: Arc<PartitionHeader>,
    /// Whether the partition data is encrypted
    pub has_encryption: bool,
    /// Whether the partition data hashes are present
    pub has_hashes: bool,
}

impl PartitionInfo {
    /// Returns the size of the partition's data region in bytes.
    #[inline]
    pub fn data_size(&self) -> u64 {
        (self.data_end_sector as u64 - self.data_start_sector as u64) * SECTOR_SIZE as u64
    }

    /// Returns whether the given sector is within the partition's data region.
    #[inline]
    pub fn data_contains_sector(&self, sector: u32) -> bool {
        sector >= self.data_start_sector && sector < self.data_end_sector
    }
}
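A sketch of the `u32` mapping and directory naming (the `0x52585858` value is an arbitrary example):

```rust
use nod::PartitionKind;

fn partition_dirs() {
    assert_eq!(PartitionKind::from(0).dir_name(), "DATA");
    assert_eq!(PartitionKind::from(1).dir_name(), "UPDATE");
    assert_eq!(PartitionKind::from(2).dir_name(), "CHANNEL");
    // Unknown kinds render their big-endian bytes: 0x52585858 -> "RXXX".
    assert_eq!(PartitionKind::from(0x52585858).dir_name(), "P-RXXX");
}
```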
@@ -0,0 +1,124 @@
use std::{
    io,
    io::{BufRead, Seek, SeekFrom},
    sync::Arc,
};

use zerocopy::FromZeros;

use crate::{
    common::KeyBytes,
    disc::{wii::SECTOR_DATA_SIZE, DiscHeader, SECTOR_SIZE},
    io::block::{Block, BlockReader},
    util::impl_read_for_bufread,
    Result,
};

pub enum DirectDiscReaderMode {
    Raw,
    Partition { disc_header: Arc<DiscHeader>, data_start_sector: u32, key: KeyBytes },
}

/// Simplified disc reader that uses a block reader directly.
///
/// This is used to read disc and partition metadata before we can construct a full disc reader.
pub struct DirectDiscReader {
    io: Box<dyn BlockReader>,
    block: Block,
    block_buf: Box<[u8]>,
    block_decrypted: bool,
    pos: u64,
    mode: DirectDiscReaderMode,
}

impl DirectDiscReader {
    pub fn new(inner: Box<dyn BlockReader>) -> Result<Box<Self>> {
        let block_size = inner.block_size() as usize;
        Ok(Box::new(Self {
            io: inner,
            block: Block::default(),
            block_buf: <[u8]>::new_box_zeroed_with_elems(block_size)?,
            block_decrypted: false,
            pos: 0,
            mode: DirectDiscReaderMode::Raw,
        }))
    }

    pub fn reset(&mut self, mode: DirectDiscReaderMode) {
        self.block = Block::default();
        self.block_decrypted = false;
        self.pos = 0;
        self.mode = mode;
    }

    pub fn into_inner(self) -> Box<dyn BlockReader> { self.io }
}

impl BufRead for DirectDiscReader {
    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        match &self.mode {
            DirectDiscReaderMode::Raw => {
                // Read new block if necessary
                let sector = (self.pos / SECTOR_SIZE as u64) as u32;
                if self.block_decrypted || !self.block.contains(sector) {
                    self.block = self.io.read_block(self.block_buf.as_mut(), sector)?;
                    self.block_decrypted = false;
                }
                self.block.data(self.block_buf.as_ref(), self.pos)
            }
            DirectDiscReaderMode::Partition { disc_header, data_start_sector, key } => {
                let has_encryption = disc_header.has_partition_encryption();
                let has_hashes = disc_header.has_partition_hashes();
                let part_sector = if has_hashes {
                    (self.pos / SECTOR_DATA_SIZE as u64) as u32
                } else {
                    (self.pos / SECTOR_SIZE as u64) as u32
                };

                // Read new block if necessary
                let abs_sector = data_start_sector + part_sector;
                if !self.block.contains(abs_sector) {
                    self.block = self.io.read_block(self.block_buf.as_mut(), abs_sector)?;
                    self.block_decrypted = false;
                }

                // Allow reusing the same block from raw mode, just decrypt it if necessary
                if !self.block_decrypted {
                    self.block
                        .decrypt_block(self.block_buf.as_mut(), has_encryption.then_some(*key))?;
                    self.block_decrypted = true;
                }

                self.block.partition_data(
                    self.block_buf.as_ref(),
                    self.pos,
                    *data_start_sector,
                    has_hashes,
                )
            }
        }
    }

    #[inline]
    fn consume(&mut self, amt: usize) { self.pos += amt as u64; }
}

impl_read_for_bufread!(DirectDiscReader);

impl Seek for DirectDiscReader {
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        self.pos = match pos {
            SeekFrom::Start(v) => v,
            SeekFrom::End(_) => {
                return Err(io::Error::new(
                    io::ErrorKind::Unsupported,
                    "DirectDiscReader: SeekFrom::End is not supported",
                ));
            }
            SeekFrom::Current(v) => self.pos.saturating_add_signed(v),
        };
        Ok(self.pos)
    }

    fn stream_position(&mut self) -> io::Result<u64> { Ok(self.pos) }
}
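An in-crate usage sketch (module path assumed; `DirectDiscReader` is internal, so this would live inside the crate): raw mode reads pass straight through the block reader, and `reset` switches to decrypted partition reads once the partition table is known.

```rust
use std::io::{Read, Seek, SeekFrom};

use crate::{
    disc::direct::{DirectDiscReader, DirectDiscReaderMode},
    io::block::BlockReader,
    Result, ResultContext,
};

fn peek_wii_magic(block_reader: Box<dyn BlockReader>) -> Result<[u8; 4]> {
    let mut reader = DirectDiscReader::new(block_reader)?;
    // Raw mode (the default): the Wii magic lives at offset 0x18 in the disc header.
    reader.seek(SeekFrom::Start(0x18)).context("Seeking to magic")?;
    let mut magic = [0u8; 4];
    reader.read_exact(&mut magic).context("Reading magic")?;
    // Later, once a partition is located:
    // reader.reset(DirectDiscReaderMode::Partition { disc_header, data_start_sector, key });
    Ok(magic)
}
```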
@@ -1,11 +1,15 @@
//! Disc file system types
//! File system table (FST) types.

use std::{borrow::Cow, ffi::CStr, mem::size_of};

use encoding_rs::SHIFT_JIS;
use zerocopy::{big_endian::*, FromBytes, Immutable, IntoBytes, KnownLayout};
use itertools::Itertools;
use zerocopy::{big_endian::*, FromBytes, FromZeros, Immutable, IntoBytes, KnownLayout};

use crate::{static_assert, Result};
use crate::{
    util::{array_ref, static_assert},
    Error, Result,
};

/// File system node kind.
#[derive(Clone, Debug, PartialEq)]
@@ -25,13 +29,31 @@ pub struct Node {
    kind: u8,
    // u24 big-endian
    name_offset: [u8; 3],
    pub(crate) offset: U32,
    offset: U32,
    length: U32,
}

static_assert!(size_of::<Node>() == 12);

impl Node {
    /// Create a new node.
    #[inline]
    pub fn new(kind: NodeKind, name_offset: u32, offset: u64, length: u32, is_wii: bool) -> Self {
        Self {
            kind: match kind {
                NodeKind::File => 0,
                NodeKind::Directory => 1,
                NodeKind::Invalid => u8::MAX,
            },
            name_offset: *array_ref![name_offset.to_be_bytes(), 1, 3],
            offset: U32::new(match kind {
                NodeKind::File if is_wii => (offset / 4) as u32,
                _ => offset as u32,
            }),
            length: U32::new(length),
        }
    }

    /// File system node kind.
    #[inline]
    pub fn kind(&self) -> NodeKind {
@@ -42,6 +64,16 @@ impl Node {
        }
    }

    /// Set the node kind.
    #[inline]
    pub fn set_kind(&mut self, kind: NodeKind) {
        self.kind = match kind {
            NodeKind::File => 0,
            NodeKind::Directory => 1,
            NodeKind::Invalid => u8::MAX,
        };
    }

    /// Whether the node is a file.
    #[inline]
    pub fn is_file(&self) -> bool { self.kind == 0 }
@@ -56,6 +88,12 @@ impl Node {
        u32::from_be_bytes([0, self.name_offset[0], self.name_offset[1], self.name_offset[2]])
    }

    /// Set the name offset of the node.
    #[inline]
    pub fn set_name_offset(&mut self, name_offset: u32) {
        self.name_offset = *array_ref![name_offset.to_be_bytes(), 1, 3];
    }

    /// For files, this is the partition offset of the file data. (Wii: >> 2)
    ///
    /// For directories, this is the parent node index in the FST.
@@ -68,16 +106,27 @@ impl Node {
        }
    }

    /// Set the offset of the node. See [`Node::offset`] for details.
    #[inline]
    pub fn set_offset(&mut self, offset: u64, is_wii: bool) {
        self.offset.set(if is_wii && self.is_file() { (offset / 4) as u32 } else { offset as u32 });
    }

    /// For files, this is the byte size of the file.
    ///
    /// For directories, this is the child end index in the FST.
    ///
    /// Number of child files and directories recursively is `length - offset`.
    #[inline]
    pub fn length(&self) -> u64 { self.length.get() as u64 }
    pub fn length(&self) -> u32 { self.length.get() }

    /// Set the length of the node. See [`Node::length`] for details.
    #[inline]
    pub fn set_length(&mut self, length: u32) { self.length.set(length); }
}

/// A view into the file system table (FST).
#[derive(Clone)]
pub struct Fst<'a> {
    /// The nodes in the FST.
    pub nodes: &'a [Node],
@@ -87,14 +136,13 @@ pub struct Fst<'a> {

impl<'a> Fst<'a> {
    /// Create a new FST view from a buffer.
    #[allow(clippy::missing_inline_in_public_items)]
    pub fn new(buf: &'a [u8]) -> Result<Self, &'static str> {
        let Ok((root_node, _)) = Node::ref_from_prefix(buf) else {
            return Err("FST root node not found");
        };
        // String table starts after the last node
        let string_base = root_node.length() * size_of::<Node>() as u64;
        if string_base >= buf.len() as u64 {
        let string_base = root_node.length() * size_of::<Node>() as u32;
        if string_base > buf.len() as u32 {
            return Err("FST string table out of bounds");
        }
        let (node_buf, string_table) = buf.split_at(string_base as usize);
@@ -104,10 +152,9 @@ impl<'a> Fst<'a> {

    /// Iterate over the nodes in the FST.
    #[inline]
    pub fn iter(&self) -> FstIter { FstIter { fst: self, idx: 1 } }
    pub fn iter(&self) -> FstIter { FstIter { fst: self.clone(), idx: 1, segments: vec![] } }

    /// Get the name of a node.
    #[allow(clippy::missing_inline_in_public_items)]
    pub fn get_name(&self, node: Node) -> Result<Cow<'a, str>, String> {
        let name_buf = self.string_table.get(node.name_offset() as usize..).ok_or_else(|| {
            format!(
@@ -126,7 +173,6 @@ impl<'a> Fst<'a> {
    }

    /// Finds a particular file or directory by path.
    #[allow(clippy::missing_inline_in_public_items)]
    pub fn find(&self, path: &str) -> Option<(usize, Node)> {
        let mut split = path.trim_matches('/').split('/');
        let mut current = next_non_empty(&mut split);
@@ -160,23 +206,46 @@
        }
        None
    }

    /// Count the number of files in the FST.
    pub fn num_files(&self) -> usize { self.nodes.iter().filter(|n| n.is_file()).count() }
}

/// Iterator over the nodes in an FST.
///
/// For each node, the iterator yields the node index, the node itself,
/// and the full path to the node (separated by `/`).
pub struct FstIter<'a> {
    fst: &'a Fst<'a>,
    fst: Fst<'a>,
    idx: usize,
    segments: Vec<(Cow<'a, str>, usize)>,
}

impl<'a> Iterator for FstIter<'a> {
    type Item = (usize, Node, Result<Cow<'a, str>, String>);
    type Item = (usize, Node, String);

    fn next(&mut self) -> Option<Self::Item> {
        let idx = self.idx;
        let node = self.fst.nodes.get(idx).copied()?;
        let name = self.fst.get_name(node);
        let name = self.fst.get_name(node).unwrap_or("<invalid>".into());
        self.idx += 1;
        Some((idx, node, name))

        // Remove ended path segments
        let mut new_size = 0;
        for (_, end) in self.segments.iter() {
            if *end == idx {
                break;
            }
            new_size += 1;
        }
        self.segments.truncate(new_size);

        // Add the new path segment
        let length = node.length() as u64;
        let end = if node.is_dir() { length as usize } else { idx + 1 };
        self.segments.push((name, end));
        let path = self.segments.iter().map(|(name, _)| name.as_ref()).join("/");
        Some((idx, node, path))
    }
}

@@ -190,3 +259,117 @@ fn next_non_empty<'a>(iter: &mut impl Iterator<Item = &'a str>) -> &'a str {
        }
    }
}

/// A builder for creating a file system table (FST).
pub struct FstBuilder {
    nodes: Vec<Node>,
    string_table: Vec<u8>,
    stack: Vec<(String, u32)>,
    is_wii: bool,
}

impl FstBuilder {
    /// Create a new FST builder.
    pub fn new(is_wii: bool) -> Self {
        let mut builder = Self { nodes: vec![], string_table: vec![], stack: vec![], is_wii };
        builder.add_node(NodeKind::Directory, "<root>", 0, 0);
        builder
    }

    /// Create a new FST builder with an existing string table. This allows matching the string
    /// ordering of an existing FST.
    pub fn new_with_string_table(is_wii: bool, string_table: Vec<u8>) -> Result<Self> {
        if matches!(string_table.last(), Some(n) if *n != 0) {
            return Err(Error::DiscFormat("String table must be null-terminated".to_string()));
        }
        let root_name = CStr::from_bytes_until_nul(&string_table)
            .map_err(|_| {
                Error::DiscFormat("String table root name not null-terminated".to_string())
            })?
            .to_str()
            .unwrap_or("<root>")
            .to_string();
        let mut builder = Self { nodes: vec![], string_table, stack: vec![], is_wii };
        builder.add_node(NodeKind::Directory, &root_name, 0, 0);
        Ok(builder)
    }

    /// Add a file to the FST. All paths within a directory must be added sequentially,
    /// otherwise the output FST will be invalid.
    pub fn add_file(&mut self, path: &str, offset: u64, size: u32) {
        let components = path.split('/').collect::<Vec<_>>();
        for i in 0..components.len() - 1 {
            if matches!(self.stack.get(i), Some((name, _)) if name != components[i]) {
                // Pop directories
                while self.stack.len() > i {
                    let (_, idx) = self.stack.pop().unwrap();
                    let length = self.nodes.len() as u32;
                    self.nodes[idx as usize].set_length(length);
                }
            }
            while i >= self.stack.len() {
                // Push a new directory node
                let component_idx = self.stack.len();
                let parent = if component_idx == 0 { 0 } else { self.stack[component_idx - 1].1 };
                let node_idx =
                    self.add_node(NodeKind::Directory, components[component_idx], parent as u64, 0);
                self.stack.push((components[i].to_string(), node_idx));
            }
        }
        if components.len() == 1 {
            // Pop all directories
            while let Some((_, idx)) = self.stack.pop() {
                let length = self.nodes.len() as u32;
                self.nodes[idx as usize].set_length(length);
            }
        }
        // Add file node
        self.add_node(NodeKind::File, components.last().unwrap(), offset, size);
    }

    /// Get the byte size of the FST.
    pub fn byte_size(&self) -> usize {
        size_of_val(self.nodes.as_slice()) + self.string_table.len()
    }

    /// Finalize the FST and return the serialized data.
    pub fn finalize(mut self) -> Box<[u8]> {
        // Finalize directory lengths
        let node_count = self.nodes.len() as u32;
        while let Some((_, idx)) = self.stack.pop() {
            self.nodes[idx as usize].set_length(node_count);
        }
        self.nodes[0].set_length(node_count);

        // Serialize nodes and string table
        let nodes_data = self.nodes.as_bytes();
        let string_table_data = self.string_table.as_bytes();
        let mut data =
            <[u8]>::new_box_zeroed_with_elems(nodes_data.len() + string_table_data.len()).unwrap();
        data[..nodes_data.len()].copy_from_slice(self.nodes.as_bytes());
        data[nodes_data.len()..].copy_from_slice(self.string_table.as_bytes());
        data
    }

    fn add_node(&mut self, kind: NodeKind, name: &str, offset: u64, length: u32) -> u32 {
        let (bytes, _, _) = SHIFT_JIS.encode(name);
        // Check if the name already exists in the string table
        let mut name_offset = 0;
        while name_offset < self.string_table.len() {
            let string_buf = &self.string_table[name_offset..];
            let existing = CStr::from_bytes_until_nul(string_buf).unwrap();
            if existing.to_bytes() == bytes.as_ref() {
                break;
            }
            name_offset += existing.to_bytes_with_nul().len();
        }
        // Otherwise, add the name to the string table
        if name_offset == self.string_table.len() {
            self.string_table.extend_from_slice(bytes.as_ref());
            self.string_table.push(0);
        }
        let idx = self.nodes.len() as u32;
        self.nodes.push(Node::new(kind, name_offset as u32, offset, length, self.is_wii));
        idx
    }
}
|
||||
|
|
|
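A minimal usage sketch of the builder above; the paths, offsets, and sizes are made-up values for illustration, and the API (`FstBuilder`, `add_file`, `finalize`) is exactly the one defined in this file:

```rust
fn build_example_fst() -> Box<[u8]> {
    // Hypothetical usage; paths, offsets, and sizes are illustrative only.
    let mut builder = FstBuilder::new(false /* is_wii */);
    // Files within one directory must be added back-to-back (see add_file docs).
    builder.add_file("opening.bnr", 0x2440, 0x1960);
    builder.add_file("audio/bgm.ast", 0x10000, 0x8000);
    builder.add_file("audio/sfx.ast", 0x18000, 0x4000);
    // finalize() closes any still-open directories and serializes nodes + strings.
    builder.finalize()
}
```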
@@ -2,190 +2,144 @@ use std::{
    io,
    io::{BufRead, Read, Seek, SeekFrom},
    mem::size_of,
    sync::Arc,
};

use zerocopy::{FromBytes, FromZeros};
use zerocopy::FromBytes;

use super::{
    ApploaderHeader, DiscHeader, DolHeader, FileStream, Node, PartitionBase, PartitionHeader,
    PartitionMeta, BI2_SIZE, BOOT_SIZE, SECTOR_SIZE,
};
use crate::{
    disc::streams::OwnedFileStream,
    io::block::{Block, BlockIO},
    util::read::{read_box, read_box_slice, read_vec},
    disc::{
        preloader::{Preloader, SectorGroup, SectorGroupRequest},
        ApploaderHeader, DiscHeader, DolHeader, PartitionHeader, BI2_SIZE, BOOT_SIZE,
        SECTOR_GROUP_SIZE, SECTOR_SIZE,
    },
    io::block::BlockReader,
    read::{PartitionEncryption, PartitionMeta, PartitionReader},
    util::{
        impl_read_for_bufread,
        read::{read_arc, read_arc_slice, read_vec},
    },
    Result, ResultContext,
};

pub struct PartitionGC {
    io: Box<dyn BlockIO>,
    block: Block,
    block_buf: Box<[u8]>,
    block_idx: u32,
    sector_buf: Box<[u8; SECTOR_SIZE]>,
    sector: u32,
pub struct PartitionReaderGC {
    io: Box<dyn BlockReader>,
    preloader: Arc<Preloader>,
    pos: u64,
    disc_header: Box<DiscHeader>,
    disc_size: u64,
    sector_group: Option<SectorGroup>,
    meta: Option<PartitionMeta>,
}

impl Clone for PartitionGC {
impl Clone for PartitionReaderGC {
    fn clone(&self) -> Self {
        Self {
            io: self.io.clone(),
            block: Block::default(),
            block_buf: <[u8]>::new_box_zeroed_with_elems(self.block_buf.len()).unwrap(),
            block_idx: u32::MAX,
            sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed().unwrap(),
            sector: u32::MAX,
            preloader: self.preloader.clone(),
            pos: 0,
            disc_header: self.disc_header.clone(),
            disc_size: self.disc_size,
            sector_group: None,
            meta: self.meta.clone(),
        }
    }
}

impl PartitionGC {
    pub fn new(inner: Box<dyn BlockIO>, disc_header: Box<DiscHeader>) -> Result<Box<Self>> {
        let block_size = inner.block_size();
impl PartitionReaderGC {
    pub fn new(
        inner: Box<dyn BlockReader>,
        preloader: Arc<Preloader>,
        disc_size: u64,
    ) -> Result<Box<Self>> {
        Ok(Box::new(Self {
            io: inner,
            block: Block::default(),
            block_buf: <[u8]>::new_box_zeroed_with_elems(block_size as usize).unwrap(),
            block_idx: u32::MAX,
            sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed().unwrap(),
            sector: u32::MAX,
            preloader,
            pos: 0,
            disc_header,
            disc_size,
            sector_group: None,
            meta: None,
        }))
    }

    pub fn into_inner(self) -> Box<dyn BlockIO> { self.io }
}

impl BufRead for PartitionGC {
impl BufRead for PartitionReaderGC {
    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        let sector = (self.pos / SECTOR_SIZE as u64) as u32;
        let block_idx = (sector as u64 * SECTOR_SIZE as u64 / self.block_buf.len() as u64) as u32;

        // Read new block if necessary
        if block_idx != self.block_idx {
            self.block = self.io.read_block(self.block_buf.as_mut(), block_idx, None)?;
            self.block_idx = block_idx;
        if self.pos >= self.disc_size {
            return Ok(&[]);
        }

        // Copy sector if necessary
        if sector != self.sector {
            self.block.copy_raw(
                self.sector_buf.as_mut(),
                self.block_buf.as_ref(),
                sector,
                &self.disc_header,
            )?;
            self.sector = sector;
        }
        let abs_sector = (self.pos / SECTOR_SIZE as u64) as u32;
        let group_idx = abs_sector / 64;
        let abs_group_sector = group_idx * 64;
        let max_groups = self.disc_size.div_ceil(SECTOR_GROUP_SIZE as u64) as u32;
        let request = SectorGroupRequest {
            group_idx,
            partition_idx: None,
            mode: PartitionEncryption::Original,
        };

        let offset = (self.pos % SECTOR_SIZE as u64) as usize;
        Ok(&self.sector_buf[offset..])
        let sector_group = if matches!(&self.sector_group, Some(sector_group) if sector_group.request == request)
        {
            // We can improve this in Rust 2024 with `if_let_rescope`
            // https://github.com/rust-lang/rust/issues/124085
            self.sector_group.as_ref().unwrap()
        } else {
            self.sector_group.insert(self.preloader.fetch(request, max_groups)?)
        };

        // Calculate the number of consecutive sectors in the group
        let group_sector = abs_sector - abs_group_sector;
        let consecutive_sectors = sector_group.consecutive_sectors(group_sector);
        if consecutive_sectors == 0 {
            return Ok(&[]);
        }
        let num_sectors = group_sector + consecutive_sectors;

        // Read from sector group buffer
        let group_start = abs_group_sector as u64 * SECTOR_SIZE as u64;
        let offset = (self.pos - group_start) as usize;
        let end =
            (num_sectors as u64 * SECTOR_SIZE as u64).min(self.disc_size - group_start) as usize;
        Ok(&sector_group.data[offset..end])
    }

    #[inline]
    fn consume(&mut self, amt: usize) { self.pos += amt as u64; }
}

impl Read for PartitionGC {
    #[inline]
    fn read(&mut self, out: &mut [u8]) -> io::Result<usize> {
        let buf = self.fill_buf()?;
        let len = buf.len().min(out.len());
        out[..len].copy_from_slice(&buf[..len]);
        self.consume(len);
        Ok(len)
    }
}
impl_read_for_bufread!(PartitionReaderGC);

impl Seek for PartitionGC {
impl Seek for PartitionReaderGC {
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        self.pos = match pos {
            SeekFrom::Start(v) => v,
            SeekFrom::End(_) => {
                return Err(io::Error::new(
                    io::ErrorKind::Unsupported,
                    "GCPartitionReader: SeekFrom::End is not supported".to_string(),
                ));
            }
            SeekFrom::End(v) => self.disc_size.saturating_add_signed(v),
            SeekFrom::Current(v) => self.pos.saturating_add_signed(v),
        };
        Ok(self.pos)
    }

    fn stream_position(&mut self) -> io::Result<u64> { Ok(self.pos) }
}

impl PartitionBase for PartitionGC {
    fn meta(&mut self) -> Result<Box<PartitionMeta>> {
        self.seek(SeekFrom::Start(0)).context("Seeking to partition metadata")?;
        read_part_meta(self, false)
    }
impl PartitionReader for PartitionReaderGC {
    fn is_wii(&self) -> bool { false }

    fn open_file(&mut self, node: Node) -> io::Result<FileStream> {
        if !node.is_file() {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "Node is not a file".to_string(),
            ));
    fn meta(&mut self) -> Result<PartitionMeta> {
        if let Some(meta) = &self.meta {
            Ok(meta.clone())
        } else {
            let meta = read_part_meta(self, false)?;
            self.meta = Some(meta.clone());
            Ok(meta)
        }
        FileStream::new(self, node.offset(false), node.length())
    }

    fn into_open_file(self: Box<Self>, node: Node) -> io::Result<OwnedFileStream> {
        if !node.is_file() {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "Node is not a file".to_string(),
            ));
        }
        OwnedFileStream::new(self, node.offset(false), node.length())
    }
}

pub(crate) fn read_part_meta(
    reader: &mut dyn PartitionBase,
pub(crate) fn read_dol(
    reader: &mut dyn PartitionReader,
    partition_header: &PartitionHeader,
    is_wii: bool,
) -> Result<Box<PartitionMeta>> {
    // boot.bin
    let raw_boot: Box<[u8; BOOT_SIZE]> = read_box(reader).context("Reading boot.bin")?;
    let partition_header =
        PartitionHeader::ref_from_bytes(&raw_boot[size_of::<DiscHeader>()..]).unwrap();

    // bi2.bin
    let raw_bi2: Box<[u8; BI2_SIZE]> = read_box(reader).context("Reading bi2.bin")?;

    // apploader.bin
    let mut raw_apploader: Vec<u8> =
        read_vec(reader, size_of::<ApploaderHeader>()).context("Reading apploader header")?;
    let apploader_header = ApploaderHeader::ref_from_bytes(raw_apploader.as_slice()).unwrap();
    raw_apploader.resize(
        size_of::<ApploaderHeader>()
            + apploader_header.size.get() as usize
            + apploader_header.trailer_size.get() as usize,
        0,
    );
    reader
        .read_exact(&mut raw_apploader[size_of::<ApploaderHeader>()..])
        .context("Reading apploader")?;
    let raw_apploader = raw_apploader.into_boxed_slice();

    // fst.bin
    reader
        .seek(SeekFrom::Start(partition_header.fst_offset(is_wii)))
        .context("Seeking to FST offset")?;
    let raw_fst: Box<[u8]> = read_box_slice(reader, partition_header.fst_size(is_wii) as usize)
        .with_context(|| {
            format!(
                "Reading partition FST (offset {}, size {})",
                partition_header.fst_offset(is_wii),
                partition_header.fst_size(is_wii)
            )
        })?;

    // main.dol
) -> Result<Arc<[u8]>> {
    reader
        .seek(SeekFrom::Start(partition_header.dol_offset(is_wii)))
        .context("Seeking to DOL offset")?;

@@ -208,9 +162,65 @@ pub(crate) fn read_part_meta(
        .unwrap_or(size_of::<DolHeader>() as u32);
    raw_dol.resize(dol_size as usize, 0);
    reader.read_exact(&mut raw_dol[size_of::<DolHeader>()..]).context("Reading DOL")?;
    let raw_dol = raw_dol.into_boxed_slice();
    Ok(Arc::from(raw_dol.as_slice()))
}

    Ok(Box::new(PartitionMeta {
pub(crate) fn read_fst<R>(
    reader: &mut R,
    partition_header: &PartitionHeader,
    is_wii: bool,
) -> Result<Arc<[u8]>>
where
    R: Read + Seek + ?Sized,
{
    reader
        .seek(SeekFrom::Start(partition_header.fst_offset(is_wii)))
        .context("Seeking to FST offset")?;
    let raw_fst: Arc<[u8]> = read_arc_slice(reader, partition_header.fst_size(is_wii) as usize)
        .with_context(|| {
            format!(
                "Reading partition FST (offset {}, size {})",
                partition_header.fst_offset(is_wii),
                partition_header.fst_size(is_wii)
            )
        })?;
    Ok(raw_fst)
}

pub(crate) fn read_part_meta(
    reader: &mut dyn PartitionReader,
    is_wii: bool,
) -> Result<PartitionMeta> {
    // boot.bin
    let raw_boot: Arc<[u8; BOOT_SIZE]> = read_arc(reader).context("Reading boot.bin")?;
    let partition_header =
        PartitionHeader::ref_from_bytes(&raw_boot[size_of::<DiscHeader>()..]).unwrap();

    // bi2.bin
    let raw_bi2: Arc<[u8; BI2_SIZE]> = read_arc(reader).context("Reading bi2.bin")?;

    // apploader.bin
    let mut raw_apploader: Vec<u8> =
        read_vec(reader, size_of::<ApploaderHeader>()).context("Reading apploader header")?;
    let apploader_header = ApploaderHeader::ref_from_bytes(raw_apploader.as_slice()).unwrap();
    raw_apploader.resize(
        size_of::<ApploaderHeader>()
            + apploader_header.size.get() as usize
            + apploader_header.trailer_size.get() as usize,
        0,
    );
    reader
        .read_exact(&mut raw_apploader[size_of::<ApploaderHeader>()..])
        .context("Reading apploader")?;
    let raw_apploader = Arc::from(raw_apploader.as_slice());

    // fst.bin
    let raw_fst = read_fst(reader, partition_header, is_wii)?;

    // main.dol
    let raw_dol = read_dol(reader, partition_header, is_wii)?;

    Ok(PartitionMeta {
        raw_boot,
        raw_bi2,
        raw_apploader,

@@ -220,5 +230,5 @@ pub(crate) fn read_part_meta(
        raw_tmd: None,
        raw_cert_chain: None,
        raw_h3_table: None,
    }))
    })
}
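Note that the hand-written `Read` impl removed above is exactly the shape that the new `impl_read_for_bufread!` invocation replaces; a sketch of such a macro under that assumption (the real definition lives in `util` and may differ):

```rust
// Hypothetical sketch of impl_read_for_bufread!; see util for the real macro.
macro_rules! impl_read_for_bufread {
    ($ty:ty) => {
        impl std::io::Read for $ty {
            #[inline]
            fn read(&mut self, out: &mut [u8]) -> std::io::Result<usize> {
                // Delegate to BufRead: copy out of the buffered window, then consume.
                let buf = std::io::BufRead::fill_buf(self)?;
                let len = buf.len().min(out.len());
                out[..len].copy_from_slice(&buf[..len]);
                std::io::BufRead::consume(self, len);
                Ok(len)
            }
        }
    };
}
```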
@@ -1,202 +1,92 @@
use std::{
    io::{Read, Seek, SeekFrom},
    sync::{Arc, Mutex},
    time::Instant,
};

use rayon::iter::{IntoParallelIterator, ParallelIterator};
use sha1::{Digest, Sha1};
use zerocopy::FromZeros;
use tracing::instrument;
use zerocopy::{FromZeros, IntoBytes};

use crate::{
    array_ref, array_ref_mut,
    common::HashBytes,
    disc::{
        reader::DiscReader,
        wii::{HASHES_SIZE, SECTOR_DATA_SIZE},
        SECTOR_GROUP_SIZE, SECTOR_SIZE,
    },
    io::HashBytes,
    util::read::read_box_slice,
    PartitionOptions, Result, ResultContext, SECTOR_SIZE,
    util::{array_ref, array_ref_mut},
};

/// In a sector, following the 0x400 byte block of hashes, each 0x400 bytes of decrypted data is
/// hashed, yielding 31 H0 hashes.
/// Then, 8 sectors are aggregated into a subgroup, and the 31 H0 hashes for each sector are hashed,
/// yielding 8 H1 hashes.
/// Then, 8 subgroups are aggregated into a group, and the 8 H1 hashes for each subgroup are hashed,
/// yielding 8 H2 hashes.
/// Finally, the 8 H2 hashes for each group are hashed, yielding 1 H3 hash.
/// The H3 hashes for each group are stored in the partition's H3 table.
#[derive(Clone, Debug)]
pub struct HashTable {
    /// SHA-1 hash of each 0x400 byte block of decrypted data.
    pub h0_hashes: Box<[HashBytes]>,
    /// SHA-1 hash of the 31 H0 hashes for each sector.
    pub h1_hashes: Box<[HashBytes]>,
    /// SHA-1 hash of the 8 H1 hashes for each subgroup.
    pub h2_hashes: Box<[HashBytes]>,
    /// SHA-1 hash of the 8 H2 hashes for each group.
    pub h3_hashes: Box<[HashBytes]>,
}

/// Hashes for a single sector group (64 sectors).
#[derive(Clone, FromZeros)]
struct HashResult {
    h0_hashes: [HashBytes; 1984],
    h1_hashes: [HashBytes; 64],
    h2_hashes: [HashBytes; 8],
    h3_hash: HashBytes,
pub struct GroupHashes {
    pub h3_hash: HashBytes,
    pub h2_hashes: [HashBytes; 8],
    pub h1_hashes: [HashBytes; 64],
    pub h0_hashes: [HashBytes; 1984],
}

impl HashTable {
    fn new(num_sectors: u32) -> Self {
        let num_sectors = num_sectors.next_multiple_of(64) as usize;
        let num_data_hashes = num_sectors * 31;
        let num_subgroups = num_sectors / 8;
        let num_groups = num_subgroups / 8;
        Self {
            h0_hashes: <[HashBytes]>::new_box_zeroed_with_elems(num_data_hashes).unwrap(),
            h1_hashes: <[HashBytes]>::new_box_zeroed_with_elems(num_sectors).unwrap(),
            h2_hashes: <[HashBytes]>::new_box_zeroed_with_elems(num_subgroups).unwrap(),
            h3_hashes: <[HashBytes]>::new_box_zeroed_with_elems(num_groups).unwrap(),
        }
impl GroupHashes {
    #[inline]
    pub fn hashes_for_sector(
        &self,
        sector: usize,
    ) -> (&[HashBytes; 31], &[HashBytes; 8], &[HashBytes; 8]) {
        let h1_hashes = array_ref![self.h1_hashes, sector & !7, 8];
        let h0_hashes = array_ref![self.h0_hashes, sector * 31, 31];
        (h0_hashes, h1_hashes, &self.h2_hashes)
    }

    fn extend(&mut self, group_index: usize, result: &HashResult) {
        *array_ref_mut![self.h0_hashes, group_index * 1984, 1984] = result.h0_hashes;
        *array_ref_mut![self.h1_hashes, group_index * 64, 64] = result.h1_hashes;
        *array_ref_mut![self.h2_hashes, group_index * 8, 8] = result.h2_hashes;
        self.h3_hashes[group_index] = result.h3_hash;
    #[inline]
    pub fn apply(&self, sector_data: &mut [u8; SECTOR_SIZE], sector: usize) {
        let (h0_hashes, h1_hashes, h2_hashes) = self.hashes_for_sector(sector);
        array_ref_mut![sector_data, 0, 0x26C].copy_from_slice(h0_hashes.as_bytes());
        array_ref_mut![sector_data, 0x280, 0xA0].copy_from_slice(h1_hashes.as_bytes());
        array_ref_mut![sector_data, 0x340, 0xA0].copy_from_slice(h2_hashes.as_bytes());
    }
}

pub fn rebuild_hashes(reader: &mut DiscReader) -> Result<()> {
    const NUM_H0_HASHES: usize = SECTOR_DATA_SIZE / HASHES_SIZE;
pub const NUM_H0_HASHES: usize = SECTOR_DATA_SIZE / HASHES_SIZE;

    log::info!(
        "Rebuilding hashes for Wii partition data (using {} threads)",
        rayon::current_num_threads()
    );

    let start = Instant::now();

    // Precompute hashes for zeroed sectors.
    const ZERO_H0_BYTES: &[u8] = &[0u8; HASHES_SIZE];
    let zero_h0_hash = sha1_hash(ZERO_H0_BYTES);

    let partitions = reader.partitions();
    let mut hash_tables = Vec::with_capacity(partitions.len());
    for part in partitions {
        let part_sectors = part.data_end_sector - part.data_start_sector;
        let hash_table = HashTable::new(part_sectors);
        log::debug!(
            "Rebuilding hashes: {} sectors, {} subgroups, {} groups",
            hash_table.h1_hashes.len(),
            hash_table.h2_hashes.len(),
            hash_table.h3_hashes.len()
        );

        let group_count = hash_table.h3_hashes.len();
        let mutex = Arc::new(Mutex::new(hash_table));
        let partition_options = PartitionOptions { validate_hashes: false };
        (0..group_count).into_par_iter().try_for_each_with(
            (reader.open_partition(part.index, &partition_options)?, mutex.clone()),
            |(stream, mutex), h3_index| -> Result<()> {
                let mut result = HashResult::new_box_zeroed()?;
                let mut data_buf = <[u8]>::new_box_zeroed_with_elems(SECTOR_DATA_SIZE)?;
                let mut h3_hasher = Sha1::new();
                for h2_index in 0..8 {
                    let mut h2_hasher = Sha1::new();
                    for h1_index in 0..8 {
                        let sector = h1_index + h2_index * 8;
                        let part_sector = sector as u32 + h3_index as u32 * 64;
                        let mut h1_hasher = Sha1::new();
                        if part_sector >= part_sectors {
                            for h0_index in 0..NUM_H0_HASHES {
                                result.h0_hashes[h0_index + sector * 31] = zero_h0_hash;
                                h1_hasher.update(zero_h0_hash);
                            }
                        } else {
                            stream
                                .seek(SeekFrom::Start(part_sector as u64 * SECTOR_DATA_SIZE as u64))
                                .with_context(|| format!("Seeking to sector {}", part_sector))?;
                            stream
                                .read_exact(&mut data_buf)
                                .with_context(|| format!("Reading sector {}", part_sector))?;
                            for h0_index in 0..NUM_H0_HASHES {
                                let h0_hash = sha1_hash(array_ref![
                                    data_buf,
                                    h0_index * HASHES_SIZE,
                                    HASHES_SIZE
                                ]);
                                result.h0_hashes[h0_index + sector * 31] = h0_hash;
                                h1_hasher.update(h0_hash);
                            }
                        };
                        let h1_hash = h1_hasher.finalize().into();
                        result.h1_hashes[sector] = h1_hash;
                        h2_hasher.update(h1_hash);
                    }
                    let h2_hash = h2_hasher.finalize().into();
                    result.h2_hashes[h2_index] = h2_hash;
                    h3_hasher.update(h2_hash);
#[instrument(skip_all)]
pub fn hash_sector_group(sector_group: &[u8; SECTOR_GROUP_SIZE]) -> Box<GroupHashes> {
    let mut result = GroupHashes::new_box_zeroed().unwrap();
    for (h2_index, h2_hash) in result.h2_hashes.iter_mut().enumerate() {
        let out_h1_hashes = array_ref_mut![result.h1_hashes, h2_index * 8, 8];
        for (h1_index, h1_hash) in out_h1_hashes.iter_mut().enumerate() {
            let sector = h1_index + h2_index * 8;
            let out_h0_hashes =
                array_ref_mut![result.h0_hashes, sector * NUM_H0_HASHES, NUM_H0_HASHES];
            if array_ref![sector_group, sector * SECTOR_SIZE, 20].iter().any(|&v| v != 0) {
                // Hash block already present, use it
                out_h0_hashes.as_mut_bytes().copy_from_slice(array_ref![
                    sector_group,
                    sector * SECTOR_SIZE,
                    0x26C
                ]);
            } else {
                for (h0_index, h0_hash) in out_h0_hashes.iter_mut().enumerate() {
                    *h0_hash = sha1_hash(array_ref![
                        sector_group,
                        sector * SECTOR_SIZE + HASHES_SIZE + h0_index * HASHES_SIZE,
                        HASHES_SIZE
                    ]);
                }
                result.h3_hash = h3_hasher.finalize().into();
                let mut hash_table = mutex.lock().map_err(|_| "Failed to lock mutex")?;
                hash_table.extend(h3_index, &result);
                Ok(())
            },
        )?;

        let hash_table = Arc::try_unwrap(mutex)
            .map_err(|_| "Failed to unwrap Arc")?
            .into_inner()
            .map_err(|_| "Failed to lock mutex")?;
        hash_tables.push(hash_table);
    }

    // Verify against H3 table
    for (part, hash_table) in reader.partitions.clone().iter().zip(hash_tables.iter()) {
        log::debug!(
            "Verifying H3 table for partition {} (count {})",
            part.index,
            hash_table.h3_hashes.len()
        );
        reader
            .seek(SeekFrom::Start(
                part.start_sector as u64 * SECTOR_SIZE as u64 + part.header.h3_table_off(),
            ))
            .context("Seeking to H3 table")?;
        let h3_table: Box<[HashBytes]> =
            read_box_slice(reader, hash_table.h3_hashes.len()).context("Reading H3 table")?;
        let mut mismatches = 0;
        for (idx, (expected_hash, h3_hash)) in
            h3_table.iter().zip(hash_table.h3_hashes.iter()).enumerate()
        {
            if expected_hash != h3_hash {
                let mut got_bytes = [0u8; 40];
                let got = base16ct::lower::encode_str(h3_hash, &mut got_bytes).unwrap();
                let mut expected_bytes = [0u8; 40];
                let expected =
                    base16ct::lower::encode_str(expected_hash, &mut expected_bytes).unwrap();
                log::debug!(
                    "Partition {} H3 table does not match:\n\tindex {}\n\texpected: {}\n\tgot: {}",
                    part.index, idx, expected, got
                );
                mismatches += 1;
            }
            *h1_hash = sha1_hash(out_h0_hashes.as_bytes());
        }
        if mismatches > 0 {
            log::warn!("Partition {} H3 table has {} hash mismatches", part.index, mismatches);
        }
        *h2_hash = sha1_hash(out_h1_hashes.as_bytes());
    }

    for (part, hash_table) in reader.partitions.iter_mut().zip(hash_tables) {
        part.hash_table = Some(hash_table);
    }
    log::info!("Rebuilt hashes in {:?}", start.elapsed());
    Ok(())
    result.h3_hash = sha1_hash(result.h2_hashes.as_bytes());
    result
}

/// Hashes a byte slice with SHA-1.
#[inline]
pub fn sha1_hash(buf: &[u8]) -> HashBytes { HashBytes::from(Sha1::digest(buf)) }
#[instrument(skip_all)]
pub fn sha1_hash(buf: &[u8]) -> HashBytes {
    #[cfg(feature = "openssl")]
    {
        // The one-shot openssl::sha::sha1 ends up being much slower
        let mut hasher = openssl::sha::Sha1::new();
        hasher.update(buf);
        hasher.finish()
    }
    #[cfg(not(feature = "openssl"))]
    {
        use sha1::Digest;
        HashBytes::from(sha1::Sha1::digest(buf))
    }
}
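For reference, the sizes baked into `GroupHashes` and `apply` fall straight out of the layout described above: 31 H0 hashes per sector (64 × 31 = 1984 per group), one H1 per sector, one H2 per 8-sector subgroup, one H3 per group, all 20-byte SHA-1 digests. A sanity-check sketch of that arithmetic (constants re-derived here for illustration; the real ones live in `disc::wii`):

```rust
#[test]
fn wii_hash_tree_arithmetic() {
    const SECTOR_SIZE: usize = 0x8000; // 32 KiB sector
    const HASHES_SIZE: usize = 0x400; // hash block at the start of each sector
    const SECTOR_DATA_SIZE: usize = SECTOR_SIZE - HASHES_SIZE; // 31 KiB of data
    const NUM_H0_HASHES: usize = SECTOR_DATA_SIZE / HASHES_SIZE;
    assert_eq!(NUM_H0_HASHES, 31); // H0 hashes per sector
    assert_eq!(64 * NUM_H0_HASHES, 1984); // h0_hashes array length per group
    assert_eq!(31 * 20, 0x26C); // H0 table bytes written at sector offset 0x000
    assert_eq!(8 * 20, 0xA0); // H1 table at 0x280, H2 table at 0x340
}
```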
@@ -1,40 +1,54 @@
//! Disc type related logic (GameCube, Wii)
//! GameCube/Wii disc format types.

use std::{
    borrow::Cow,
    ffi::CStr,
    fmt::{Debug, Display, Formatter},
    io,
    io::{BufRead, Seek},
    mem::size_of,
    str::from_utf8,
};
use std::{ffi::CStr, str::from_utf8};

use dyn_clone::DynClone;
use zerocopy::{big_endian::*, FromBytes, Immutable, IntoBytes, KnownLayout};

use crate::{io::MagicBytes, static_assert, Result};
use crate::{common::MagicBytes, util::static_assert};

pub(crate) mod fst;
pub(crate) mod direct;
pub mod fst;
pub(crate) mod gcn;
pub(crate) mod hashes;
pub(crate) mod preloader;
pub(crate) mod reader;
pub(crate) mod streams;
pub(crate) mod wii;

pub use fst::{Fst, Node, NodeKind};
pub use streams::{FileStream, OwnedFileStream, WindowedStream};
pub use wii::{ContentMetadata, SignedHeader, Ticket, TicketLimit, TmdHeader, REGION_SIZE};
pub mod wii;
pub(crate) mod writer;

/// Size in bytes of a disc sector. (32 KiB)
pub const SECTOR_SIZE: usize = 0x8000;

/// Size in bytes of a Wii partition sector group. (32 KiB * 64, 2 MiB)
pub const SECTOR_GROUP_SIZE: usize = SECTOR_SIZE * 64;

/// Magic bytes for Wii discs. Located at offset 0x18.
pub const WII_MAGIC: MagicBytes = [0x5D, 0x1C, 0x9E, 0xA3];

/// Magic bytes for GameCube discs. Located at offset 0x1C.
pub const GCN_MAGIC: MagicBytes = [0xC2, 0x33, 0x9F, 0x3D];

/// Size in bytes of the disc header and partition header. (boot.bin)
pub const BOOT_SIZE: usize = size_of::<DiscHeader>() + size_of::<PartitionHeader>();

/// Size in bytes of the debug and region information. (bi2.bin)
pub const BI2_SIZE: usize = 0x2000;

/// The size of a single-layer MiniDVD. (1.4 GB)
///
/// GameCube games and some third-party Wii discs (Datel) use this format.
pub const MINI_DVD_SIZE: u64 = 1_459_978_240;

/// The size of a single-layer DVD. (4.7 GB)
///
/// The vast majority of Wii games use this format.
pub const SL_DVD_SIZE: u64 = 4_699_979_776;

/// The size of a dual-layer DVD. (8.5 GB)
///
/// A few larger Wii games use this format.
/// (Super Smash Bros. Brawl, Metroid Prime Trilogy, etc.)
pub const DL_DVD_SIZE: u64 = 8_511_160_320;

/// Shared GameCube & Wii disc header.
///
/// This header is always at the start of the disc image and within each Wii partition.

@@ -53,7 +67,7 @@ pub struct DiscHeader {
    pub audio_stream_buf_size: u8,
    /// Padding
    _pad1: [u8; 14],
    /// If this is a Wii disc, this will be 0x5D1C9EA3
    pub wii_magic: MagicBytes,
    /// If this is a GameCube disc, this will be 0xC2339F3D
    pub gcn_magic: MagicBytes,

@@ -112,7 +126,7 @@ pub struct PartitionHeader {
    pub debug_mon_offset: U32,
    /// Debug monitor load address
    pub debug_load_address: U32,
    /// Padding
    _pad1: [u8; 0x18],
    /// Offset to main DOL (Wii: >> 2)
    pub dol_offset: U32,

@@ -145,6 +159,16 @@ impl PartitionHeader {
        }
    }

    /// Set the offset within the partition to the main DOL.
    #[inline]
    pub fn set_dol_offset(&mut self, offset: u64, is_wii: bool) {
        if is_wii {
            self.dol_offset.set((offset / 4) as u32);
        } else {
            self.dol_offset.set(offset as u32);
        }
    }

    /// Offset within the partition to the file system table (FST).
    #[inline]
    pub fn fst_offset(&self, is_wii: bool) -> u64 {

@@ -155,6 +179,16 @@ impl PartitionHeader {
        }
    }

    /// Set the offset within the partition to the file system table (FST).
    #[inline]
    pub fn set_fst_offset(&mut self, offset: u64, is_wii: bool) {
        if is_wii {
            self.fst_offset.set((offset / 4) as u32);
        } else {
            self.fst_offset.set(offset as u32);
        }
    }

    /// Size of the file system table (FST).
    #[inline]
    pub fn fst_size(&self, is_wii: bool) -> u64 {

@@ -165,6 +199,16 @@ impl PartitionHeader {
        }
    }

    /// Set the size of the file system table (FST).
    #[inline]
    pub fn set_fst_size(&mut self, size: u64, is_wii: bool) {
        if is_wii {
            self.fst_size.set((size / 4) as u32);
        } else {
            self.fst_size.set(size as u32);
        }
    }

    /// Maximum size of the file system table (FST) across multi-disc games.
    #[inline]
    pub fn fst_max_size(&self, is_wii: bool) -> u64 {

@@ -174,6 +218,16 @@ impl PartitionHeader {
            self.fst_max_size.get() as u64
        }
    }

    /// Set the maximum size of the file system table (FST) across multi-disc games.
    #[inline]
    pub fn set_fst_max_size(&mut self, size: u64, is_wii: bool) {
        if is_wii {
            self.fst_max_size.set((size / 4) as u32);
        } else {
            self.fst_max_size.set(size as u32);
        }
    }
}
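These getter/setter pairs encode the usual Wii header quirk: on Wii, offsets and sizes are stored as 32-bit word counts (`value / 4`), which extends the addressable range of a `U32` field to 16 GiB, while GameCube stores plain byte values. A round-trip illustration (the value is made up, and must be 4-byte aligned to survive the division):

```rust
#[test]
fn wii_offset_round_trip() {
    // Illustrative round trip of the Wii ">> 2" encoding used above.
    let byte_offset: u64 = 0x0002_5460; // hypothetical, 4-byte aligned
    let stored = (byte_offset / 4) as u32; // what set_dol_offset(offset, true) writes
    assert_eq!(stored, 0x9518);
    assert_eq!(stored as u64 * 4, byte_offset); // what dol_offset(true) reads back
}
```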

/// Apploader header.

@@ -231,225 +285,3 @@ pub struct DolHeader {
}

static_assert!(size_of::<DolHeader>() == 0x100);

/// The kind of disc partition.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum PartitionKind {
    /// Data partition.
    Data,
    /// Update partition.
    Update,
    /// Channel partition.
    Channel,
    /// Other partition kind.
    Other(u32),
}

impl Display for PartitionKind {
    #[inline]
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Data => write!(f, "Data"),
            Self::Update => write!(f, "Update"),
            Self::Channel => write!(f, "Channel"),
            Self::Other(v) => {
                let bytes = v.to_be_bytes();
                write!(f, "Other ({:08X}, {})", v, String::from_utf8_lossy(&bytes))
            }
        }
    }
}

impl PartitionKind {
    /// Returns the directory name for the partition kind.
    #[inline]
    pub fn dir_name(&self) -> Cow<str> {
        match self {
            Self::Data => Cow::Borrowed("DATA"),
            Self::Update => Cow::Borrowed("UPDATE"),
            Self::Channel => Cow::Borrowed("CHANNEL"),
            Self::Other(v) => {
                let bytes = v.to_be_bytes();
                Cow::Owned(format!("P-{}", String::from_utf8_lossy(&bytes)))
            }
        }
    }
}

impl From<u32> for PartitionKind {
    #[inline]
    fn from(v: u32) -> Self {
        match v {
            0 => Self::Data,
            1 => Self::Update,
            2 => Self::Channel,
            v => Self::Other(v),
        }
    }
}
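A small round-trip illustration of the conversions above; values other than 0-2 are treated as four-character codes, and the `0x3331_3238` code here is hypothetical:

```rust
#[test]
fn partition_kind_round_trip() {
    assert_eq!(PartitionKind::from(1u32), PartitionKind::Update);
    assert_eq!(PartitionKind::Update.dir_name(), "UPDATE");
    // Unknown kinds render their big-endian bytes as ASCII:
    let other = PartitionKind::from(0x3331_3238); // hypothetical four-char code "3128"
    assert_eq!(other.dir_name(), "P-3128");
    assert_eq!(other.to_string(), "Other (33313238, 3128)");
}
```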

/// An open disc partition.
pub trait PartitionBase: DynClone + BufRead + Seek + Send + Sync {
    /// Reads the partition header and file system table.
    fn meta(&mut self) -> Result<Box<PartitionMeta>>;

    /// Seeks the partition stream to the specified file system node
    /// and returns a windowed stream.
    ///
    /// # Examples
    ///
    /// Basic usage:
    /// ```no_run
    /// use std::io::Read;
    ///
    /// use nod::{Disc, PartitionKind};
    ///
    /// fn main() -> nod::Result<()> {
    ///     let disc = Disc::new("path/to/file.iso")?;
    ///     let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
    ///     let meta = partition.meta()?;
    ///     let fst = meta.fst()?;
    ///     if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
    ///         let mut s = String::new();
    ///         partition
    ///             .open_file(node)
    ///             .expect("Failed to open file stream")
    ///             .read_to_string(&mut s)
    ///             .expect("Failed to read file");
    ///         println!("{}", s);
    ///     }
    ///     Ok(())
    /// }
    /// ```
    fn open_file(&mut self, node: Node) -> io::Result<FileStream>;

    /// Consumes the partition instance and returns a windowed stream.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::Read;
    ///
    /// use nod::{Disc, PartitionKind, OwnedFileStream};
    ///
    /// fn main() -> nod::Result<()> {
    ///     let disc = Disc::new("path/to/file.iso")?;
    ///     let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
    ///     let meta = partition.meta()?;
    ///     let fst = meta.fst()?;
    ///     if let Some((_, node)) = fst.find("/disc.tgc") {
    ///         let file: OwnedFileStream = partition
    ///             .clone() // Clone the Box<dyn PartitionBase>
    ///             .into_open_file(node) // Get an OwnedFileStream
    ///             .expect("Failed to open file stream");
    ///         // Open the inner disc image using the owned stream
    ///         let inner_disc = Disc::new_stream(Box::new(file))
    ///             .expect("Failed to open inner disc");
    ///         // ...
    ///     }
    ///     Ok(())
    /// }
    /// ```
    fn into_open_file(self: Box<Self>, node: Node) -> io::Result<OwnedFileStream>;
}

dyn_clone::clone_trait_object!(PartitionBase);

/// Size of the disc header and partition header (boot.bin)
pub const BOOT_SIZE: usize = size_of::<DiscHeader>() + size_of::<PartitionHeader>();
/// Size of the debug and region information (bi2.bin)
pub const BI2_SIZE: usize = 0x2000;

/// Extra disc partition data. (DOL, FST, etc.)
#[derive(Clone, Debug)]
pub struct PartitionMeta {
    /// Disc and partition header (boot.bin)
    pub raw_boot: Box<[u8; BOOT_SIZE]>,
    /// Debug and region information (bi2.bin)
    pub raw_bi2: Box<[u8; BI2_SIZE]>,
    /// Apploader (apploader.bin)
    pub raw_apploader: Box<[u8]>,
    /// Main binary (main.dol)
    pub raw_dol: Box<[u8]>,
    /// File system table (fst.bin)
    pub raw_fst: Box<[u8]>,
    /// Ticket (ticket.bin, Wii only)
    pub raw_ticket: Option<Box<[u8]>>,
    /// TMD (tmd.bin, Wii only)
    pub raw_tmd: Option<Box<[u8]>>,
    /// Certificate chain (cert.bin, Wii only)
    pub raw_cert_chain: Option<Box<[u8]>>,
    /// H3 hash table (h3.bin, Wii only)
    pub raw_h3_table: Option<Box<[u8]>>,
}

impl PartitionMeta {
    /// A view into the disc header.
    #[inline]
    pub fn header(&self) -> &DiscHeader {
        DiscHeader::ref_from_bytes(&self.raw_boot[..size_of::<DiscHeader>()])
            .expect("Invalid header alignment")
    }

    /// A view into the partition header.
    #[inline]
    pub fn partition_header(&self) -> &PartitionHeader {
        PartitionHeader::ref_from_bytes(&self.raw_boot[size_of::<DiscHeader>()..])
            .expect("Invalid partition header alignment")
    }

    /// A view into the apploader header.
    #[inline]
    pub fn apploader_header(&self) -> &ApploaderHeader {
        ApploaderHeader::ref_from_prefix(&self.raw_apploader)
            .expect("Invalid apploader alignment")
            .0
    }

    /// A view into the file system table (FST).
    #[inline]
    pub fn fst(&self) -> Result<Fst, &'static str> { Fst::new(&self.raw_fst) }

    /// A view into the DOL header.
    #[inline]
    pub fn dol_header(&self) -> &DolHeader {
        DolHeader::ref_from_prefix(&self.raw_dol).expect("Invalid DOL alignment").0
    }

    /// A view into the ticket. (Wii only)
    #[inline]
    pub fn ticket(&self) -> Option<&Ticket> {
        let raw_ticket = self.raw_ticket.as_deref()?;
        Some(Ticket::ref_from_bytes(raw_ticket).expect("Invalid ticket alignment"))
    }

    /// A view into the TMD. (Wii only)
    #[inline]
    pub fn tmd_header(&self) -> Option<&TmdHeader> {
        let raw_tmd = self.raw_tmd.as_deref()?;
        Some(TmdHeader::ref_from_prefix(raw_tmd).expect("Invalid TMD alignment").0)
    }

    /// A view into the TMD content metadata. (Wii only)
    #[inline]
    pub fn content_metadata(&self) -> Option<&[ContentMetadata]> {
        let raw_cmd = &self.raw_tmd.as_deref()?[size_of::<TmdHeader>()..];
        Some(<[ContentMetadata]>::ref_from_bytes(raw_cmd).expect("Invalid CMD alignment"))
    }
}

/// The size of a single-layer MiniDVD. (1.4 GB)
///
/// GameCube games and some third-party Wii discs (Datel) use this format.
pub const MINI_DVD_SIZE: u64 = 1_459_978_240;

/// The size of a single-layer DVD. (4.7 GB)
///
/// The vast majority of Wii games use this format.
pub const SL_DVD_SIZE: u64 = 4_699_979_776;

/// The size of a dual-layer DVD. (8.5 GB)
///
/// A few larger Wii games use this format.
/// (Super Smash Bros. Brawl, Metroid Prime Trilogy, etc.)
pub const DL_DVD_SIZE: u64 = 8_511_160_320;

@@ -0,0 +1,558 @@
use std::{
    collections::HashMap,
    io,
    num::NonZeroUsize,
    sync::{Arc, Mutex},
    thread::JoinHandle,
    time::{Duration, Instant},
};

use bytes::{Bytes, BytesMut};
use crossbeam_channel::{Receiver, Sender};
use crossbeam_utils::sync::WaitGroup;
use lru::LruCache;
use simple_moving_average::{SingleSumSMA, SMA};
use tracing::{debug, error, instrument, span, Level};
use zerocopy::FromZeros;

use crate::{
    common::PartitionInfo,
    disc::{
        hashes::hash_sector_group, wii::HASHES_SIZE, DiscHeader, SECTOR_GROUP_SIZE, SECTOR_SIZE,
    },
    io::{
        block::{Block, BlockKind, BlockReader},
        wia::WIAException,
    },
    read::PartitionEncryption,
    util::{
        aes::{decrypt_sector, encrypt_sector},
        array_ref_mut,
    },
};

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct SectorGroupRequest {
    pub group_idx: u32,
    pub partition_idx: Option<u8>,
    pub mode: PartitionEncryption,
}

#[derive(Clone)]
pub struct SectorGroup {
    pub request: SectorGroupRequest,
    pub data: Bytes,
    pub sector_bitmap: u64,
    pub io_duration: Option<Duration>,
}

impl SectorGroup {
    /// Calculate the number of consecutive sectors starting from `start`.
    #[inline]
    pub fn consecutive_sectors(&self, start: u32) -> u32 {
        (self.sector_bitmap >> start).trailing_ones()
    }
}
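`sector_bitmap` packs one bit per sector of the group (bit *n* set means sector *n* was read), so the consecutive-run query is just a shift plus `trailing_ones`. A worked example with made-up values:

```rust
#[test]
fn consecutive_sectors_bitmap() {
    // Illustrative: sectors 0-5 and 8-9 present, 6-7 missing.
    let group = SectorGroup {
        request: SectorGroupRequest {
            group_idx: 0,
            partition_idx: None,
            mode: PartitionEncryption::Original,
        },
        data: bytes::Bytes::new(), // payload omitted for the example
        sector_bitmap: 0b11_0011_1111,
        io_duration: None,
    };
    assert_eq!(group.consecutive_sectors(0), 6); // run of set bits starting at 0
    assert_eq!(group.consecutive_sectors(6), 0); // bit 6 is clear
    assert_eq!(group.consecutive_sectors(8), 2); // run of set bits starting at 8
}
```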

pub type SectorGroupResult = io::Result<SectorGroup>;

#[allow(unused)]
pub struct Preloader {
    request_tx: Sender<SectorGroupRequest>,
    request_rx: Receiver<SectorGroupRequest>,
    stat_tx: Sender<PreloaderThreadStats>,
    stat_rx: Receiver<PreloaderThreadStats>,
    threads: Mutex<PreloaderThreads>,
    cache: Arc<Mutex<PreloaderCache>>,
    // Fallback single-threaded loader
    loader: Mutex<SectorGroupLoader>,
}

#[allow(unused)]
struct PreloaderThreads {
    join_handles: Vec<JoinHandle<()>>,
    last_adjust: Instant,
    num_samples: usize,
    wait_time_avg: SingleSumSMA<Duration, u32, 100>,
    req_time_avg: SingleSumSMA<Duration, u32, 100>,
    io_time_avg: SingleSumSMA<Duration, u32, 100>,
}

impl PreloaderThreads {
    fn new(join_handles: Vec<JoinHandle<()>>) -> Self {
        Self {
            join_handles,
            last_adjust: Instant::now(),
            num_samples: 0,
            wait_time_avg: SingleSumSMA::<_, _, 100>::from_zero(Duration::default()),
            req_time_avg: SingleSumSMA::<_, _, 100>::from_zero(Duration::default()),
            io_time_avg: SingleSumSMA::<_, _, 100>::from_zero(Duration::default()),
        }
    }

    fn push_stats(&mut self, stat: PreloaderThreadStats, _outer: &Preloader) {
        self.wait_time_avg.add_sample(stat.wait_time);
        self.req_time_avg.add_sample(stat.req_time);
        self.io_time_avg.add_sample(stat.io_time);
        self.num_samples += 1;
        if self.num_samples % 100 == 0 {
            let avg_wait = self.wait_time_avg.get_average();
            let avg_req = self.req_time_avg.get_average();
            let avg_io = self.io_time_avg.get_average();
            let utilization =
                avg_req.as_secs_f64() / (avg_req.as_secs_f64() + avg_wait.as_secs_f64());
            let io_time = avg_io.as_secs_f64() / avg_req.as_secs_f64();
            debug!(
                "Preloader stats: count {}, wait: {:?}, req: {:?}, util: {:.2}%, io: {:.2}%",
                self.num_samples,
                avg_wait,
                avg_req,
                utilization * 100.0,
                io_time * 100.0
            );
            // if self.last_adjust.elapsed() > Duration::from_secs(2) {
            //     if utilization > 0.9 && io_time < 0.1 {
            //         println!("Preloader is CPU-bound, increasing thread count");
            //         let id = self.join_handles.len();
            //         self.join_handles.push(preloader_thread(
            //             id,
            //             outer.request_rx.clone(),
            //             outer.cache.clone(),
            //             outer.loader.lock().unwrap().clone(),
            //             outer.stat_tx.clone(),
            //         ));
            //         self.last_adjust = Instant::now();
            //     } /*else if io_time > 0.9 {
            //         println!("Preloader is I/O-bound, decreasing thread count");
            //         if self.join_handles.len() > 1 {
            //             let handle = self.join_handles.pop().unwrap();
            //
            //         }
            //     }*/
            // }
        }
    }
}

struct PreloaderCache {
    inflight: HashMap<SectorGroupRequest, WaitGroup>,
    lru_cache: LruCache<SectorGroupRequest, SectorGroup>,
}

impl Default for PreloaderCache {
    fn default() -> Self {
        Self {
            inflight: Default::default(),
            lru_cache: LruCache::new(NonZeroUsize::new(64).unwrap()),
        }
    }
}

impl PreloaderCache {
    fn push(&mut self, request: SectorGroupRequest, group: SectorGroup) {
        self.lru_cache.push(request, group);
        self.inflight.remove(&request);
    }

    fn remove(&mut self, request: &SectorGroupRequest) { self.inflight.remove(request); }

    fn contains(&self, request: &SectorGroupRequest) -> bool {
        self.lru_cache.contains(request) || self.inflight.contains_key(request)
    }
}

#[allow(unused)]
struct PreloaderThreadStats {
    thread_id: usize,
    wait_time: Duration,
    req_time: Duration,
    io_time: Duration,
}

fn preloader_thread(
    thread_id: usize,
    request_rx: Receiver<SectorGroupRequest>,
    cache: Arc<Mutex<PreloaderCache>>,
    mut loader: SectorGroupLoader,
    stat_tx: Sender<PreloaderThreadStats>,
) -> JoinHandle<()> {
    std::thread::Builder::new()
        .name(format!("Preloader {thread_id}"))
        .spawn(move || {
            let mut last_request_end: Option<Instant> = None;
            while let Ok(request) = request_rx.recv() {
                let wait_time = if let Some(last_request) = last_request_end {
                    last_request.elapsed()
                } else {
                    Duration::default()
                };
                let start = Instant::now();
                let mut io_time = Duration::default();
                match loader.load(request) {
                    Ok(group) => {
                        let Ok(mut cache_guard) = cache.lock() else {
                            break;
                        };
                        io_time = group.io_duration.unwrap_or_default();
                        cache_guard.push(request, group);
                    }
                    Err(_) => {
                        let Ok(mut cache_guard) = cache.lock() else {
                            break;
                        };
                        // Just drop the request if it failed
                        cache_guard.remove(&request);
                    }
                }
                let end = Instant::now();
                last_request_end = Some(end);
                let req_time = end - start;
                stat_tx
                    .send(PreloaderThreadStats { thread_id, wait_time, req_time, io_time })
                    .expect("Failed to send preloader stats");
            }
        })
        .expect("Failed to spawn preloader thread")
}

impl Preloader {
    pub fn new(loader: SectorGroupLoader, num_threads: usize) -> Arc<Self> {
        debug!("Creating preloader with {} threads", num_threads);

        let (request_tx, request_rx) = crossbeam_channel::unbounded();
        let (stat_tx, stat_rx) = crossbeam_channel::unbounded();
        let cache = Arc::new(Mutex::new(PreloaderCache::default()));
        let mut join_handles = Vec::with_capacity(num_threads);
        for i in 0..num_threads {
            join_handles.push(preloader_thread(
                i,
                request_rx.clone(),
                cache.clone(),
                loader.clone(),
                stat_tx.clone(),
            ));
        }
        let threads = Mutex::new(PreloaderThreads::new(join_handles));
        let loader = Mutex::new(loader);
        Arc::new(Self { request_tx, request_rx, stat_tx, stat_rx, threads, cache, loader })
    }

    #[allow(unused)]
    pub fn shutdown(self) {
        let guard = self.threads.into_inner().unwrap();
        for handle in guard.join_handles {
            handle.join().unwrap();
        }
    }

    #[instrument(name = "Preloader::fetch", skip_all)]
    pub fn fetch(&self, request: SectorGroupRequest, max_groups: u32) -> SectorGroupResult {
        let num_threads = {
            let mut threads_guard = self.threads.lock().map_err(map_poisoned)?;
            while let Ok(stat) = self.stat_rx.try_recv() {
                threads_guard.push_stats(stat, self);
            }
            threads_guard.join_handles.len()
        };
        let mut cache_guard = self.cache.lock().map_err(map_poisoned)?;
        // Preload n groups ahead
        for i in 0..num_threads as u32 {
            let group_idx = request.group_idx + i;
            if group_idx >= max_groups {
                break;
            }
            let request = SectorGroupRequest { group_idx, ..request };
            if cache_guard.contains(&request) {
                continue;
            }
            if self.request_tx.send(request).is_ok() {
                cache_guard.inflight.insert(request, WaitGroup::new());
            }
        }
        if let Some(cached) = cache_guard.lru_cache.get(&request) {
            return Ok(cached.clone());
        }
        if let Some(wg) = cache_guard.inflight.get(&request) {
            // Wait for inflight request to finish
            let wg = wg.clone();
            drop(cache_guard);
            {
                let _span = span!(Level::TRACE, "wg.wait").entered();
                wg.wait();
            }
            let mut cache_guard = self.cache.lock().map_err(map_poisoned)?;
            if let Some(cached) = cache_guard.lru_cache.get(&request) {
                return Ok(cached.clone());
            }
        } else {
            drop(cache_guard);
        }
        // No threads are running, fallback to single-threaded loader
        let result = {
            let mut loader = self.loader.lock().map_err(map_poisoned)?;
            loader.load(request)
        };
        match result {
            Ok(group) => {
                let mut cache_guard = self.cache.lock().map_err(map_poisoned)?;
                cache_guard.push(request, group.clone());
                Ok(group)
            }
            Err(e) => Err(e),
        }
    }
}
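`fetch` coalesces duplicate loads: the first caller registers a `WaitGroup` under `inflight`, later callers clone and block on it, and `PreloaderCache::push` removes the entry, dropping the last other clone and releasing every waiter. A stripped-down sketch of that idiom, assuming crossbeam's `WaitGroup` semantics (`wait()` returns once all other clones have been dropped):

```rust
// Minimal sketch of the inflight-coalescing idiom; not the real Preloader.
use crossbeam_utils::sync::WaitGroup;

fn coalesce_example() {
    let wg = WaitGroup::new(); // held by the inflight map entry
    let waiter = wg.clone(); // a second fetch() of the same request
    std::thread::spawn(move || {
        // ... load the sector group and publish it to the LRU cache ...
        drop(wg); // removing the inflight entry drops the last other handle
    });
    waiter.wait(); // returns once the loader side has dropped its handle
    // fetch() then re-locks the cache and re-checks the LRU for the result.
}
```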

fn map_poisoned<T>(_: std::sync::PoisonError<T>) -> io::Error {
    io::Error::new(io::ErrorKind::Other, "Mutex poisoned")
}

pub struct SectorGroupLoader {
    io: Box<dyn BlockReader>,
    disc_header: Arc<DiscHeader>,
    partitions: Arc<[PartitionInfo]>,
    block: Block,
    block_buf: Box<[u8]>,
}

impl Clone for SectorGroupLoader {
    fn clone(&self) -> Self {
        let block_size = self.io.block_size() as usize;
        Self {
            io: self.io.clone(),
            disc_header: self.disc_header.clone(),
            partitions: self.partitions.clone(),
            block: Block::default(),
            block_buf: <[u8]>::new_box_zeroed_with_elems(block_size).unwrap(),
        }
    }
}

impl SectorGroupLoader {
    pub fn new(
        io: Box<dyn BlockReader>,
        disc_header: Arc<DiscHeader>,
        partitions: Arc<[PartitionInfo]>,
    ) -> Self {
        let block_buf = <[u8]>::new_box_zeroed_with_elems(io.block_size() as usize).unwrap();
        Self { io, disc_header, partitions, block: Block::default(), block_buf }
    }

    #[instrument(name = "SectorGroupLoader::load", skip_all)]
    pub fn load(&mut self, request: SectorGroupRequest) -> SectorGroupResult {
        let mut sector_group_buf = BytesMut::zeroed(SECTOR_GROUP_SIZE);

        let out = array_ref_mut![sector_group_buf, 0, SECTOR_GROUP_SIZE];
        let (sector_bitmap, io_duration) = if request.partition_idx.is_some() {
            self.load_partition_group(request, out)?
        } else {
            self.load_raw_group(request, out)?
        };

        Ok(SectorGroup { request, data: sector_group_buf.freeze(), sector_bitmap, io_duration })
    }

    /// Load a sector group from a partition.
    ///
    /// This will handle encryption, decryption, and hash recovery as needed.
    fn load_partition_group(
        &mut self,
        request: SectorGroupRequest,
        sector_group_buf: &mut [u8; SECTOR_GROUP_SIZE],
    ) -> io::Result<(u64, Option<Duration>)> {
        let Some(partition) =
            request.partition_idx.and_then(|idx| self.partitions.get(idx as usize))
        else {
            return Ok((0, None));
        };

        let abs_group_sector = partition.data_start_sector + request.group_idx * 64;
        if abs_group_sector >= partition.data_end_sector {
            return Ok((0, None));
        }

        // Bitmap of sectors that were read
        let mut sector_bitmap = 0u64;
        // Bitmap of sectors that are decrypted
        let mut decrypted_sectors = 0u64;
        // Bitmap of sectors that need hash recovery
        let mut hash_recovery_sectors = 0u64;
        // Hash exceptions
        let mut hash_exceptions = Vec::<WIAException>::new();
        // Total duration of I/O operations
        let mut io_duration = None;

        // Read sector group
        for sector in 0..64 {
            let sector_data =
                array_ref_mut![sector_group_buf, sector as usize * SECTOR_SIZE, SECTOR_SIZE];
            let abs_sector = abs_group_sector + sector;
            if abs_sector >= partition.data_end_sector {
                // Already zeroed
                decrypted_sectors |= 1 << sector;
                hash_recovery_sectors |= 1 << sector;
                continue;
            }

            // Read new block
            if !self.block.contains(abs_sector) {
                self.block = self.io.read_block(self.block_buf.as_mut(), abs_sector)?;
                if let Some(duration) = self.block.io_duration {
                    *io_duration.get_or_insert_with(Duration::default) += duration;
                }
                if self.block.kind == BlockKind::None {
                    error!("Failed to read block for sector {}", abs_sector);
                    break;
                }
            }

            // Add hash exceptions
            self.block.append_hash_exceptions(abs_sector, sector, &mut hash_exceptions)?;

            // Read new sector into buffer
            let (encrypted, has_hashes) = self.block.copy_sector(
                sector_data,
                self.block_buf.as_mut(),
                abs_sector,
                &partition.disc_header,
                Some(partition),
            )?;
            if !encrypted {
                decrypted_sectors |= 1 << sector;
            }
            if !has_hashes && partition.has_hashes {
                hash_recovery_sectors |= 1 << sector;
            }
            sector_bitmap |= 1 << sector;
        }

        // Recover hashes
        if request.mode != PartitionEncryption::ForceDecryptedNoHashes && hash_recovery_sectors != 0
        {
            // Decrypt any encrypted sectors
            if decrypted_sectors != u64::MAX {
                for sector in 0..64 {
                    let sector_data =
                        array_ref_mut![sector_group_buf, sector * SECTOR_SIZE, SECTOR_SIZE];
                    if (decrypted_sectors >> sector) & 1 == 0 {
                        decrypt_sector(sector_data, &partition.key);
                    }
                }
                decrypted_sectors = u64::MAX;
            }

            // Recover hashes
            let group_hashes = hash_sector_group(sector_group_buf);

            // Apply hashes
            for sector in 0..64 {
                let sector_data =
                    array_ref_mut![sector_group_buf, sector * SECTOR_SIZE, SECTOR_SIZE];
                if (hash_recovery_sectors >> sector) & 1 == 1 {
                    group_hashes.apply(sector_data, sector);
                }
            }
        }

        // Apply hash exceptions
        if request.mode != PartitionEncryption::ForceDecryptedNoHashes
            && !hash_exceptions.is_empty()
        {
            for exception in hash_exceptions {
                let offset = exception.offset.get();
                let sector = offset / HASHES_SIZE as u16;

                // Decrypt sector if needed
                let sector_data =
                    array_ref_mut![sector_group_buf, sector as usize * SECTOR_SIZE, SECTOR_SIZE];
                if (decrypted_sectors >> sector) & 1 == 0 {
                    decrypt_sector(sector_data, &partition.key);
                    decrypted_sectors |= 1 << sector;
                }

                let sector_offset = (offset - (sector * HASHES_SIZE as u16)) as usize;
                *array_ref_mut![sector_data, sector_offset, 20] = exception.hash;
            }
        }

        // Encrypt/decrypt sectors
        if match request.mode {
            PartitionEncryption::Original => partition.has_encryption,
            PartitionEncryption::ForceEncrypted => true,
            PartitionEncryption::ForceDecrypted | PartitionEncryption::ForceDecryptedNoHashes => {
                false
            }
        } {
            // Encrypt any decrypted sectors
            if decrypted_sectors != 0 {
                for sector in 0..64 {
                    let sector_data = array_ref_mut![
                        sector_group_buf,
                        sector as usize * SECTOR_SIZE,
                        SECTOR_SIZE
                    ];
                    if (decrypted_sectors >> sector) & 1 == 1 {
                        encrypt_sector(sector_data, &partition.key);
                    }
                }
            }
        } else if decrypted_sectors != u64::MAX {
            // Decrypt any encrypted sectors
            for sector in 0..64 {
                let sector_data =
                    array_ref_mut![sector_group_buf, sector as usize * SECTOR_SIZE, SECTOR_SIZE];
                if (decrypted_sectors >> sector) & 1 == 0 {
                    decrypt_sector(sector_data, &partition.key);
                }
            }
        }

        Ok((sector_bitmap, io_duration))
    }

    /// Loads a non-partition sector group.
    fn load_raw_group(
        &mut self,
        request: SectorGroupRequest,
        sector_group_buf: &mut [u8; SECTOR_GROUP_SIZE],
    ) -> io::Result<(u64, Option<Duration>)> {
        let abs_group_sector = request.group_idx * 64;

        // Bitmap of sectors that were read
        let mut sector_bitmap = 0u64;
        // Total duration of I/O operations
        let mut io_duration = None;

        for sector in 0..64 {
            let sector_data =
                array_ref_mut![sector_group_buf, sector as usize * SECTOR_SIZE, SECTOR_SIZE];
            let abs_sector = abs_group_sector + sector;
            if self.partitions.iter().any(|p| p.data_contains_sector(abs_sector)) {
                continue;
            }

            // Read new block
            if !self.block.contains(abs_sector) {
                self.block = self.io.read_block(self.block_buf.as_mut(), abs_sector)?;
                if let Some(duration) = self.block.io_duration {
                    *io_duration.get_or_insert_with(Duration::default) += duration;
                }
                if self.block.kind == BlockKind::None {
                    break;
                }
            }

            // Read new sector into buffer
            self.block.copy_sector(
                sector_data,
                self.block_buf.as_mut(),
                abs_sector,
                self.disc_header.as_ref(),
                None,
            )?;
            sector_bitmap |= 1 << sector;
        }

        Ok((sector_bitmap, io_duration))
    }
}

@ -1,130 +1,163 @@
|
|||
use std::{
    io,
    io::{BufRead, Read, Seek, SeekFrom},
    io::{BufRead, Seek, SeekFrom},
    sync::Arc,
};

use zerocopy::{FromBytes, FromZeros};
use bytes::Bytes;
use tracing::warn;
use zerocopy::IntoBytes;

use super::{
    gcn::PartitionGC,
    hashes::{rebuild_hashes, HashTable},
    wii::{PartitionWii, WiiPartEntry, WiiPartGroup, WiiPartitionHeader, WII_PART_GROUP_OFF},
    DiscHeader, PartitionBase, PartitionHeader, PartitionKind, DL_DVD_SIZE, MINI_DVD_SIZE,
    REGION_SIZE, SL_DVD_SIZE,
};
use crate::{
    disc::wii::REGION_OFFSET,
    io::block::{Block, BlockIO, PartitionInfo},
    util::read::{read_box, read_from, read_vec},
    DiscMeta, Error, OpenOptions, PartitionEncryptionMode, PartitionOptions, Result, ResultContext,
    SECTOR_SIZE,
    common::{PartitionInfo, PartitionKind},
    disc::{
        direct::{DirectDiscReader, DirectDiscReaderMode},
        fst::{Fst, NodeKind},
        gcn::{read_fst, PartitionReaderGC},
        preloader::{Preloader, SectorGroup, SectorGroupLoader, SectorGroupRequest},
        wii::{
            PartitionReaderWii, WiiPartEntry, WiiPartGroup, WiiPartitionHeader, REGION_OFFSET,
            REGION_SIZE, WII_PART_GROUP_OFF,
        },
        DiscHeader, DL_DVD_SIZE, MINI_DVD_SIZE, SECTOR_GROUP_SIZE, SECTOR_SIZE, SL_DVD_SIZE,
    },
    io::block::BlockReader,
    read::{DiscMeta, DiscOptions, PartitionEncryption, PartitionOptions, PartitionReader},
    util::{
        impl_read_for_bufread,
        read::{read_arc, read_from, read_vec},
    },
    Error, Result, ResultContext,
};

pub struct DiscReader {
    io: Box<dyn BlockIO>,
    block: Block,
    block_buf: Box<[u8]>,
    block_idx: u32,
    sector_buf: Box<[u8; SECTOR_SIZE]>,
    sector_idx: u32,
    io: Box<dyn BlockReader>,
    pos: u64,
    mode: PartitionEncryptionMode,
    disc_header: Box<DiscHeader>,
    pub(crate) partitions: Vec<PartitionInfo>,
    hash_tables: Vec<HashTable>,
    size: u64,
    mode: PartitionEncryption,
    disc_header: Arc<DiscHeader>,
    partitions: Arc<[PartitionInfo]>,
    region: Option<[u8; REGION_SIZE]>,
    sector_group: Option<SectorGroup>,
    preloader: Arc<Preloader>,
    alt_disc_header: Option<Arc<DiscHeader>>,
    alt_partitions: Option<Arc<[PartitionInfo]>>,
}

impl Clone for DiscReader {
    fn clone(&self) -> Self {
        Self {
            io: self.io.clone(),
            block: Block::default(),
            block_buf: <[u8]>::new_box_zeroed_with_elems(self.block_buf.len()).unwrap(),
            block_idx: u32::MAX,
            sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed().unwrap(),
            sector_idx: u32::MAX,
            pos: 0,
            size: self.size,
            mode: self.mode,
            disc_header: self.disc_header.clone(),
            partitions: self.partitions.clone(),
            hash_tables: self.hash_tables.clone(),
            region: self.region,
            sector_group: None,
            preloader: self.preloader.clone(),
            alt_disc_header: self.alt_disc_header.clone(),
            alt_partitions: self.alt_partitions.clone(),
        }
    }
}

impl DiscReader {
    pub fn new(inner: Box<dyn BlockIO>, options: &OpenOptions) -> Result<Self> {
        let block_size = inner.block_size();
        let meta = inner.meta();
        let mut reader = Self {
            io: inner,
            block: Block::default(),
            block_buf: <[u8]>::new_box_zeroed_with_elems(block_size as usize)?,
            block_idx: u32::MAX,
            sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed()?,
            sector_idx: u32::MAX,
            pos: 0,
            mode: options.partition_encryption,
            disc_header: DiscHeader::new_box_zeroed()?,
            partitions: vec![],
            hash_tables: vec![],
            region: None,
        };
        let disc_header: Box<DiscHeader> = read_box(&mut reader).context("Reading disc header")?;
        reader.disc_header = disc_header;
        if reader.disc_header.is_wii() {
            if reader.disc_header.has_partition_encryption()
                && !reader.disc_header.has_partition_hashes()
            {
    pub fn new(inner: Box<dyn BlockReader>, options: &DiscOptions) -> Result<Self> {
        let mut reader = DirectDiscReader::new(inner)?;

        let disc_header: Arc<DiscHeader> = read_arc(&mut reader).context("Reading disc header")?;
        let mut alt_disc_header = None;
        let mut region = None;
        let mut partitions = Arc::<[PartitionInfo]>::default();
        let mut alt_partitions = None;
        if disc_header.is_wii() {
            // Sanity check
            if disc_header.has_partition_encryption() && !disc_header.has_partition_hashes() {
                return Err(Error::DiscFormat(
                    "Wii disc is encrypted but has no partition hashes".to_string(),
                ));
            }
            if !reader.disc_header.has_partition_hashes()
                && options.partition_encryption == PartitionEncryptionMode::ForceEncrypted
            if !disc_header.has_partition_hashes()
                && options.partition_encryption == PartitionEncryption::ForceEncrypted
            {
                return Err(Error::Other(
                    "Unsupported: Rebuilding encryption for Wii disc without hashes".to_string(),
                ));
            }

            // Read region info
            reader.seek(SeekFrom::Start(REGION_OFFSET)).context("Seeking to region info")?;
            reader.region = Some(read_from(&mut reader).context("Reading region info")?);
            reader.partitions = read_partition_info(&mut reader)?;
            // Rebuild hashes if the format requires it
            if options.partition_encryption != PartitionEncryptionMode::AsIs
                && meta.needs_hash_recovery
                && reader.disc_header.has_partition_hashes()
            {
                rebuild_hashes(&mut reader)?;
            region = Some(read_from(&mut reader).context("Reading region info")?);

            // Read partition info
            partitions = Arc::from(read_partition_info(&mut reader, disc_header.clone())?);

            // Update disc header with encryption mode
            if matches!(
                options.partition_encryption,
                PartitionEncryption::ForceDecrypted | PartitionEncryption::ForceEncrypted
            ) {
                let mut disc_header = Box::new(disc_header.as_ref().clone());
                let mut partitions = Box::<[PartitionInfo]>::from(partitions.as_ref());
                disc_header.no_partition_encryption = match options.partition_encryption {
                    PartitionEncryption::ForceDecrypted => 1,
                    PartitionEncryption::ForceEncrypted => 0,
                    _ => unreachable!(),
                };
                for partition in &mut partitions {
                    partition.has_encryption = disc_header.has_partition_encryption();
                }
                alt_disc_header = Some(Arc::from(disc_header));
                alt_partitions = Some(Arc::from(partitions));
            }
        } else if !disc_header.is_gamecube() {
            return Err(Error::DiscFormat("Invalid disc header".to_string()));
        }
        reader.reset();
        Ok(reader)
    }

    pub fn reset(&mut self) {
        self.block = Block::default();
        self.block_buf.fill(0);
        self.block_idx = u32::MAX;
        self.sector_buf.fill(0);
        self.sector_idx = u32::MAX;
        self.pos = 0;
    }

    pub fn disc_size(&self) -> u64 {
        self.io.meta().disc_size.unwrap_or_else(|| guess_disc_size(&self.partitions))
        // Calculate disc size
        let io = reader.into_inner();
        let size = io.meta().disc_size.unwrap_or_else(|| guess_disc_size(&partitions));
        let preloader = Preloader::new(
            SectorGroupLoader::new(io.clone(), disc_header.clone(), partitions.clone()),
            options.preloader_threads,
        );
        Ok(Self {
            io,
            pos: 0,
            size,
            mode: options.partition_encryption,
            disc_header,
            partitions,
            region,
            sector_group: None,
            preloader,
            alt_disc_header,
            alt_partitions,
        })
    }

    #[inline]
    pub fn header(&self) -> &DiscHeader { &self.disc_header }
    pub fn reset(&mut self) { self.pos = 0; }

    #[inline]
    pub fn position(&self) -> u64 { self.pos }

    #[inline]
    pub fn disc_size(&self) -> u64 { self.size }

    #[inline]
    pub fn header(&self) -> &DiscHeader {
        self.alt_disc_header.as_ref().unwrap_or(&self.disc_header)
    }

    #[inline]
    pub fn region(&self) -> Option<&[u8; REGION_SIZE]> { self.region.as_ref() }

    #[inline]
    pub fn partitions(&self) -> &[PartitionInfo] { &self.partitions }
    pub fn partitions(&self) -> &[PartitionInfo] {
        self.alt_partitions.as_deref().unwrap_or(&self.partitions)
    }

    #[inline]
    pub fn meta(&self) -> DiscMeta { self.io.meta() }

@@ -134,15 +167,19 @@ impl DiscReader {
        &self,
        index: usize,
        options: &PartitionOptions,
    ) -> Result<Box<dyn PartitionBase>> {
    ) -> Result<Box<dyn PartitionReader>> {
        if self.disc_header.is_gamecube() {
            if index == 0 {
                Ok(PartitionGC::new(self.io.clone(), self.disc_header.clone())?)
                Ok(PartitionReaderGC::new(
                    self.io.clone(),
                    self.preloader.clone(),
                    self.disc_size(),
                )?)
            } else {
                Err(Error::DiscFormat("GameCube discs only have one partition".to_string()))
            }
        } else if let Some(part) = self.partitions.get(index) {
            Ok(PartitionWii::new(self.io.clone(), self.disc_header.clone(), part, options)?)
            Ok(PartitionReaderWii::new(self.io.clone(), self.preloader.clone(), part, options)?)
        } else {
            Err(Error::DiscFormat(format!("Partition {index} not found")))
        }
@@ -154,108 +191,151 @@ impl DiscReader {
        &self,
        kind: PartitionKind,
        options: &PartitionOptions,
    ) -> Result<Box<dyn PartitionBase>> {
    ) -> Result<Box<dyn PartitionReader>> {
        if self.disc_header.is_gamecube() {
            if kind == PartitionKind::Data {
                Ok(PartitionGC::new(self.io.clone(), self.disc_header.clone())?)
                Ok(PartitionReaderGC::new(
                    self.io.clone(),
                    self.preloader.clone(),
                    self.disc_size(),
                )?)
            } else {
                Err(Error::DiscFormat("GameCube discs only have a data partition".to_string()))
            }
        } else if let Some(part) = self.partitions.iter().find(|v| v.kind == kind) {
            Ok(PartitionWii::new(self.io.clone(), self.disc_header.clone(), part, options)?)
            Ok(PartitionReaderWii::new(self.io.clone(), self.preloader.clone(), part, options)?)
        } else {
            Err(Error::DiscFormat(format!("Partition type {kind} not found")))
        }
    }

    pub fn fill_buf_internal(&mut self) -> io::Result<Bytes> {
        if self.pos >= self.size {
            return Ok(Bytes::new());
        }

        // Read from modified disc header
        if self.pos < size_of::<DiscHeader>() as u64 {
            if let Some(alt_disc_header) = &self.alt_disc_header {
                return Ok(Bytes::copy_from_slice(
                    &alt_disc_header.as_bytes()[self.pos as usize..],
                ));
            }
        }

        // Build sector group request
        let abs_sector = (self.pos / SECTOR_SIZE as u64) as u32;
        let (request, abs_group_sector, max_groups) = if let Some(partition) =
            self.partitions.iter().find(|part| part.data_contains_sector(abs_sector))
        {
            let group_idx = (abs_sector - partition.data_start_sector) / 64;
            let abs_group_sector = partition.data_start_sector + group_idx * 64;
            let max_groups = (partition.data_end_sector - partition.data_start_sector).div_ceil(64);
            let request = SectorGroupRequest {
                group_idx,
                partition_idx: Some(partition.index as u8),
                mode: self.mode,
            };
            (request, abs_group_sector, max_groups)
        } else {
            let group_idx = abs_sector / 64;
            let abs_group_sector = group_idx * 64;
            let max_groups = self.size.div_ceil(SECTOR_GROUP_SIZE as u64) as u32;
            let request = SectorGroupRequest { group_idx, partition_idx: None, mode: self.mode };
            (request, abs_group_sector, max_groups)
        };

        // Load sector group
        let sector_group = if matches!(&self.sector_group, Some(sector_group) if sector_group.request == request)
        {
            // We can improve this in Rust 2024 with `if_let_rescope`
            // https://github.com/rust-lang/rust/issues/124085
            self.sector_group.as_ref().unwrap()
        } else {
            self.sector_group.insert(self.preloader.fetch(request, max_groups)?)
        };

        // Calculate the number of consecutive sectors in the group
        let group_sector = abs_sector - abs_group_sector;
        let consecutive_sectors = sector_group.consecutive_sectors(group_sector);
        if consecutive_sectors == 0 {
            return Ok(Bytes::new());
        }
        let num_sectors = group_sector + consecutive_sectors;

        // Read from sector group buffer
        let group_start = abs_group_sector as u64 * SECTOR_SIZE as u64;
        let offset = (self.pos - group_start) as usize;
        let end = (num_sectors as u64 * SECTOR_SIZE as u64).min(self.size - group_start) as usize;
        Ok(sector_group.data.slice(offset..end))
    }
}
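// Sketch of the intended consumption pattern: `DiscReader` implements
// `BufRead` (below), so callers can stream the whole disc without an extra
// copy per read. Generic over any BufRead source; this is essentially what
// `std::io::copy` does internally.

use std::io::{self, BufRead, Write};

fn stream_all(mut reader: impl BufRead, mut out: impl Write) -> io::Result<u64> {
    let mut total = 0u64;
    loop {
        let len = {
            let buf = reader.fill_buf()?;
            if buf.is_empty() {
                break; // an empty fill_buf signals end of disc
            }
            out.write_all(buf)?;
            buf.len()
        };
        reader.consume(len);
        total += len as u64;
    }
    Ok(total)
}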

impl BufRead for DiscReader {
    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        let block_idx = (self.pos / self.block_buf.len() as u64) as u32;
        let abs_sector = (self.pos / SECTOR_SIZE as u64) as u32;
        if self.pos >= self.size {
            return Ok(&[]);
        }

        let partition = if self.disc_header.is_wii() {
            self.partitions.iter().find(|part| {
                abs_sector >= part.data_start_sector && abs_sector < part.data_end_sector
            })
        // Read from modified disc header
        if self.pos < size_of::<DiscHeader>() as u64 {
            if let Some(alt_disc_header) = &self.alt_disc_header {
                return Ok(&alt_disc_header.as_bytes()[self.pos as usize..]);
            }
        }

        // Build sector group request
        let abs_sector = (self.pos / SECTOR_SIZE as u64) as u32;
        let (request, abs_group_sector, max_groups) = if let Some(partition) =
            self.partitions.iter().find(|part| part.data_contains_sector(abs_sector))
        {
            let group_idx = (abs_sector - partition.data_start_sector) / 64;
            let abs_group_sector = partition.data_start_sector + group_idx * 64;
            let max_groups = (partition.data_end_sector - partition.data_start_sector).div_ceil(64);
            let request = SectorGroupRequest {
                group_idx,
                partition_idx: Some(partition.index as u8),
                mode: self.mode,
            };
            (request, abs_group_sector, max_groups)
        } else {
            None
            let group_idx = abs_sector / 64;
            let abs_group_sector = group_idx * 64;
            let max_groups = self.size.div_ceil(SECTOR_GROUP_SIZE as u64) as u32;
            let request = SectorGroupRequest { group_idx, partition_idx: None, mode: self.mode };
            (request, abs_group_sector, max_groups)
        };

        // Read new block
        if block_idx != self.block_idx {
            self.block = self.io.read_block(self.block_buf.as_mut(), block_idx, partition)?;
            self.block_idx = block_idx;
        // Load sector group
        let sector_group = if matches!(&self.sector_group, Some(sector_group) if sector_group.request == request)
        {
            // We can improve this in Rust 2024 with `if_let_rescope`
            // https://github.com/rust-lang/rust/issues/124085
            self.sector_group.as_ref().unwrap()
        } else {
            self.sector_group.insert(self.preloader.fetch(request, max_groups)?)
        };

        // Calculate the number of consecutive sectors in the group
        let group_sector = abs_sector - abs_group_sector;
        let consecutive_sectors = sector_group.consecutive_sectors(group_sector);
        if consecutive_sectors == 0 {
            return Ok(&[]);
        }
        let num_sectors = group_sector + consecutive_sectors;

        // Read new sector into buffer
        if abs_sector != self.sector_idx {
            match (self.mode, partition, self.disc_header.has_partition_encryption()) {
                (PartitionEncryptionMode::Original, Some(partition), true)
                | (PartitionEncryptionMode::ForceEncrypted, Some(partition), _) => {
                    self.block.encrypt(
                        self.sector_buf.as_mut(),
                        self.block_buf.as_ref(),
                        abs_sector,
                        partition,
                    )?;
                }
                (PartitionEncryptionMode::ForceDecrypted, Some(partition), _) => {
                    self.block.decrypt(
                        self.sector_buf.as_mut(),
                        self.block_buf.as_ref(),
                        abs_sector,
                        partition,
                    )?;
                }
                (PartitionEncryptionMode::AsIs, _, _) | (_, None, _) | (_, _, false) => {
                    self.block.copy_raw(
                        self.sector_buf.as_mut(),
                        self.block_buf.as_ref(),
                        abs_sector,
                        &self.disc_header,
                    )?;
                }
            }
            self.sector_idx = abs_sector;

            if self.sector_idx == 0
                && self.disc_header.is_wii()
                && matches!(
                    self.mode,
                    PartitionEncryptionMode::ForceDecrypted
                        | PartitionEncryptionMode::ForceEncrypted
                )
            {
                let (disc_header, _) = DiscHeader::mut_from_prefix(self.sector_buf.as_mut())
                    .expect("Invalid disc header alignment");
                disc_header.no_partition_encryption = match self.mode {
                    PartitionEncryptionMode::ForceDecrypted => 1,
                    PartitionEncryptionMode::ForceEncrypted => 0,
                    _ => unreachable!(),
                };
            }
        }

        // Read from sector buffer
        let offset = (self.pos % SECTOR_SIZE as u64) as usize;
        Ok(&self.sector_buf[offset..])
        // Read from sector group buffer
        let group_start = abs_group_sector as u64 * SECTOR_SIZE as u64;
        let offset = (self.pos - group_start) as usize;
        let end = (num_sectors as u64 * SECTOR_SIZE as u64).min(self.size - group_start) as usize;
        Ok(&sector_group.data[offset..end])
    }

    #[inline]
    fn consume(&mut self, amt: usize) { self.pos += amt as u64; }
}

impl Read for DiscReader {
    #[inline]
    fn read(&mut self, out: &mut [u8]) -> io::Result<usize> {
        let buf = self.fill_buf()?;
        let len = buf.len().min(out.len());
        out[..len].copy_from_slice(&buf[..len]);
        self.consume(len);
        Ok(len)
    }
}
impl_read_for_bufread!(DiscReader);

impl Seek for DiscReader {
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
@@ -273,7 +353,10 @@ impl Seek for DiscReader {
    }
}

fn read_partition_info(reader: &mut DiscReader) -> Result<Vec<PartitionInfo>> {
fn read_partition_info(
    reader: &mut DirectDiscReader,
    disc_header: Arc<DiscHeader>,
) -> Result<Vec<PartitionInfo>> {
    reader.seek(SeekFrom::Start(WII_PART_GROUP_OFF)).context("Seeking to partition groups")?;
    let part_groups: [WiiPartGroup; 4] = read_from(reader).context("Reading partition groups")?;
    let mut part_info = Vec::new();

@@ -292,7 +375,7 @@ fn read_partition_info(reader: &mut DiscReader) -> Result<Vec<PartitionInfo>> {
            reader
                .seek(SeekFrom::Start(offset))
                .with_context(|| format!("Seeking to partition data {group_idx}:{part_idx}"))?;
            let header: Box<WiiPartitionHeader> = read_box(reader)
            let header: Arc<WiiPartitionHeader> = read_arc(reader)
                .with_context(|| format!("Reading partition header {group_idx}:{part_idx}"))?;

            let key = header.ticket.decrypt_title_key()?;

@@ -303,17 +386,8 @@ fn read_partition_info(reader: &mut DiscReader) -> Result<Vec<PartitionInfo>> {
                )));
            }

            let disc_header = reader.header();
            let data_start_offset = entry.offset() + header.data_off();
            let mut data_size = header.data_size();
            if data_size == 0 {
                // Read until next partition or end of disc
                // TODO: handle multiple partition groups
                data_size = entries
                    .get(part_idx + 1)
                    .map(|part| part.offset() - data_start_offset)
                    .unwrap_or(reader.disc_size() - data_start_offset);
            }
            let data_size = header.data_size();
            let data_end_offset = data_start_offset + data_size;
            if data_start_offset % SECTOR_SIZE as u64 != 0
                || data_end_offset % SECTOR_SIZE as u64 != 0

@@ -322,32 +396,58 @@ fn read_partition_info(reader: &mut DiscReader) -> Result<Vec<PartitionInfo>> {
                    "Partition {group_idx}:{part_idx} data is not sector aligned",
                )));
            }
            let mut info = PartitionInfo {
            let start_sector = (start_offset / SECTOR_SIZE as u64) as u32;
            let data_start_sector = (data_start_offset / SECTOR_SIZE as u64) as u32;
            let mut data_end_sector = (data_end_offset / SECTOR_SIZE as u64) as u32;

            reader.reset(DirectDiscReaderMode::Partition {
                disc_header: disc_header.clone(),
                data_start_sector,
                key,
            });
            let partition_disc_header: Arc<DiscHeader> =
                read_arc(reader).context("Reading partition disc header")?;
            let partition_header = read_arc(reader).context("Reading partition header")?;
            if partition_disc_header.is_wii() {
                let raw_fst = read_fst(reader, &partition_header, true)?;
                let fst = Fst::new(&raw_fst)?;
                let max_fst_offset = fst
                    .nodes
                    .iter()
                    .filter_map(|n| match n.kind() {
                        NodeKind::File => Some(n.offset(true) + n.length() as u64),
                        _ => None,
                    })
                    .max()
                    .unwrap_or(0);
                if max_fst_offset > data_size {
                    if data_size == 0 {
                        // Guess data size for decrypted partitions
                        data_end_sector = max_fst_offset.div_ceil(SECTOR_SIZE as u64) as u32;
                    } else {
                        return Err(Error::DiscFormat(format!(
                            "Partition {group_idx}:{part_idx} FST exceeds data size",
                        )));
                    }
                }
            } else {
                warn!("Partition {group_idx}:{part_idx} is not valid");
            }
            reader.reset(DirectDiscReaderMode::Raw);

            part_info.push(PartitionInfo {
                index: part_info.len(),
                kind: entry.kind.get().into(),
                start_sector: (start_offset / SECTOR_SIZE as u64) as u32,
                data_start_sector: (data_start_offset / SECTOR_SIZE as u64) as u32,
                data_end_sector: (data_end_offset / SECTOR_SIZE as u64) as u32,
                start_sector,
                data_start_sector,
                data_end_sector,
                key,
                header,
                disc_header: DiscHeader::new_box_zeroed()?,
                partition_header: PartitionHeader::new_box_zeroed()?,
                hash_table: None,
                disc_header: partition_disc_header,
                partition_header,
                has_encryption: disc_header.has_partition_encryption(),
                has_hashes: disc_header.has_partition_hashes(),
            };

            let mut partition_reader = PartitionWii::new(
                reader.io.clone(),
                reader.disc_header.clone(),
                &info,
                &PartitionOptions { validate_hashes: false },
            )?;
            info.disc_header = read_box(&mut partition_reader).context("Reading disc header")?;
            info.partition_header =
                read_box(&mut partition_reader).context("Reading partition header")?;

            part_info.push(info);
            });
        }
    }
    Ok(part_info)

@@ -356,18 +456,9 @@ fn read_partition_info(reader: &mut DiscReader) -> Result<Vec<PartitionInfo>> {
fn guess_disc_size(part_info: &[PartitionInfo]) -> u64 {
    let max_offset = part_info
        .iter()
        .flat_map(|v| {
            let offset = v.start_sector as u64 * SECTOR_SIZE as u64;
            [
                offset + v.header.tmd_off() + v.header.tmd_size(),
                offset + v.header.cert_chain_off() + v.header.cert_chain_size(),
                offset + v.header.h3_table_off() + v.header.h3_table_size(),
                offset + v.header.data_off() + v.header.data_size(),
            ]
        })
        .map(|v| v.data_end_sector as u64 * SECTOR_SIZE as u64)
        .max()
        .unwrap_or(0x50000);
    // TODO add FST offsets (decrypted partitions)
    if max_offset <= MINI_DVD_SIZE && !part_info.iter().any(|v| v.kind == PartitionKind::Data) {
        // Datel disc
        MINI_DVD_SIZE
@@ -1,101 +0,0 @@
//! Partition file read stream.

use std::{
    io,
    io::{BufRead, Read, Seek, SeekFrom},
};

use super::PartitionBase;

/// A file read stream borrowing a [`PartitionBase`].
pub type FileStream<'a> = WindowedStream<&'a mut dyn PartitionBase>;

/// A file read stream owning a [`PartitionBase`].
pub type OwnedFileStream = WindowedStream<Box<dyn PartitionBase>>;

/// A read stream with a fixed window.
#[derive(Clone)]
pub struct WindowedStream<T>
where T: BufRead + Seek
{
    base: T,
    pos: u64,
    begin: u64,
    end: u64,
}

impl<T> WindowedStream<T>
where T: BufRead + Seek
{
    /// Creates a new windowed stream with offset and size.
    ///
    /// Seeks underlying stream immediately.
    #[inline]
    pub fn new(mut base: T, offset: u64, size: u64) -> io::Result<Self> {
        base.seek(SeekFrom::Start(offset))?;
        Ok(Self { base, pos: offset, begin: offset, end: offset + size })
    }

    /// Returns the length of the window.
    #[inline]
    #[allow(clippy::len_without_is_empty)]
    pub fn len(&self) -> u64 { self.end - self.begin }
}

impl<T> Read for WindowedStream<T>
where T: BufRead + Seek
{
    #[inline]
    fn read(&mut self, out: &mut [u8]) -> io::Result<usize> {
        let buf = self.fill_buf()?;
        let len = buf.len().min(out.len());
        out[..len].copy_from_slice(&buf[..len]);
        self.consume(len);
        Ok(len)
    }
}

impl<T> BufRead for WindowedStream<T>
where T: BufRead + Seek
{
    #[inline]
    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        let limit = self.end.saturating_sub(self.pos);
        if limit == 0 {
            return Ok(&[]);
        }
        let buf = self.base.fill_buf()?;
        let max = (buf.len() as u64).min(limit) as usize;
        Ok(&buf[..max])
    }

    #[inline]
    fn consume(&mut self, amt: usize) {
        self.base.consume(amt);
        self.pos += amt as u64;
    }
}

impl<T> Seek for WindowedStream<T>
where T: BufRead + Seek
{
    #[inline]
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        let mut pos = match pos {
            SeekFrom::Start(p) => self.begin + p,
            SeekFrom::End(p) => self.end.saturating_add_signed(p),
            SeekFrom::Current(p) => self.pos.saturating_add_signed(p),
        };
        if pos < self.begin {
            pos = self.begin;
        } else if pos > self.end {
            pos = self.end;
        }
        let result = self.base.seek(SeekFrom::Start(pos))?;
        self.pos = result;
        Ok(result - self.begin)
    }

    #[inline]
    fn stream_position(&mut self) -> io::Result<u64> { Ok(self.pos) }
}
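// The removed WindowedStream clamped reads and seeks to a [begin, end) window
// over the base stream. For one-shot sequential reads, std composition gives
// the same effect (seek to the window start, then limit with Read::take):

use std::io::{Cursor, Read, Seek, SeekFrom};

fn window_read_demo() -> std::io::Result<()> {
    let mut base = Cursor::new((0u8..10).collect::<Vec<u8>>());
    base.seek(SeekFrom::Start(3))?; // window begin
    let mut windowed = base.take(4); // window size
    let mut buf = Vec::new();
    windowed.read_to_end(&mut buf)?;
    assert_eq!(buf, [3, 4, 5, 6]);
    Ok(())
}

// WindowedStream additionally kept Seek working relative to the window, which
// Read::take alone does not provide.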
@@ -1,46 +1,53 @@
//! Wii disc types.

use std::{
    ffi::CStr,
    io,
    io::{BufRead, Read, Seek, SeekFrom},
    io::{BufRead, Seek, SeekFrom},
    mem::size_of,
    sync::Arc,
};

use sha1::{Digest, Sha1};
use zerocopy::{big_endian::*, FromBytes, FromZeros, Immutable, IntoBytes, KnownLayout};
use zerocopy::{big_endian::*, FromBytes, Immutable, IntoBytes, KnownLayout};

use super::{
    gcn::{read_part_meta, PartitionGC},
    DiscHeader, FileStream, Node, PartitionBase, PartitionMeta, SECTOR_SIZE,
};
use crate::{
    array_ref,
    disc::streams::OwnedFileStream,
    io::{
        aes_cbc_decrypt,
        block::{Block, BlockIO, PartitionInfo},
        HashBytes, KeyBytes,
    common::{HashBytes, KeyBytes, PartitionInfo},
    disc::{
        gcn::{read_part_meta, PartitionReaderGC},
        hashes::sha1_hash,
        preloader::{Preloader, SectorGroup, SectorGroupRequest},
        SECTOR_GROUP_SIZE, SECTOR_SIZE,
    },
    static_assert,
    util::{div_rem, read::read_box_slice},
    Error, PartitionOptions, Result, ResultContext,
    io::block::BlockReader,
    read::{PartitionEncryption, PartitionMeta, PartitionOptions, PartitionReader},
    util::{
        aes::aes_cbc_decrypt,
        array_ref, div_rem, impl_read_for_bufread,
        read::{read_arc, read_arc_slice},
        static_assert,
    },
    Error, Result, ResultContext,
};

/// Size in bytes of the hashes block in a Wii disc sector
pub(crate) const HASHES_SIZE: usize = 0x400;
pub const HASHES_SIZE: usize = 0x400;

/// Size in bytes of the data block in a Wii disc sector (excluding hashes)
pub(crate) const SECTOR_DATA_SIZE: usize = SECTOR_SIZE - HASHES_SIZE; // 0x7C00
pub const SECTOR_DATA_SIZE: usize = SECTOR_SIZE - HASHES_SIZE; // 0x7C00

/// Size of the disc region info (region.bin)
/// Size in bytes of the disc region info (region.bin)
pub const REGION_SIZE: usize = 0x20;

/// Size in bytes of the H3 table (h3.bin)
pub const H3_TABLE_SIZE: usize = 0x18000;

/// Offset of the disc region info
pub const REGION_OFFSET: u64 = 0x4E000;

// ppki (Retail)
const RVL_CERT_ISSUER_PPKI_TICKET: &str = "Root-CA00000001-XS00000003";
pub(crate) const RVL_CERT_ISSUER_PPKI_TICKET: &str = "Root-CA00000001-XS00000003";
#[rustfmt::skip]
const RETAIL_COMMON_KEYS: [KeyBytes; 3] = [
pub(crate) const RETAIL_COMMON_KEYS: [KeyBytes; 3] = [
    /* RVL_KEY_RETAIL */
    [0xeb, 0xe4, 0x2a, 0x22, 0x5e, 0x85, 0x93, 0xe4, 0x48, 0xd9, 0xc5, 0x45, 0x73, 0x81, 0xaa, 0xf7],
    /* RVL_KEY_KOREAN */

@@ -50,9 +57,9 @@ const RETAIL_COMMON_KEYS: [KeyBytes; 3] = [
];

// dpki (Debug)
const RVL_CERT_ISSUER_DPKI_TICKET: &str = "Root-CA00000002-XS00000006";
pub(crate) const RVL_CERT_ISSUER_DPKI_TICKET: &str = "Root-CA00000002-XS00000006";
#[rustfmt::skip]
const DEBUG_COMMON_KEYS: [KeyBytes; 3] = [
pub(crate) const DEBUG_COMMON_KEYS: [KeyBytes; 3] = [
    /* RVL_KEY_DEBUG */
    [0xa1, 0x60, 0x4a, 0x6a, 0x71, 0x23, 0xb5, 0x29, 0xae, 0x8b, 0xec, 0x32, 0xc8, 0x16, 0xfc, 0xaa],
    /* RVL_KEY_KOREAN_DEBUG */

@@ -159,7 +166,6 @@ static_assert!(size_of::<Ticket>() == 0x2A4);

impl Ticket {
    /// Decrypts the ticket title key using the appropriate common key
    #[allow(clippy::missing_inline_in_public_items)]
    pub fn decrypt_title_key(&self) -> Result<KeyBytes> {
        let mut iv: KeyBytes = [0; 16];
        iv[..8].copy_from_slice(&self.title_id);

@@ -249,11 +255,11 @@ pub struct ContentMetadata {

static_assert!(size_of::<ContentMetadata>() == 0x24);

pub const H3_TABLE_SIZE: usize = 0x18000;

/// Wii partition header.
#[derive(Debug, Clone, PartialEq, FromBytes, IntoBytes, Immutable, KnownLayout)]
#[repr(C, align(4))]
pub struct WiiPartitionHeader {
    /// Ticket
    pub ticket: Ticket,
    tmd_size: U32,
    tmd_off: U32,

@@ -267,172 +273,146 @@ pub struct WiiPartitionHeader {
static_assert!(size_of::<WiiPartitionHeader>() == 0x2C0);

impl WiiPartitionHeader {
    /// TMD size in bytes
    pub fn tmd_size(&self) -> u64 { self.tmd_size.get() as u64 }

    /// TMD offset in bytes (relative to the partition start)
    pub fn tmd_off(&self) -> u64 { (self.tmd_off.get() as u64) << 2 }

    /// Certificate chain size in bytes
    pub fn cert_chain_size(&self) -> u64 { self.cert_chain_size.get() as u64 }

    /// Certificate chain offset in bytes (relative to the partition start)
    pub fn cert_chain_off(&self) -> u64 { (self.cert_chain_off.get() as u64) << 2 }

    /// H3 table offset in bytes (relative to the partition start)
    pub fn h3_table_off(&self) -> u64 { (self.h3_table_off.get() as u64) << 2 }

    /// H3 table size in bytes (always H3_TABLE_SIZE)
    pub fn h3_table_size(&self) -> u64 { H3_TABLE_SIZE as u64 }

    /// Data offset in bytes (relative to the partition start)
    pub fn data_off(&self) -> u64 { (self.data_off.get() as u64) << 2 }

    /// Data size in bytes
    pub fn data_size(&self) -> u64 { (self.data_size.get() as u64) << 2 }
}

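// The `<< 2` above is the Wii "shifted offset" convention: 32-bit header
// fields store byte offsets divided by 4, extending the addressable range to
// 34 bits (~16 GiB). A round-trip sketch (helper names are illustrative):

fn to_shifted(byte_off: u64) -> u32 {
    debug_assert_eq!(byte_off & 3, 0, "shifted offsets must be 4-byte aligned");
    (byte_off >> 2) as u32
}

fn from_shifted(field: u32) -> u64 {
    (field as u64) << 2
}

// assert_eq!(from_shifted(to_shifted(0x1_0000_0000)), 0x1_0000_0000);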
pub struct PartitionWii {
    io: Box<dyn BlockIO>,
pub(crate) struct PartitionReaderWii {
    io: Box<dyn BlockReader>,
    preloader: Arc<Preloader>,
    partition: PartitionInfo,
    block: Block,
    block_buf: Box<[u8]>,
    block_idx: u32,
    sector_buf: Box<[u8; SECTOR_SIZE]>,
    sector: u32,
    pos: u64,
    options: PartitionOptions,
    raw_tmd: Option<Box<[u8]>>,
    raw_cert_chain: Option<Box<[u8]>>,
    raw_h3_table: Option<Box<[u8]>>,
    sector_group: Option<SectorGroup>,
    meta: Option<PartitionMeta>,
}

impl Clone for PartitionWii {
impl Clone for PartitionReaderWii {
    fn clone(&self) -> Self {
        Self {
            io: self.io.clone(),
            preloader: self.preloader.clone(),
            partition: self.partition.clone(),
            block: Block::default(),
            block_buf: <[u8]>::new_box_zeroed_with_elems(self.block_buf.len()).unwrap(),
            block_idx: u32::MAX,
            sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed().unwrap(),
            sector: u32::MAX,
            pos: 0,
            options: self.options.clone(),
            raw_tmd: self.raw_tmd.clone(),
            raw_cert_chain: self.raw_cert_chain.clone(),
            raw_h3_table: self.raw_h3_table.clone(),
            sector_group: None,
            meta: self.meta.clone(),
        }
    }
}

impl PartitionWii {
impl PartitionReaderWii {
    pub fn new(
        inner: Box<dyn BlockIO>,
        disc_header: Box<DiscHeader>,
        io: Box<dyn BlockReader>,
        preloader: Arc<Preloader>,
        partition: &PartitionInfo,
        options: &PartitionOptions,
    ) -> Result<Box<Self>> {
        let block_size = inner.block_size();
        let mut reader = PartitionGC::new(inner, disc_header)?;

        // Read TMD, cert chain, and H3 table
        let offset = partition.start_sector as u64 * SECTOR_SIZE as u64;
        let raw_tmd = if partition.header.tmd_size() != 0 {
            reader
                .seek(SeekFrom::Start(offset + partition.header.tmd_off()))
                .context("Seeking to TMD offset")?;
            Some(
                read_box_slice::<u8, _>(&mut reader, partition.header.tmd_size() as usize)
                    .context("Reading TMD")?,
            )
        } else {
            None
        };
        let raw_cert_chain = if partition.header.cert_chain_size() != 0 {
            reader
                .seek(SeekFrom::Start(offset + partition.header.cert_chain_off()))
                .context("Seeking to cert chain offset")?;
            Some(
                read_box_slice::<u8, _>(&mut reader, partition.header.cert_chain_size() as usize)
                    .context("Reading cert chain")?,
            )
        } else {
            None
        };
        let raw_h3_table = if partition.has_hashes {
            reader
                .seek(SeekFrom::Start(offset + partition.header.h3_table_off()))
                .context("Seeking to H3 table offset")?;
            Some(read_box_slice::<u8, _>(&mut reader, H3_TABLE_SIZE).context("Reading H3 table")?)
        } else {
            None
        };

        Ok(Box::new(Self {
            io: reader.into_inner(),
        let mut reader = Self {
            io,
            preloader,
            partition: partition.clone(),
            block: Block::default(),
            block_buf: <[u8]>::new_box_zeroed_with_elems(block_size as usize)?,
            block_idx: u32::MAX,
            sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed()?,
            sector: u32::MAX,
            pos: 0,
            options: options.clone(),
            raw_tmd,
            raw_cert_chain,
            raw_h3_table,
        }))
            sector_group: None,
            meta: None,
        };
        if options.validate_hashes {
            // Ensure we cache the H3 table
            reader.meta()?;
        }
        Ok(Box::new(reader))
    }

    #[inline]
    pub fn len(&self) -> u64 { self.partition.data_size() }
}

impl BufRead for PartitionWii {
impl BufRead for PartitionReaderWii {
    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        let part_sector = if self.partition.has_hashes {
            (self.pos / SECTOR_DATA_SIZE as u64) as u32
        let (part_sector, sector_offset) = if self.partition.has_hashes {
            (
                (self.pos / SECTOR_DATA_SIZE as u64) as u32,
                (self.pos % SECTOR_DATA_SIZE as u64) as usize,
            )
        } else {
            (self.pos / SECTOR_SIZE as u64) as u32
            ((self.pos / SECTOR_SIZE as u64) as u32, (self.pos % SECTOR_SIZE as u64) as usize)
        };
        let abs_sector = self.partition.data_start_sector + part_sector;
        if abs_sector >= self.partition.data_end_sector {
            return Ok(&[]);
        }

        // Read new block if necessary
        let block_idx =
            (abs_sector as u64 * SECTOR_SIZE as u64 / self.block_buf.len() as u64) as u32;
        if block_idx != self.block_idx {
            self.block = self.io.read_block(
                self.block_buf.as_mut(),
                block_idx,
                self.partition.has_encryption.then_some(&self.partition),
            )?;
            self.block_idx = block_idx;
        }
        let group_idx = part_sector / 64;
        let group_sector = part_sector % 64;

        // Decrypt sector if necessary
        if abs_sector != self.sector {
            if self.partition.has_encryption {
                self.block.decrypt(
                    self.sector_buf.as_mut(),
                    self.block_buf.as_ref(),
                    abs_sector,
                    &self.partition,
                )?;
        let max_groups =
            (self.partition.data_end_sector - self.partition.data_start_sector).div_ceil(64);
        let request = SectorGroupRequest {
            group_idx,
            partition_idx: Some(self.partition.index as u8),
            mode: if self.options.validate_hashes {
                PartitionEncryption::ForceDecrypted
            } else {
                self.block.copy_raw(
                    self.sector_buf.as_mut(),
                    self.block_buf.as_ref(),
                    abs_sector,
                    &self.partition.disc_header,
                )?;
            }
                PartitionEncryption::ForceDecryptedNoHashes
            },
        };
        let sector_group = if matches!(&self.sector_group, Some(sector_group) if sector_group.request == request)
        {
            // We can improve this in Rust 2024 with `if_let_rescope`
            // https://github.com/rust-lang/rust/issues/124085
            self.sector_group.as_ref().unwrap()
        } else {
            let sector_group = self.preloader.fetch(request, max_groups)?;
            if self.options.validate_hashes {
                if let Some(h3_table) = self.raw_h3_table.as_deref() {
                    verify_hashes(self.sector_buf.as_ref(), part_sector, h3_table)?;
                if let Some(h3_table) = self.meta.as_ref().and_then(|m| m.raw_h3_table.as_deref()) {
                    verify_hashes(
                        array_ref![sector_group.data, 0, SECTOR_GROUP_SIZE],
                        group_idx,
                        h3_table,
                    )?;
                }
            }
            self.sector = abs_sector;
        }
            self.sector_group.insert(sector_group)
        };

        // Read from sector group buffer
        let consecutive_sectors = sector_group.consecutive_sectors(group_sector);
        if consecutive_sectors == 0 {
            return Ok(&[]);
        }
        let group_sector_offset = group_sector as usize * SECTOR_SIZE;
        if self.partition.has_hashes {
            let offset = (self.pos % SECTOR_DATA_SIZE as u64) as usize;
            Ok(&self.sector_buf[HASHES_SIZE + offset..])
            // Read until end of sector (avoid the next hash block)
            let offset = group_sector_offset + HASHES_SIZE + sector_offset;
            let end = group_sector_offset + SECTOR_SIZE;
            Ok(&sector_group.data[offset..end])
        } else {
            let offset = (self.pos % SECTOR_SIZE as u64) as usize;
            Ok(&self.sector_buf[offset..])
            // Read until end of sector group (no hashes)
            let offset = group_sector_offset + sector_offset;
            let end = (group_sector + consecutive_sectors) as usize * SECTOR_SIZE;
            Ok(&sector_group.data[offset..end])
        }
    }

@@ -440,133 +420,130 @@ impl BufRead for PartitionWii {
    fn consume(&mut self, amt: usize) { self.pos += amt as u64; }
}

impl Read for PartitionWii {
    #[inline]
    fn read(&mut self, out: &mut [u8]) -> io::Result<usize> {
        let buf = self.fill_buf()?;
        let len = buf.len().min(out.len());
        out[..len].copy_from_slice(&buf[..len]);
        self.consume(len);
        Ok(len)
    }
}
impl_read_for_bufread!(PartitionReaderWii);

impl Seek for PartitionWii {
impl Seek for PartitionReaderWii {
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        self.pos = match pos {
            SeekFrom::Start(v) => v,
            SeekFrom::End(_) => {
                return Err(io::Error::new(
                    io::ErrorKind::Unsupported,
                    "WiiPartitionReader: SeekFrom::End is not supported".to_string(),
                ));
            }
            SeekFrom::End(v) => self.len().saturating_add_signed(v),
            SeekFrom::Current(v) => self.pos.saturating_add_signed(v),
        };
        Ok(self.pos)
    }

    fn stream_position(&mut self) -> io::Result<u64> { Ok(self.pos) }
}

#[inline(always)]
pub(crate) fn as_digest(slice: &[u8; 20]) -> digest::Output<Sha1> { (*slice).into() }
fn verify_hashes(buf: &[u8; SECTOR_GROUP_SIZE], group_idx: u32, h3_table: &[u8]) -> io::Result<()> {
    for sector in 0..64 {
        let buf = array_ref![buf, sector * SECTOR_SIZE, SECTOR_SIZE];
        let part_sector = group_idx * 64 + sector as u32;
        let (cluster, sector) = div_rem(part_sector as usize, 8);
        let (group, sub_group) = div_rem(cluster, 8);

fn verify_hashes(buf: &[u8; SECTOR_SIZE], part_sector: u32, h3_table: &[u8]) -> io::Result<()> {
    let (cluster, sector) = div_rem(part_sector as usize, 8);
    let (group, sub_group) = div_rem(cluster, 8);

    // H0 hashes
    for i in 0..31 {
        let mut hash = Sha1::new();
        hash.update(array_ref![buf, (i + 1) * 0x400, 0x400]);
        let expected = as_digest(array_ref![buf, i * 20, 20]);
        let output = hash.finalize();
        if output != expected {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                format!("Invalid H0 hash! (block {:?}) {:x}\n\texpected {:x}", i, output, expected),
            ));
        // H0 hashes
        for i in 0..31 {
            let expected = array_ref![buf, i * 20, 20];
            let output = sha1_hash(array_ref![buf, (i + 1) * 0x400, 0x400]);
            if output != *expected {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidData,
                    format!("Invalid H0 hash! (block {i})"),
                ));
            }
        }
    }

    // H1 hash
    {
        let mut hash = Sha1::new();
        hash.update(array_ref![buf, 0, 0x26C]);
        let expected = as_digest(array_ref![buf, 0x280 + sector * 20, 20]);
        let output = hash.finalize();
        if output != expected {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                format!(
                    "Invalid H1 hash! (subgroup {:?}) {:x}\n\texpected {:x}",
                    sector, output, expected
                ),
            ));
        // H1 hash
        {
            let expected = array_ref![buf, 0x280 + sector * 20, 20];
            let output = sha1_hash(array_ref![buf, 0, 0x26C]);
            if output != *expected {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidData,
                    format!("Invalid H1 hash! (sector {sector})"),
                ));
            }
        }
    }

    // H2 hash
    {
        let mut hash = Sha1::new();
        hash.update(array_ref![buf, 0x280, 0xA0]);
        let expected = as_digest(array_ref![buf, 0x340 + sub_group * 20, 20]);
        let output = hash.finalize();
        if output != expected {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                format!(
                    "Invalid H2 hash! (group {:?}) {:x}\n\texpected {:x}",
                    sub_group, output, expected
                ),
            ));
        // H2 hash
        {
            let expected = array_ref![buf, 0x340 + sub_group * 20, 20];
            let output = sha1_hash(array_ref![buf, 0x280, 0xA0]);
            if output != *expected {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidData,
                    format!("Invalid H2 hash! (subgroup {sub_group})"),
                ));
            }
        }
    }

    // H3 hash
    {
        let mut hash = Sha1::new();
        hash.update(array_ref![buf, 0x340, 0xA0]);
        let expected = as_digest(array_ref![h3_table, group * 20, 20]);
        let output = hash.finalize();
        if output != expected {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                format!("Invalid H3 hash! {:x}\n\texpected {:x}", output, expected),
            ));
        // H3 hash
        {
            let expected = array_ref![h3_table, group * 20, 20];
            let output = sha1_hash(array_ref![buf, 0x340, 0xA0]);
            if output != *expected {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidData,
                    format!("Invalid H3 hash! (group {group})"),
                ));
            }
        }
    }

    Ok(())
}

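// Sketch of the hash-hierarchy indexing used by `verify_hashes` above. Each
// Wii sector carries 31 H0 hashes (one per 0x400-byte data block); 8 sectors
// form a subgroup (H1), 8 subgroups form a group (H2), and the H3 table holds
// one hash per group. Derived directly from the div_rem calls in the code:

fn hash_indices(part_sector: u32) -> (usize, usize, usize, usize) {
    let cluster = part_sector as usize / 8;
    let sector = part_sector as usize % 8; // selects this sector's H1 entry
    let group = cluster / 8;
    let sub_group = cluster % 8; // selects this subgroup's H2 entry
    (group, sub_group, cluster, sector)
}

// Per the offsets above: H1 entries live at 0x280 + sector * 20, H2 entries at
// 0x340 + sub_group * 20, and the group's H3 entry at group * 20 in h3.bin.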
impl PartitionBase for PartitionWii {
    fn meta(&mut self) -> Result<Box<PartitionMeta>> {
impl PartitionReader for PartitionReaderWii {
    fn is_wii(&self) -> bool { true }

    fn meta(&mut self) -> Result<PartitionMeta> {
        if let Some(meta) = &self.meta {
            return Ok(meta.clone());
        }
        self.seek(SeekFrom::Start(0)).context("Seeking to partition header")?;
        let mut meta = read_part_meta(self, true)?;
        meta.raw_ticket = Some(Box::from(self.partition.header.ticket.as_bytes()));
        meta.raw_tmd = self.raw_tmd.clone();
        meta.raw_cert_chain = self.raw_cert_chain.clone();
        meta.raw_h3_table = self.raw_h3_table.clone();
        meta.raw_ticket = Some(Arc::from(self.partition.header.ticket.as_bytes()));

        // Read TMD, cert chain, and H3 table
        let mut reader = PartitionReaderGC::new(self.io.clone(), self.preloader.clone(), u64::MAX)?;
        let offset = self.partition.start_sector as u64 * SECTOR_SIZE as u64;
        meta.raw_tmd = if self.partition.header.tmd_size() != 0 {
            reader
                .seek(SeekFrom::Start(offset + self.partition.header.tmd_off()))
                .context("Seeking to TMD offset")?;
            Some(
                read_arc_slice::<u8, _>(&mut reader, self.partition.header.tmd_size() as usize)
                    .context("Reading TMD")?,
            )
        } else {
            None
        };
        meta.raw_cert_chain = if self.partition.header.cert_chain_size() != 0 {
            reader
                .seek(SeekFrom::Start(offset + self.partition.header.cert_chain_off()))
                .context("Seeking to cert chain offset")?;
            Some(
                read_arc_slice::<u8, _>(
                    &mut reader,
                    self.partition.header.cert_chain_size() as usize,
                )
                .context("Reading cert chain")?,
            )
        } else {
            None
        };
        meta.raw_h3_table = if self.partition.has_hashes {
            reader
                .seek(SeekFrom::Start(offset + self.partition.header.h3_table_off()))
                .context("Seeking to H3 table offset")?;

            Some(read_arc::<[u8; H3_TABLE_SIZE], _>(&mut reader).context("Reading H3 table")?)
        } else {
            None
        };

        self.meta = Some(meta.clone());
        Ok(meta)
    }

    fn open_file(&mut self, node: Node) -> io::Result<FileStream> {
        if !node.is_file() {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "Node is not a file".to_string(),
            ));
        }
        FileStream::new(self, node.offset(true), node.length())
    }

    fn into_open_file(self: Box<Self>, node: Node) -> io::Result<OwnedFileStream> {
        if !node.is_file() {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "Node is not a file".to_string(),
            ));
        }
        OwnedFileStream::new(self, node.offset(true), node.length())
    }
}
@@ -0,0 +1,273 @@
use std::{
    io,
    io::{BufRead, Read},
};

use bytes::{Bytes, BytesMut};
use dyn_clone::DynClone;
use rayon::prelude::*;

use crate::{
    common::PartitionInfo,
    disc::{
        reader::DiscReader,
        wii::{HASHES_SIZE, SECTOR_DATA_SIZE},
        SECTOR_SIZE,
    },
    util::{aes::decrypt_sector_b2b, array_ref, array_ref_mut, lfg::LaggedFibonacci},
    write::{DiscFinalization, DiscWriterWeight, ProcessOptions},
    Error, Result, ResultContext,
};

/// A callback for writing disc data.
///
/// The callback should write all data to the output stream before returning, or return an error if
/// writing fails. The second and third arguments are the current bytes processed and the total
/// bytes to process, respectively. For most formats, this has no relation to the written disc size,
/// but can be used to display progress.
pub type DataCallback<'a> = dyn FnMut(Bytes, u64, u64) -> io::Result<()> + Send + 'a;

/// A trait for writing disc images.
pub trait DiscWriter: DynClone {
    /// Processes the disc writer to completion.
    ///
    /// The data callback will be called, in order, for each block of data to write to the output
    /// file. The callback should write all data before returning, or return an error if writing
    /// fails.
    fn process(
        &self,
        data_callback: &mut DataCallback,
        options: &ProcessOptions,
    ) -> Result<DiscFinalization>;

    /// Returns the progress upper bound for the disc writer.
    ///
    /// For most formats, this has no relation to the written disc size, but can be used to display
    /// progress.
    fn progress_bound(&self) -> u64;

    /// Returns the weight of the disc writer.
    ///
    /// This can help determine the number of threads to dedicate for output processing, and may
    /// differ based on the format's configuration, such as whether compression is enabled.
    fn weight(&self) -> DiscWriterWeight;
}

dyn_clone::clone_trait_object!(DiscWriter);

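// A minimal sketch of a `DataCallback` implementation matching the type alias
// above: write each chunk to a file and report progress. The progress pair
// only tracks bytes processed, not bytes written.

use std::{fs::File, io::Write};

fn make_callback(mut out: File) -> impl FnMut(Bytes, u64, u64) -> io::Result<()> + Send {
    move |data: Bytes, processed: u64, total: u64| {
        out.write_all(&data)?;
        if total > 0 {
            eprint!("\r{:.1}%", processed as f64 / total as f64 * 100.0);
        }
        Ok(())
    }
}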
#[derive(Default)]
pub struct BlockResult<T> {
    /// Input block index
    pub block_idx: u32,
    /// Input disc data (before processing)
    pub disc_data: Bytes,
    /// Output block data (after processing). If None, the disc data is used.
    pub block_data: Bytes,
    /// Output metadata
    pub meta: T,
}

pub trait BlockProcessor: Clone + Send + Sync {
    type BlockMeta;

    fn process_block(&mut self, block_idx: u32) -> io::Result<BlockResult<Self::BlockMeta>>;
}

pub fn read_block(reader: &mut DiscReader, block_size: usize) -> io::Result<(Bytes, Bytes)> {
    let initial_block = reader.fill_buf_internal()?;
    if initial_block.len() >= block_size {
        // Happy path: we have a full block that we can cheaply slice
        let data = initial_block.slice(0..block_size);
        reader.consume(block_size);
        return Ok((data.clone(), data));
    } else if initial_block.is_empty() {
        return Err(io::Error::from(io::ErrorKind::UnexpectedEof));
    }
    reader.consume(initial_block.len());

    // Combine smaller blocks into a new buffer
    let mut buf = BytesMut::zeroed(block_size);
    let mut len = initial_block.len();
    buf[..len].copy_from_slice(initial_block.as_ref());
    drop(initial_block);
    while len < block_size {
        let read = reader.read(&mut buf[len..])?;
        if read == 0 {
            break;
        }
        len += read;
    }
    // The block data is full size, padded with zeroes
    let block_data = buf.freeze();
    // The disc data is the actual data read, without padding
    let disc_data = block_data.slice(0..len);
    Ok((block_data, disc_data))
}

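// The happy path above is zero-copy: `Bytes::slice` and `clone` only bump a
// reference count over the same backing allocation, which is why both the
// block data and the disc data can share one buffer. A tiny demonstration:

fn bytes_demo() {
    let block = Bytes::from_static(b"0123456789abcdef");
    let head = block.slice(0..8); // no copy; shares the buffer
    let alias = head.clone(); // refcount bump only
    assert_eq!(head.as_ref(), b"01234567");
    assert_eq!(alias.as_ref(), head.as_ref());
}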
/// Process blocks in parallel, ensuring that they are written in order.
pub(crate) fn par_process<P, T>(
    create_processor: impl Fn() -> P + Sync,
    block_count: u32,
    num_threads: usize,
    mut callback: impl FnMut(BlockResult<T>) -> Result<()> + Send,
) -> Result<()>
where
    T: Send,
    P: BlockProcessor<BlockMeta = T>,
{
    if num_threads == 0 {
        // Fall back to single-threaded processing
        let mut processor = create_processor();
        for block_idx in 0..block_count {
            let block = processor
                .process_block(block_idx)
                .with_context(|| format!("Failed to process block {block_idx}"))?;
            callback(block)?;
        }
        return Ok(());
    }

    let (block_tx, block_rx) = crossbeam_channel::bounded(block_count as usize);
    for block_idx in 0..block_count {
        block_tx.send(block_idx).unwrap();
    }
    drop(block_tx); // Disconnect channel

    let (result_tx, result_rx) = crossbeam_channel::bounded(0);
    let mut process_error = None;
    let mut write_error = None;
    rayon::join(
        || {
            if let Err(e) = (0..num_threads).into_par_iter().try_for_each_init(
                || (block_rx.clone(), result_tx.clone(), create_processor()),
                |(receiver, block_tx, processor), _| {
                    while let Ok(block_idx) = receiver.recv() {
                        let block = processor
                            .process_block(block_idx)
                            .with_context(|| format!("Failed to process block {block_idx}"))?;
                        if block_tx.send(block).is_err() {
                            break;
                        }
                    }
                    Ok::<_, Error>(())
                },
            ) {
                process_error = Some(e);
            }
            drop(result_tx); // Disconnect channel
        },
        || {
            let mut current_block = 0;
            let mut out_of_order = Vec::<BlockResult<T>>::new();
            'outer: while let Ok(result) = result_rx.recv() {
                if result.block_idx == current_block {
                    if let Err(e) = callback(result) {
                        write_error = Some(e);
                        break;
                    }
                    current_block += 1;
                    // Check if any out of order blocks can be written
                    while out_of_order.first().is_some_and(|r| r.block_idx == current_block) {
                        let result = out_of_order.remove(0);
                        if let Err(e) = callback(result) {
                            write_error = Some(e);
                            break 'outer;
                        }
                        current_block += 1;
                    }
                } else {
                    out_of_order.push(result);
                    out_of_order.sort_unstable_by_key(|r| r.block_idx);
                }
            }
        },
    );
    if let Some(e) = process_error {
        return Err(e);
    }
    if let Some(e) = write_error {
        return Err(e);
    }

    Ok(())
}

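// The writer side of `par_process` above reorders results: blocks may finish
// out of order, so completed results are buffered until the next expected
// index arrives. A distilled, single-threaded form of that reassembly logic
// over any (index, value) stream:

fn reassemble_in_order<T>(results: impl IntoIterator<Item = (u32, T)>, mut emit: impl FnMut(T)) {
    let mut next = 0u32;
    let mut pending: Vec<(u32, T)> = Vec::new();
    for (idx, value) in results {
        if idx == next {
            emit(value);
            next += 1;
            // Flush any buffered results that are now in order
            while pending.first().is_some_and(|(i, _)| *i == next) {
                emit(pending.remove(0).1);
                next += 1;
            }
        } else {
            pending.push((idx, value));
            pending.sort_unstable_by_key(|(i, _)| *i);
        }
    }
}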
/// The determined block type.
pub enum CheckBlockResult {
    Normal,
    Zeroed,
    Junk,
}

/// Check if a block is zeroed or junk data.
pub(crate) fn check_block(
    buf: &[u8],
    decrypted_block: &mut [u8],
    input_position: u64,
    partition_info: &[PartitionInfo],
    lfg: &mut LaggedFibonacci,
    disc_id: [u8; 4],
    disc_num: u8,
) -> io::Result<CheckBlockResult> {
    let start_sector = (input_position / SECTOR_SIZE as u64) as u32;
    let end_sector = ((input_position + buf.len() as u64) / SECTOR_SIZE as u64) as u32;
    if let Some(partition) = partition_info.iter().find(|p| {
        p.has_hashes && start_sector >= p.data_start_sector && end_sector < p.data_end_sector
    }) {
        if input_position % SECTOR_SIZE as u64 != 0 {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                "Partition block not aligned to sector boundary",
            ));
        }
        if buf.len() % SECTOR_SIZE != 0 {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                "Partition block not a multiple of sector size",
            ));
        }
        let block = if partition.has_encryption {
            if decrypted_block.len() < buf.len() {
                return Err(io::Error::new(
                    io::ErrorKind::Other,
                    "Decrypted block buffer too small",
                ));
            }
            for i in 0..buf.len() / SECTOR_SIZE {
                decrypt_sector_b2b(
                    array_ref![buf, SECTOR_SIZE * i, SECTOR_SIZE],
                    array_ref_mut![decrypted_block, SECTOR_SIZE * i, SECTOR_SIZE],
                    &partition.key,
                );
            }
            &decrypted_block[..buf.len()]
        } else {
            buf
        };
        if sector_data_iter(block).all(|sector_data| sector_data.iter().all(|&b| b == 0)) {
            return Ok(CheckBlockResult::Zeroed);
        }
        let partition_start = partition.data_start_sector as u64 * SECTOR_SIZE as u64;
        let partition_offset =
            ((input_position - partition_start) / SECTOR_SIZE as u64) * SECTOR_DATA_SIZE as u64;
        if sector_data_iter(block).enumerate().all(|(i, sector_data)| {
            let sector_offset = partition_offset + i as u64 * SECTOR_DATA_SIZE as u64;
            lfg.check_sector_chunked(sector_data, disc_id, disc_num, sector_offset)
        }) {
            return Ok(CheckBlockResult::Junk);
        }
    } else {
        if buf.iter().all(|&b| b == 0) {
            return Ok(CheckBlockResult::Zeroed);
        }
        if lfg.check_sector_chunked(buf, disc_id, disc_num, input_position) {
            return Ok(CheckBlockResult::Junk);
        }
    }
    Ok(CheckBlockResult::Normal)
}

#[inline]
fn sector_data_iter(buf: &[u8]) -> impl Iterator<Item = &[u8; SECTOR_DATA_SIZE]> {
    buf.chunks_exact(SECTOR_SIZE).map(|chunk| (&chunk[HASHES_SIZE..]).try_into().unwrap())
}
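For reference, `sector_data_iter` relies on the standard Wii sector layout: each 0x8000-byte sector starts with a 0x400-byte hash block followed by 0x7C00 bytes of user data. A minimal sketch of the offset math, with the constants restated here for illustration:

const SECTOR_SIZE: usize = 0x8000; // full Wii sector
const HASHES_SIZE: usize = 0x400; // H0/H1/H2 hash block at the start of each sector
const SECTOR_DATA_SIZE: usize = SECTOR_SIZE - HASHES_SIZE; // 0x7C00 bytes of user data

// Maps an offset within a partition's data area to its sector index and the
// offset inside that sector's data region (after the hash block).
fn data_offset_to_sector(pos: u64) -> (u64, usize) {
    (pos / SECTOR_DATA_SIZE as u64, (pos % SECTOR_DATA_SIZE as u64) as usize)
}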
@ -1,106 +1,45 @@
use std::{
    fs, io,
    io::{Read, Seek},
    path::Path,
};
use std::{fs, io, io::Read, path::Path};

use dyn_clone::DynClone;
use zerocopy::transmute_ref;

use crate::{
    array_ref,
    common::{Format, KeyBytes, MagicBytes, PartitionInfo},
    disc::{
        hashes::HashTable,
        wii::{WiiPartitionHeader, HASHES_SIZE, SECTOR_DATA_SIZE},
        DiscHeader, PartitionHeader, PartitionKind, GCN_MAGIC, SECTOR_SIZE, WII_MAGIC,
        wii::{HASHES_SIZE, SECTOR_DATA_SIZE},
        DiscHeader, GCN_MAGIC, SECTOR_SIZE, WII_MAGIC,
    },
    io::{
        aes_cbc_decrypt, aes_cbc_encrypt, split::SplitFileReader, DiscMeta, Format, KeyBytes,
        MagicBytes,
        split::SplitFileReader,
        wia::{WIAException, WIAExceptionList},
    },
    util::{lfg::LaggedFibonacci, read::read_from},
    read::{DiscMeta, DiscStream},
    util::{aes::decrypt_sector, array_ref, array_ref_mut, lfg::LaggedFibonacci, read::read_from},
    Error, Result, ResultContext,
};

/// Required trait bounds for reading disc images.
pub trait DiscStream: Read + Seek + DynClone + Send + Sync {}

impl<T> DiscStream for T where T: Read + Seek + DynClone + Send + Sync + ?Sized {}

dyn_clone::clone_trait_object!(DiscStream);

/// Block I/O trait for reading disc images.
pub trait BlockIO: DynClone + Send + Sync {
    /// Reads a block from the disc image.
    fn read_block_internal(
        &mut self,
        out: &mut [u8],
        block: u32,
        partition: Option<&PartitionInfo>,
    ) -> io::Result<Block>;

    /// Reads a full block from the disc image, combining smaller blocks if necessary.
    fn read_block(
        &mut self,
        out: &mut [u8],
        block: u32,
        partition: Option<&PartitionInfo>,
    ) -> io::Result<Block> {
        let block_size_internal = self.block_size_internal();
        let block_size = self.block_size();
        if block_size_internal == block_size {
            self.read_block_internal(out, block, partition)
        } else {
            let mut offset = 0usize;
            let mut result = None;
            let mut block_idx =
                ((block as u64 * block_size as u64) / block_size_internal as u64) as u32;
            while offset < block_size as usize {
                let block = self.read_block_internal(
                    &mut out[offset..offset + block_size_internal as usize],
                    block_idx,
                    partition,
                )?;
                if result.is_none() {
                    result = Some(block);
                } else if result != Some(block) {
                    if block == Block::Zero {
                        out[offset..offset + block_size_internal as usize].fill(0);
                    } else {
                        return Err(io::Error::new(
                            io::ErrorKind::InvalidData,
                            "Inconsistent block types in split block",
                        ));
                    }
                }
                offset += block_size_internal as usize;
                block_idx += 1;
            }
            Ok(result.unwrap_or_default())
        }
    }

    /// The format's block size in bytes. Can be smaller than the sector size (0x8000).
    fn block_size_internal(&self) -> u32;
/// Block reader trait for reading disc images.
pub trait BlockReader: DynClone + Send + Sync {
    /// Reads a block from the disc image containing the specified sector.
    fn read_block(&mut self, out: &mut [u8], sector: u32) -> io::Result<Block>;

    /// The block size used for processing. Must be a multiple of the sector size (0x8000).
    fn block_size(&self) -> u32 { self.block_size_internal().max(SECTOR_SIZE as u32) }
    fn block_size(&self) -> u32;

    /// Returns extra metadata included in the disc file format, if any.
    fn meta(&self) -> DiscMeta;
}

dyn_clone::clone_trait_object!(BlockIO);
dyn_clone::clone_trait_object!(BlockReader);

/// Creates a new [`BlockIO`] instance from a stream.
pub fn new(mut stream: Box<dyn DiscStream>) -> Result<Box<dyn BlockIO>> {
    let io: Box<dyn BlockIO> = match detect(stream.as_mut()).context("Detecting file type")? {
        Some(Format::Iso) => crate::io::iso::DiscIOISO::new(stream)?,
        Some(Format::Ciso) => crate::io::ciso::DiscIOCISO::new(stream)?,
/// Creates a new [`BlockReader`] instance from a stream.
pub fn new(mut stream: Box<dyn DiscStream>) -> Result<Box<dyn BlockReader>> {
    let io: Box<dyn BlockReader> = match detect(stream.as_mut()).context("Detecting file type")? {
        Some(Format::Iso) => crate::io::iso::BlockReaderISO::new(stream)?,
        Some(Format::Ciso) => crate::io::ciso::BlockReaderCISO::new(stream)?,
        Some(Format::Gcz) => {
            #[cfg(feature = "compress-zlib")]
            {
                crate::io::gcz::DiscIOGCZ::new(stream)?
                crate::io::gcz::BlockReaderGCZ::new(stream)?
            }
            #[cfg(not(feature = "compress-zlib"))]
            return Err(Error::DiscFormat("GCZ support is disabled".to_string()));
@ -108,17 +47,17 @@ pub fn new(mut stream: Box<dyn DiscStream>) -> Result<Box<dyn BlockIO>> {
        Some(Format::Nfs) => {
            return Err(Error::DiscFormat("NFS requires a filesystem path".to_string()))
        }
        Some(Format::Wbfs) => crate::io::wbfs::DiscIOWBFS::new(stream)?,
        Some(Format::Wia | Format::Rvz) => crate::io::wia::DiscIOWIA::new(stream)?,
        Some(Format::Tgc) => crate::io::tgc::DiscIOTGC::new(stream)?,
        Some(Format::Wbfs) => crate::io::wbfs::BlockReaderWBFS::new(stream)?,
        Some(Format::Wia | Format::Rvz) => crate::io::wia::BlockReaderWIA::new(stream)?,
        Some(Format::Tgc) => crate::io::tgc::BlockReaderTGC::new(stream)?,
        None => return Err(Error::DiscFormat("Unknown disc format".to_string())),
    };
    check_block_size(io.as_ref())?;
    Ok(io)
}

/// Creates a new [`BlockIO`] instance from a filesystem path.
pub fn open(filename: &Path) -> Result<Box<dyn BlockIO>> {
/// Creates a new [`BlockReader`] instance from a filesystem path.
pub fn open(filename: &Path) -> Result<Box<dyn BlockReader>> {
    let path_result = fs::canonicalize(filename);
    if let Err(err) = path_result {
        return Err(Error::Io(format!("Failed to open {}", filename.display()), err));
@ -132,28 +71,28 @@ pub fn open(filename: &Path) -> Result<Box<dyn BlockIO>> {
        return Err(Error::DiscFormat(format!("Input is not a file: {}", filename.display())));
    }
    let mut stream = Box::new(SplitFileReader::new(filename)?);
    let io: Box<dyn BlockIO> = match detect(stream.as_mut()).context("Detecting file type")? {
        Some(Format::Iso) => crate::io::iso::DiscIOISO::new(stream)?,
        Some(Format::Ciso) => crate::io::ciso::DiscIOCISO::new(stream)?,
    let io: Box<dyn BlockReader> = match detect(stream.as_mut()).context("Detecting file type")? {
        Some(Format::Iso) => crate::io::iso::BlockReaderISO::new(stream)?,
        Some(Format::Ciso) => crate::io::ciso::BlockReaderCISO::new(stream)?,
        Some(Format::Gcz) => {
            #[cfg(feature = "compress-zlib")]
            {
                crate::io::gcz::DiscIOGCZ::new(stream)?
                crate::io::gcz::BlockReaderGCZ::new(stream)?
            }
            #[cfg(not(feature = "compress-zlib"))]
            return Err(Error::DiscFormat("GCZ support is disabled".to_string()));
        }
        Some(Format::Nfs) => match path.parent() {
            Some(parent) if parent.is_dir() => {
                crate::io::nfs::DiscIONFS::new(path.parent().unwrap())?
                crate::io::nfs::BlockReaderNFS::new(path.parent().unwrap())?
            }
            _ => {
                return Err(Error::DiscFormat("Failed to locate NFS parent directory".to_string()));
            }
        },
        Some(Format::Tgc) => crate::io::tgc::DiscIOTGC::new(stream)?,
        Some(Format::Wbfs) => crate::io::wbfs::DiscIOWBFS::new(stream)?,
        Some(Format::Wia | Format::Rvz) => crate::io::wia::DiscIOWIA::new(stream)?,
        Some(Format::Tgc) => crate::io::tgc::BlockReaderTGC::new(stream)?,
        Some(Format::Wbfs) => crate::io::wbfs::BlockReaderWBFS::new(stream)?,
        Some(Format::Wia | Format::Rvz) => crate::io::wia::BlockReaderWIA::new(stream)?,
        None => return Err(Error::DiscFormat("Unknown disc format".to_string())),
    };
    check_block_size(io.as_ref())?;
@ -163,7 +102,7 @@ pub fn open(filename: &Path) -> Result<Box<dyn BlockIO>> {
pub const CISO_MAGIC: MagicBytes = *b"CISO";
pub const GCZ_MAGIC: MagicBytes = [0x01, 0xC0, 0x0B, 0xB1];
pub const NFS_MAGIC: MagicBytes = *b"EGGS";
pub const TGC_MAGIC: MagicBytes = [0xae, 0x0f, 0x38, 0xa2];
pub const TGC_MAGIC: MagicBytes = [0xAE, 0x0F, 0x38, 0xA2];
pub const WBFS_MAGIC: MagicBytes = *b"WBFS";
pub const WIA_MAGIC: MagicBytes = *b"WIA\x01";
pub const RVZ_MAGIC: MagicBytes = *b"RVZ\x01";
@ -190,16 +129,7 @@ pub fn detect<R: Read + ?Sized>(stream: &mut R) -> io::Result<Option<Format>> {
    Ok(out)
}

fn check_block_size(io: &dyn BlockIO) -> Result<()> {
    if io.block_size_internal() < SECTOR_SIZE as u32
        && SECTOR_SIZE as u32 % io.block_size_internal() != 0
    {
        return Err(Error::DiscFormat(format!(
            "Sector size {} is not divisible by block size {}",
            SECTOR_SIZE,
            io.block_size_internal(),
        )));
    }
fn check_block_size(io: &dyn BlockReader) -> Result<()> {
    if io.block_size() % SECTOR_SIZE as u32 != 0 {
        return Err(Error::DiscFormat(format!(
            "Block size {} is not a multiple of sector size {}",
@ -210,182 +140,263 @@ fn check_block_size(io: &dyn BlockIO) -> Result<()> {
    Ok(())
}

/// Wii partition information.
#[derive(Debug, Clone)]
pub struct PartitionInfo {
    /// The partition index.
    pub index: usize,
    /// The kind of disc partition.
    pub kind: PartitionKind,
    /// The start sector of the partition.
    pub start_sector: u32,
    /// The start sector of the partition's data.
    pub data_start_sector: u32,
    /// The end sector of the partition's data.
    pub data_end_sector: u32,
    /// The AES key for the partition, also known as the "title key".
    pub key: KeyBytes,
    /// The Wii partition header.
    pub header: Box<WiiPartitionHeader>,
    /// The disc header within the partition.
    pub disc_header: Box<DiscHeader>,
    /// The partition header within the partition.
    pub partition_header: Box<PartitionHeader>,
    /// The hash table for the partition, if rebuilt.
    pub hash_table: Option<HashTable>,
    /// Whether the partition data is encrypted
    pub has_encryption: bool,
    /// Whether the partition data hashes are present
    pub has_hashes: bool,
}

/// The block kind returned by [`BlockIO::read_block`].
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum Block {
    /// Raw data or encrypted Wii partition data
    Raw,
    /// Encrypted Wii partition data
    PartEncrypted,
    /// Decrypted Wii partition data
    PartDecrypted {
        /// Whether the sector has its hash block intact
        has_hashes: bool,
    },
    /// Wii partition junk data
    Junk,
    /// All zeroes
    #[default]
    Zero,
/// A block of sectors within a disc image.
#[derive(Debug, Clone, Default)]
pub struct Block {
    /// The starting sector of the block.
    pub sector: u32,
    /// The number of sectors in the block.
    pub count: u32,
    /// The block kind.
    pub kind: BlockKind,
    /// Any hash exceptions for the block.
    pub hash_exceptions: Box<[WIAExceptionList]>,
    /// The duration of I/O operations, if available.
    pub io_duration: Option<std::time::Duration>,
}

impl Block {
    /// Decrypts the block's data (if necessary) and writes it to the output buffer.
    pub(crate) fn decrypt(
        self,
        out: &mut [u8; SECTOR_SIZE],
        data: &[u8],
        abs_sector: u32,
        partition: &PartitionInfo,
    ) -> io::Result<()> {
        let part_sector = abs_sector - partition.data_start_sector;
        match self {
            Block::Raw => {
                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
            }
            Block::PartEncrypted => {
                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
                decrypt_sector(out, partition);
            }
            Block::PartDecrypted { has_hashes } => {
                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
                if !has_hashes {
                    rebuild_hash_block(out, part_sector, partition);
                }
            }
            Block::Junk => {
                generate_junk(out, part_sector, Some(partition), &partition.disc_header);
                rebuild_hash_block(out, part_sector, partition);
            }
            Block::Zero => {
                out.fill(0);
                rebuild_hash_block(out, part_sector, partition);
            }
    /// Creates a new block from a block of sectors.
    #[inline]
    pub fn new(block_idx: u32, block_size: u32, kind: BlockKind) -> Self {
        let sectors_per_block = block_size / SECTOR_SIZE as u32;
        Self {
            sector: block_idx * sectors_per_block,
            count: sectors_per_block,
            kind,
            hash_exceptions: Default::default(),
            io_duration: None,
        }
    }

    /// Creates a new block from a single sector.
    #[inline]
    pub fn sector(sector: u32, kind: BlockKind) -> Self {
        Self { sector, count: 1, kind, hash_exceptions: Default::default(), io_duration: None }
    }

    /// Creates a new block from a range of sectors.
    #[inline]
    pub fn sectors(sector: u32, count: u32, kind: BlockKind) -> Self {
        Self { sector, count, kind, hash_exceptions: Default::default(), io_duration: None }
    }

    /// Returns whether the block contains the specified sector.
    #[inline]
    pub fn contains(&self, sector: u32) -> bool {
        sector >= self.sector && sector < self.sector + self.count
    }

    /// Returns an error if the block does not contain the specified sector.
    pub fn ensure_contains(&self, sector: u32) -> io::Result<()> {
        if !self.contains(sector) {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                format!(
                    "Sector {} not in block range {}-{}",
                    sector,
                    self.sector,
                    self.sector + self.count
                ),
            ));
        }
        Ok(())
    }

    /// Encrypts the block's data (if necessary) and writes it to the output buffer.
    pub(crate) fn encrypt(
        self,
        out: &mut [u8; SECTOR_SIZE],
        data: &[u8],
        abs_sector: u32,
        partition: &PartitionInfo,
    ) -> io::Result<()> {
        let part_sector = abs_sector - partition.data_start_sector;
        match self {
            Block::Raw => {
                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
                encrypt_sector(out, partition);
            }
            Block::PartEncrypted => {
                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
            }
            Block::PartDecrypted { has_hashes } => {
                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
                if !has_hashes {
                    rebuild_hash_block(out, part_sector, partition);
    /// Decrypts block data in-place. The decrypted data can be accessed using
    /// [`partition_data`](Block::partition_data).
    pub(crate) fn decrypt_block(&self, data: &mut [u8], key: Option<KeyBytes>) -> io::Result<()> {
        match self.kind {
            BlockKind::None => {}
            BlockKind::Raw => {
                if let Some(key) = key {
                    for i in 0..self.count as usize {
                        decrypt_sector(array_ref_mut![data, i * SECTOR_SIZE, SECTOR_SIZE], &key);
                    }
                }
                encrypt_sector(out, partition);
            }
            Block::Junk => {
                generate_junk(out, part_sector, Some(partition), &partition.disc_header);
                rebuild_hash_block(out, part_sector, partition);
                encrypt_sector(out, partition);
            BlockKind::PartDecrypted { .. } => {
                // no-op
            }
            Block::Zero => {
                out.fill(0);
                rebuild_hash_block(out, part_sector, partition);
                encrypt_sector(out, partition);
            BlockKind::Junk => {
                // unsupported, used for DirectDiscReader
                data.fill(0);
            }
            BlockKind::Zero => data.fill(0),
        }
        Ok(())
    }

    /// Copies the block's raw data to the output buffer.
    pub(crate) fn copy_raw(
        self,
    /// Copies a sector's raw data to the output buffer. Returns whether the sector is encrypted
    /// and whether it has hashes.
    pub(crate) fn copy_sector(
        &self,
        out: &mut [u8; SECTOR_SIZE],
        data: &[u8],
        abs_sector: u32,
        disc_header: &DiscHeader,
    ) -> io::Result<()> {
        match self {
            Block::Raw | Block::PartEncrypted | Block::PartDecrypted { .. } => {
                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
        partition: Option<&PartitionInfo>,
    ) -> io::Result<(bool, bool)> {
        let mut encrypted = false;
        let mut has_hashes = false;
        match self.kind {
            BlockKind::None => {}
            BlockKind::Raw => {
                *out = *self.sector_buf(data, abs_sector)?;
                if partition.is_some_and(|p| p.has_encryption) {
                    encrypted = true;
                }
                if partition.is_some_and(|p| p.has_hashes) {
                    has_hashes = true;
                }
            }
            Block::Junk => generate_junk(out, abs_sector, None, disc_header),
            Block::Zero => out.fill(0),
            BlockKind::PartDecrypted { hash_block } => {
                if hash_block {
                    *out = *self.sector_buf(data, abs_sector)?;
                    has_hashes = partition.is_some_and(|p| p.has_hashes);
                } else {
                    *array_ref_mut![out, HASHES_SIZE, SECTOR_DATA_SIZE] =
                        *self.sector_data_buf(data, abs_sector)?;
                }
            }
            BlockKind::Junk => generate_junk_sector(out, abs_sector, partition, disc_header),
            BlockKind::Zero => out.fill(0),
        }
        Ok((encrypted, has_hashes))
    }

    /// Returns a sector's data from the block buffer.
    pub(crate) fn sector_buf<'a>(
        &self,
        data: &'a [u8],
        abs_sector: u32,
    ) -> io::Result<&'a [u8; SECTOR_SIZE]> {
        self.ensure_contains(abs_sector)?;
        let block_offset = ((abs_sector - self.sector) * SECTOR_SIZE as u32) as usize;
        Ok(array_ref!(data, block_offset, SECTOR_SIZE))
    }

    /// Returns a sector's partition data (excluding hashes) from the block buffer.
    pub(crate) fn sector_data_buf<'a>(
        &self,
        data: &'a [u8],
        abs_sector: u32,
    ) -> io::Result<&'a [u8; SECTOR_DATA_SIZE]> {
        self.ensure_contains(abs_sector)?;
        let block_offset = ((abs_sector - self.sector) * SECTOR_DATA_SIZE as u32) as usize;
        Ok(array_ref!(data, block_offset, SECTOR_DATA_SIZE))
    }

    /// Returns raw data from the block buffer, starting at the specified position.
    pub(crate) fn data<'a>(&self, data: &'a [u8], pos: u64) -> io::Result<&'a [u8]> {
        if self.kind == BlockKind::None {
            return Ok(&[]);
        }
        self.ensure_contains((pos / SECTOR_SIZE as u64) as u32)?;
        let offset = (pos - self.sector as u64 * SECTOR_SIZE as u64) as usize;
        let end = self.count as usize * SECTOR_SIZE;
        Ok(&data[offset..end])
    }

    /// Returns partition data (excluding hashes) from the block buffer, starting at the specified
    /// position within the partition.
    ///
    /// If the block does not contain hashes, this will return the full block data. Otherwise, this
    /// will return only the corresponding sector's data, ending at the sector boundary, to avoid
    /// reading into the next sector's hash block.
    pub(crate) fn partition_data<'a>(
        &self,
        data: &'a [u8],
        pos: u64,
        data_start_sector: u32,
        partition_has_hashes: bool,
    ) -> io::Result<&'a [u8]> {
        let block_has_hashes = match self.kind {
            BlockKind::Raw => partition_has_hashes,
            BlockKind::PartDecrypted { hash_block, .. } => hash_block && partition_has_hashes,
            BlockKind::Junk | BlockKind::Zero => false,
            BlockKind::None => return Ok(&[]),
        };
        let (part_sector, sector_offset) = if partition_has_hashes {
            ((pos / SECTOR_DATA_SIZE as u64) as u32, (pos % SECTOR_DATA_SIZE as u64) as usize)
        } else {
            ((pos / SECTOR_SIZE as u64) as u32, (pos % SECTOR_SIZE as u64) as usize)
        };
        let abs_sector = part_sector + data_start_sector;
        self.ensure_contains(abs_sector)?;
        let block_sector = (abs_sector - self.sector) as usize;
        if block_has_hashes {
            let offset = block_sector * SECTOR_SIZE + HASHES_SIZE + sector_offset;
            let end = (block_sector + 1) * SECTOR_SIZE; // end of sector
            Ok(&data[offset..end])
        } else if partition_has_hashes {
            let offset = block_sector * SECTOR_DATA_SIZE + sector_offset;
            let end = self.count as usize * SECTOR_DATA_SIZE; // end of block
            Ok(&data[offset..end])
        } else {
            let offset = block_sector * SECTOR_SIZE + sector_offset;
            let end = self.count as usize * SECTOR_SIZE; // end of block
            Ok(&data[offset..end])
        }
    }

    pub(crate) fn append_hash_exceptions(
        &self,
        abs_sector: u32,
        group_sector: u32,
        out: &mut Vec<WIAException>,
    ) -> io::Result<()> {
        self.ensure_contains(abs_sector)?;
        let block_sector = abs_sector - self.sector;
        let group = (block_sector / 64) as usize;
        let base_offset = ((block_sector % 64) as usize * HASHES_SIZE) as u16;
        let new_base_offset = (group_sector * HASHES_SIZE as u32) as u16;
        out.extend(self.hash_exceptions.get(group).iter().flat_map(|list| {
            list.iter().filter_map(|exception| {
                let offset = exception.offset.get();
                if offset >= base_offset && offset < base_offset + HASHES_SIZE as u16 {
                    let new_offset = (offset - base_offset) + new_base_offset;
                    Some(WIAException { offset: new_offset.into(), hash: exception.hash })
                } else {
                    None
                }
            })
        }));
        Ok(())
    }
}
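A standalone sketch of the offset rebasing performed by `append_hash_exceptions` above: exception offsets address a 64-sector group's concatenated 0x400-byte hash blocks, so moving a sector to a new slot in a group means shifting its offsets. The function name and u32 intermediate math are illustrative only (the wider type avoids u16 overflow at slot 63).

fn rebase_exception_offset(offset: u16, old_slot: u32, new_slot: u32) -> Option<u16> {
    const HASHES_SIZE: u32 = 0x400; // one sector's hash block
    let base = old_slot * HASHES_SIZE;
    let off = offset as u32;
    if off >= base && off < base + HASHES_SIZE {
        // Rebase into the new slot's hash block.
        Some((off - base + new_slot * HASHES_SIZE) as u16)
    } else {
        None // exception belongs to a different sector in the group
    }
}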
#[inline(always)]
fn block_sector<const N: usize>(data: &[u8], sector_idx: u32) -> io::Result<&[u8; N]> {
    if data.len() % N != 0 {
        return Err(io::Error::new(
            io::ErrorKind::InvalidData,
            format!("Expected block size {} to be a multiple of {}", data.len(), N),
        ));
    }
    let rel_sector = sector_idx % (data.len() / N) as u32;
    let offset = rel_sector as usize * N;
    data.get(offset..offset + N)
        .ok_or_else(|| {
            io::Error::new(
                io::ErrorKind::InvalidData,
                format!(
                    "Sector {} out of range (block size {}, sector size {})",
                    rel_sector,
                    data.len(),
                    N
                ),
            )
        })
        .map(|v| unsafe { &*(v as *const [u8] as *const [u8; N]) })
/// The block kind.
#[derive(Debug, Copy, Clone, PartialEq, Default)]
pub enum BlockKind {
    /// Empty block, likely end of disc
    #[default]
    None,
    /// Raw data or encrypted Wii partition data
    Raw,
    /// Decrypted Wii partition data
    PartDecrypted {
        /// Whether the sector has its hash block intact
        hash_block: bool,
    },
    /// Wii partition junk data
    Junk,
    /// All zeroes
    Zero,
}

fn generate_junk(
/// Generates junk data for a single sector.
pub fn generate_junk_sector(
    out: &mut [u8; SECTOR_SIZE],
    sector: u32,
    abs_sector: u32,
    partition: Option<&PartitionInfo>,
    disc_header: &DiscHeader,
) {
    let (pos, offset) = if partition.is_some() {
    let (pos, offset) = if partition.is_some_and(|p| p.has_hashes) {
        let sector = abs_sector - partition.unwrap().data_start_sector;
        (sector as u64 * SECTOR_DATA_SIZE as u64, HASHES_SIZE)
    } else {
        (sector as u64 * SECTOR_SIZE as u64, 0)
        (abs_sector as u64 * SECTOR_SIZE as u64, 0)
    };
    out[..offset].fill(0);
    let mut lfg = LaggedFibonacci::default();
@ -396,33 +407,3 @@ fn generate_junk(
        pos,
    );
}

fn rebuild_hash_block(out: &mut [u8; SECTOR_SIZE], part_sector: u32, partition: &PartitionInfo) {
    let Some(hash_table) = partition.hash_table.as_ref() else {
        return;
    };
    let sector_idx = part_sector as usize;
    let h0_hashes: &[u8; 0x26C] =
        transmute_ref!(array_ref![hash_table.h0_hashes, sector_idx * 31, 31]);
    out[0..0x26C].copy_from_slice(h0_hashes);
    let h1_hashes: &[u8; 0xA0] =
        transmute_ref!(array_ref![hash_table.h1_hashes, sector_idx & !7, 8]);
    out[0x280..0x320].copy_from_slice(h1_hashes);
    let h2_hashes: &[u8; 0xA0] =
        transmute_ref!(array_ref![hash_table.h2_hashes, (sector_idx / 8) & !7, 8]);
    out[0x340..0x3E0].copy_from_slice(h2_hashes);
}

fn encrypt_sector(out: &mut [u8; SECTOR_SIZE], partition: &PartitionInfo) {
    aes_cbc_encrypt(&partition.key, &[0u8; 16], &mut out[..HASHES_SIZE]);
    // Data IV from encrypted hash block
    let iv = *array_ref![out, 0x3D0, 16];
    aes_cbc_encrypt(&partition.key, &iv, &mut out[HASHES_SIZE..]);
}

fn decrypt_sector(out: &mut [u8; SECTOR_SIZE], partition: &PartitionInfo) {
    // Data IV from encrypted hash block
    let iv = *array_ref![out, 0x3D0, 16];
    aes_cbc_decrypt(&partition.key, &[0u8; 16], &mut out[..HASHES_SIZE]);
    aes_cbc_decrypt(&partition.key, &iv, &mut out[HASHES_SIZE..]);
}
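A sketch of the Wii sector IV scheme implemented by `encrypt_sector` and `decrypt_sector` above, with the AES-128-CBC primitive abstracted behind a closure so the example stays self-contained (the function and closure names are illustrative, not part of the crate):

const SECTOR_SIZE: usize = 0x8000;
const HASHES_SIZE: usize = 0x400;

fn encrypt_sector_sketch(
    out: &mut [u8; SECTOR_SIZE],
    key: &[u8; 16],
    mut cbc_encrypt: impl FnMut(&[u8; 16], &[u8; 16], &mut [u8]),
) {
    // The hash block is encrypted with an all-zero IV...
    cbc_encrypt(key, &[0u8; 16], &mut out[..HASHES_SIZE]);
    // ...and the data IV is then taken from the *encrypted* hash block at 0x3D0.
    let mut iv = [0u8; 16];
    iv.copy_from_slice(&out[0x3D0..0x3E0]);
    cbc_encrypt(key, &iv, &mut out[HASHES_SIZE..]);
}

Decryption mirrors this: the IV for the data area is read from the still-encrypted hash block before the hash block itself is decrypted.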
@ -2,20 +2,36 @@ use std::{
    io,
    io::{Read, Seek, SeekFrom},
    mem::size_of,
    sync::Arc,
};

use zerocopy::{little_endian::*, FromBytes, Immutable, IntoBytes, KnownLayout};
use bytes::{BufMut, Bytes, BytesMut};
use zerocopy::{little_endian::*, FromBytes, FromZeros, Immutable, IntoBytes, KnownLayout};

use crate::{
    disc::SECTOR_SIZE,
    io::{
        block::{Block, BlockIO, DiscStream, PartitionInfo, CISO_MAGIC},
        nkit::NKitHeader,
        Format, MagicBytes,
    common::{Compression, Format, MagicBytes},
    disc::{
        reader::DiscReader,
        writer::{
            check_block, par_process, read_block, BlockProcessor, BlockResult, CheckBlockResult,
            DataCallback, DiscWriter,
        },
        SECTOR_SIZE,
    },
    static_assert,
    util::read::read_from,
    DiscMeta, Error, Result, ResultContext,
    io::{
        block::{Block, BlockKind, BlockReader, CISO_MAGIC},
        nkit::{JunkBits, NKitHeader},
    },
    read::{DiscMeta, DiscStream},
    util::{
        array_ref,
        digest::DigestManager,
        lfg::LaggedFibonacci,
        read::{box_to_bytes, read_arc},
        static_assert,
    },
    write::{DiscFinalization, DiscWriterWeight, FormatOptions, ProcessOptions},
    Error, Result, ResultContext,
};

pub const CISO_MAP_SIZE: usize = SECTOR_SIZE - 8;
@ -32,18 +48,18 @@ struct CISOHeader {
static_assert!(size_of::<CISOHeader>() == SECTOR_SIZE);

#[derive(Clone)]
pub struct DiscIOCISO {
pub struct BlockReaderCISO {
    inner: Box<dyn DiscStream>,
    header: CISOHeader,
    block_map: [u16; CISO_MAP_SIZE],
    header: Arc<CISOHeader>,
    block_map: Arc<[u16; CISO_MAP_SIZE]>,
    nkit_header: Option<NKitHeader>,
}

impl DiscIOCISO {
impl BlockReaderCISO {
    pub fn new(mut inner: Box<dyn DiscStream>) -> Result<Box<Self>> {
        // Read header
        inner.seek(SeekFrom::Start(0)).context("Seeking to start")?;
        let header: CISOHeader = read_from(inner.as_mut()).context("Reading CISO header")?;
        let header: Arc<CISOHeader> = read_arc(inner.as_mut()).context("Reading CISO header")?;
        if header.magic != CISO_MAGIC {
            return Err(Error::DiscFormat("Invalid CISO magic".to_string()));
        }
@ -69,54 +85,47 @@ impl DiscIOCISO {
        }

        // Read NKit header if present (after CISO data)
        let nkit_header = if len > file_size + 4 {
        let nkit_header = if len > file_size + 12 {
            inner.seek(SeekFrom::Start(file_size)).context("Seeking to NKit header")?;
            NKitHeader::try_read_from(inner.as_mut(), header.block_size.get(), true)
        } else {
            None
        };

        Ok(Box::new(Self { inner, header, block_map, nkit_header }))
        Ok(Box::new(Self { inner, header, block_map: Arc::new(block_map), nkit_header }))
    }
}

impl BlockIO for DiscIOCISO {
    fn read_block_internal(
        &mut self,
        out: &mut [u8],
        block: u32,
        partition: Option<&PartitionInfo>,
    ) -> io::Result<Block> {
        if block >= CISO_MAP_SIZE as u32 {
impl BlockReader for BlockReaderCISO {
    fn read_block(&mut self, out: &mut [u8], sector: u32) -> io::Result<Block> {
        let block_size = self.header.block_size.get();
        let block_idx = ((sector as u64 * SECTOR_SIZE as u64) / block_size as u64) as u32;
        if block_idx >= CISO_MAP_SIZE as u32 {
            // Out of bounds
            return Ok(Block::Zero);
            return Ok(Block::new(block_idx, block_size, BlockKind::None));
        }

        // Find the block in the map
        let phys_block = self.block_map[block as usize];
        let phys_block = self.block_map[block_idx as usize];
        if phys_block == u16::MAX {
            // Check if block is junk data
            if self.nkit_header.as_ref().and_then(|h| h.is_junk_block(block)).unwrap_or(false) {
                return Ok(Block::Junk);
            if self.nkit_header.as_ref().and_then(|h| h.is_junk_block(block_idx)).unwrap_or(false) {
                return Ok(Block::new(block_idx, block_size, BlockKind::Junk));
            };

            // Otherwise, read zeroes
            return Ok(Block::Zero);
            return Ok(Block::new(block_idx, block_size, BlockKind::Zero));
        }

        // Read block
        let file_offset = size_of::<CISOHeader>() as u64
            + phys_block as u64 * self.header.block_size.get() as u64;
        let file_offset = size_of::<CISOHeader>() as u64 + phys_block as u64 * block_size as u64;
        self.inner.seek(SeekFrom::Start(file_offset))?;
        self.inner.read_exact(out)?;

        match partition {
            Some(partition) if partition.has_encryption => Ok(Block::PartEncrypted),
            _ => Ok(Block::Raw),
        }
        Ok(Block::new(block_idx, block_size, BlockKind::Raw))
    }

    fn block_size_internal(&self) -> u32 { self.header.block_size.get() }
    fn block_size(&self) -> u32 { self.header.block_size.get() }

    fn meta(&self) -> DiscMeta {
        let mut result = DiscMeta {
@ -130,3 +139,187 @@ impl BlockIO for DiscIOCISO {
        result
    }
}

struct BlockProcessorCISO {
    inner: DiscReader,
    block_size: u32,
    decrypted_block: Box<[u8]>,
    lfg: LaggedFibonacci,
    disc_id: [u8; 4],
    disc_num: u8,
}

impl Clone for BlockProcessorCISO {
    fn clone(&self) -> Self {
        Self {
            inner: self.inner.clone(),
            block_size: self.block_size,
            decrypted_block: <[u8]>::new_box_zeroed_with_elems(self.block_size as usize).unwrap(),
            lfg: LaggedFibonacci::default(),
            disc_id: self.disc_id,
            disc_num: self.disc_num,
        }
    }
}

impl BlockProcessor for BlockProcessorCISO {
    type BlockMeta = CheckBlockResult;

    fn process_block(&mut self, block_idx: u32) -> io::Result<BlockResult<Self::BlockMeta>> {
        let block_size = self.block_size as usize;
        let input_position = block_idx as u64 * block_size as u64;
        self.inner.seek(SeekFrom::Start(input_position))?;
        let (block_data, disc_data) = read_block(&mut self.inner, block_size)?;

        // Check if block is zeroed or junk
        let result = match check_block(
            disc_data.as_ref(),
            &mut self.decrypted_block,
            input_position,
            self.inner.partitions(),
            &mut self.lfg,
            self.disc_id,
            self.disc_num,
        )? {
            CheckBlockResult::Normal => {
                BlockResult { block_idx, disc_data, block_data, meta: CheckBlockResult::Normal }
            }
            CheckBlockResult::Zeroed => BlockResult {
                block_idx,
                disc_data,
                block_data: Bytes::new(),
                meta: CheckBlockResult::Zeroed,
            },
            CheckBlockResult::Junk => BlockResult {
                block_idx,
                disc_data,
                block_data: Bytes::new(),
                meta: CheckBlockResult::Junk,
            },
        };
        Ok(result)
    }
}

#[derive(Clone)]
pub struct DiscWriterCISO {
    inner: DiscReader,
    block_size: u32,
    block_count: u32,
    disc_size: u64,
}

pub const DEFAULT_BLOCK_SIZE: u32 = 0x200000; // 2 MiB
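Since the CISO header's block map is a fixed-size array (CISO_MAP_SIZE = SECTOR_SIZE - 8 = 32760 entries), the block size caps the representable image size; with the 2 MiB default above this works out to just under 64 GiB. A quick arithmetic check, with the constants restated:

const SECTOR_SIZE: usize = 0x8000;
const CISO_MAP_SIZE: usize = SECTOR_SIZE - 8; // 32760 map entries
const DEFAULT_BLOCK_SIZE: u64 = 0x200000; // 2 MiB

fn main() {
    // Largest disc image representable with the default block size.
    let max_size = CISO_MAP_SIZE as u64 * DEFAULT_BLOCK_SIZE;
    assert_eq!(max_size, 68_702_699_520); // ~63.98 GiB
}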
impl DiscWriterCISO {
    pub fn new(inner: DiscReader, options: &FormatOptions) -> Result<Box<dyn DiscWriter>> {
        if options.format != Format::Ciso {
            return Err(Error::DiscFormat("Invalid format for CISO writer".to_string()));
        }
        if options.compression != Compression::None {
            return Err(Error::DiscFormat("CISO does not support compression".to_string()));
        }
        let block_size = DEFAULT_BLOCK_SIZE;

        let disc_size = inner.disc_size();
        let block_count = disc_size.div_ceil(block_size as u64) as u32;
        if block_count > CISO_MAP_SIZE as u32 {
            return Err(Error::DiscFormat(format!(
                "CISO block count exceeds maximum: {} > {}",
                block_count, CISO_MAP_SIZE
            )));
        }

        Ok(Box::new(Self { inner, block_size, block_count, disc_size }))
    }
}

impl DiscWriter for DiscWriterCISO {
    fn process(
        &self,
        data_callback: &mut DataCallback,
        options: &ProcessOptions,
    ) -> Result<DiscFinalization> {
        data_callback(BytesMut::zeroed(SECTOR_SIZE).freeze(), 0, self.disc_size)
            .context("Failed to write header")?;

        // Determine junk data values
        let disc_header = self.inner.header();
        let disc_id = *array_ref![disc_header.game_id, 0, 4];
        let disc_num = disc_header.disc_num;

        // Create hashers
        let digest = DigestManager::new(options);
        let block_size = self.block_size;
        let mut junk_bits = JunkBits::new(block_size);
        let mut input_position = 0;

        let mut block_count = 0;
        let mut header = CISOHeader::new_box_zeroed()?;
        header.magic = CISO_MAGIC;
        header.block_size = block_size.into();
        par_process(
            || BlockProcessorCISO {
                inner: self.inner.clone(),
                block_size,
                decrypted_block: <[u8]>::new_box_zeroed_with_elems(block_size as usize).unwrap(),
                lfg: LaggedFibonacci::default(),
                disc_id,
                disc_num,
            },
            self.block_count,
            options.processor_threads,
            |block| -> Result<()> {
                // Update hashers
                let disc_data_len = block.disc_data.len() as u64;
                digest.send(block.disc_data);

                // Check if block is zeroed or junk
                match block.meta {
                    CheckBlockResult::Normal => {
                        header.block_present[block.block_idx as usize] = 1;
                        block_count += 1;
                    }
                    CheckBlockResult::Zeroed => {}
                    CheckBlockResult::Junk => {
                        junk_bits.set(block.block_idx, true);
                    }
                }

                input_position += disc_data_len;
                data_callback(block.block_data, input_position, self.disc_size)
                    .with_context(|| format!("Failed to write block {}", block.block_idx))?;
                Ok(())
            },
        )?;

        // Collect hash results
        let digest_results = digest.finish();
        let mut nkit_header = NKitHeader {
            version: 2,
            size: Some(self.disc_size),
            crc32: None,
            md5: None,
            sha1: None,
            xxh64: None,
            junk_bits: Some(junk_bits),
            encrypted: true,
        };
        nkit_header.apply_digests(&digest_results);

        // Write NKit header after data
        let mut buffer = BytesMut::new().writer();
        nkit_header.write_to(&mut buffer).context("Writing NKit header")?;
        data_callback(buffer.into_inner().freeze(), self.disc_size, self.disc_size)
            .context("Failed to write NKit header")?;

        let header = Bytes::from(box_to_bytes(header));
        let mut finalization = DiscFinalization { header, ..Default::default() };
        finalization.apply_digests(&digest_results);
        Ok(finalization)
    }

    fn progress_bound(&self) -> u64 { self.disc_size }

    fn weight(&self) -> DiscWriterWeight { DiscWriterWeight::Medium }
}
@ -2,21 +2,30 @@ use std::{
    io,
    io::{Read, Seek, SeekFrom},
    mem::size_of,
    sync::Arc,
};

use adler::adler32_slice;
use miniz_oxide::{inflate, inflate::core::inflate_flags};
use bytes::{BufMut, Bytes, BytesMut};
use zerocopy::{little_endian::*, FromBytes, FromZeros, Immutable, IntoBytes, KnownLayout};
use zstd::zstd_safe::WriteBuf;

use crate::{
    io::{
        block::{Block, BlockIO, DiscStream, GCZ_MAGIC},
        MagicBytes,
    common::{Compression, Format, MagicBytes},
    disc::{
        reader::DiscReader,
        writer::{par_process, read_block, BlockProcessor, BlockResult, DataCallback, DiscWriter},
        SECTOR_SIZE,
    },
    static_assert,
    util::read::{read_box_slice, read_from},
    Compression, DiscMeta, Error, Format, PartitionInfo, Result, ResultContext,
    io::block::{Block, BlockKind, BlockReader, GCZ_MAGIC},
    read::{DiscMeta, DiscStream},
    util::{
        compress::{Compressor, DecompressionKind, Decompressor},
        digest::DigestManager,
        read::{read_arc_slice, read_from},
        static_assert,
    },
    write::{DiscFinalization, DiscWriterWeight, FormatOptions, ProcessOptions},
    Error, Result, ResultContext,
};

/// GCZ header (little endian)
@ -33,16 +42,17 @@ struct GCZHeader {

static_assert!(size_of::<GCZHeader>() == 32);

pub struct DiscIOGCZ {
pub struct BlockReaderGCZ {
    inner: Box<dyn DiscStream>,
    header: GCZHeader,
    block_map: Box<[U64]>,
    block_hashes: Box<[U32]>,
    block_map: Arc<[U64]>,
    block_hashes: Arc<[U32]>,
    block_buf: Box<[u8]>,
    data_offset: u64,
    decompressor: Decompressor,
}

impl Clone for DiscIOGCZ {
impl Clone for BlockReaderGCZ {
    fn clone(&self) -> Self {
        Self {
            inner: self.inner.clone(),
@ -51,11 +61,12 @@ impl Clone for DiscIOGCZ {
            block_hashes: self.block_hashes.clone(),
            block_buf: <[u8]>::new_box_zeroed_with_elems(self.block_buf.len()).unwrap(),
            data_offset: self.data_offset,
            decompressor: self.decompressor.clone(),
        }
    }
}

impl DiscIOGCZ {
impl BlockReaderGCZ {
    pub fn new(mut inner: Box<dyn DiscStream>) -> Result<Box<Self>> {
        // Read header
        inner.seek(SeekFrom::Start(0)).context("Seeking to start")?;
@ -66,41 +77,50 @@ impl DiscIOGCZ {

        // Read block map and hashes
        let block_count = header.block_count.get();
        let block_map = read_box_slice(inner.as_mut(), block_count as usize)
        let block_map = read_arc_slice(inner.as_mut(), block_count as usize)
            .context("Reading GCZ block map")?;
        let block_hashes = read_box_slice(inner.as_mut(), block_count as usize)
        let block_hashes = read_arc_slice(inner.as_mut(), block_count as usize)
            .context("Reading GCZ block hashes")?;

        // header + block_count * (u64 + u32)
        let data_offset = size_of::<GCZHeader>() as u64 + block_count as u64 * 12;
        let block_buf = <[u8]>::new_box_zeroed_with_elems(header.block_size.get() as usize)?;
        Ok(Box::new(Self { inner, header, block_map, block_hashes, block_buf, data_offset }))
        let decompressor = Decompressor::new(DecompressionKind::Deflate);
        Ok(Box::new(Self {
            inner,
            header,
            block_map,
            block_hashes,
            block_buf,
            data_offset,
            decompressor,
        }))
    }
}

impl BlockIO for DiscIOGCZ {
    fn read_block_internal(
        &mut self,
        out: &mut [u8],
        block: u32,
        partition: Option<&PartitionInfo>,
    ) -> io::Result<Block> {
        if block >= self.header.block_count.get() {
impl BlockReader for BlockReaderGCZ {
    fn read_block(&mut self, out: &mut [u8], sector: u32) -> io::Result<Block> {
        let block_size = self.header.block_size.get();
        let block_idx = ((sector as u64 * SECTOR_SIZE as u64) / block_size as u64) as u32;
        if block_idx >= self.header.block_count.get() {
            // Out of bounds
            return Ok(Block::Zero);
            return Ok(Block::new(block_idx, block_size, BlockKind::None));
        }

        // Find block offset and size
        let mut file_offset = self.block_map[block as usize].get();
        let mut file_offset = self.block_map[block_idx as usize].get();
        let mut compressed = true;
        if file_offset & (1 << 63) != 0 {
            file_offset &= !(1 << 63);
            compressed = false;
        }
        let compressed_size =
            ((self.block_map.get(block as usize + 1).unwrap_or(&self.header.compressed_size).get()
                & !(1 << 63))
                - file_offset) as usize;
        let compressed_size = ((self
            .block_map
            .get(block_idx as usize + 1)
            .unwrap_or(&self.header.compressed_size)
            .get()
            & !(1 << 63))
            - file_offset) as usize;
        if compressed_size > self.block_buf.len() {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
@ -127,58 +147,43 @@ impl BlockIO for DiscIOGCZ {

        // Verify block checksum
        let checksum = adler32_slice(&self.block_buf[..compressed_size]);
        let expected_checksum = self.block_hashes[block as usize].get();
        let expected_checksum = self.block_hashes[block_idx as usize].get();
        if checksum != expected_checksum {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                format!(
                    "Block checksum mismatch: {:#010x} != {:#010x}",
                    checksum, expected_checksum
                    "Block {} checksum mismatch: {:#010x} != {:#010x}",
                    block_idx, checksum, expected_checksum
                ),
            ));
        }

        if compressed {
            // Decompress block
            let mut decompressor = inflate::core::DecompressorOxide::new();
            let input = &self.block_buf[..compressed_size];
            let (status, in_size, out_size) = inflate::core::decompress(
                &mut decompressor,
                input,
                out,
                0,
                inflate_flags::TINFL_FLAG_PARSE_ZLIB_HEADER
                    | inflate_flags::TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF,
            );
            if status != inflate::TINFLStatus::Done
                || in_size != compressed_size
                || out_size != self.block_buf.len()
            {
            let out_len = self.decompressor.decompress(&self.block_buf[..compressed_size], out)?;
            if out_len != block_size as usize {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidData,
                    format!(
                        "Deflate decompression failed: {:?} (in: {}, out: {})",
                        status, in_size, out_size
                        "Block {} decompression failed: in: {}, out: {}",
                        block_idx, compressed_size, out_len
                    ),
                ));
            }
        } else {
            // Copy uncompressed block
            out.copy_from_slice(self.block_buf.as_slice());
            out.copy_from_slice(self.block_buf.as_ref());
        }

        match partition {
            Some(partition) if partition.has_encryption => Ok(Block::PartEncrypted),
            _ => Ok(Block::Raw),
        }
        Ok(Block::new(block_idx, block_size, BlockKind::Raw))
    }

    fn block_size_internal(&self) -> u32 { self.header.block_size.get() }
    fn block_size(&self) -> u32 { self.header.block_size.get() }

    fn meta(&self) -> DiscMeta {
        DiscMeta {
            format: Format::Gcz,
            compression: Compression::Deflate,
            compression: Compression::Deflate(0),
            block_size: Some(self.header.block_size.get()),
            lossless: true,
            disc_size: Some(self.header.disc_size.get()),
@ -186,3 +191,174 @@ impl BlockIO for DiscIOGCZ {
        }
    }
}

struct BlockProcessorGCZ {
    inner: DiscReader,
    header: GCZHeader,
    compressor: Compressor,
}

impl Clone for BlockProcessorGCZ {
    fn clone(&self) -> Self {
        Self {
            inner: self.inner.clone(),
            header: self.header.clone(),
            compressor: self.compressor.clone(),
        }
    }
}

struct BlockMetaGCZ {
    is_compressed: bool,
    block_hash: u32,
}

impl BlockProcessor for BlockProcessorGCZ {
    type BlockMeta = BlockMetaGCZ;

    fn process_block(&mut self, block_idx: u32) -> io::Result<BlockResult<Self::BlockMeta>> {
        let block_size = self.header.block_size.get();
        self.inner.seek(SeekFrom::Start(block_idx as u64 * block_size as u64))?;
        let (mut block_data, disc_data) = read_block(&mut self.inner, block_size as usize)?;

        // Try to compress block
        let is_compressed = if self.compressor.compress(&block_data)? {
            println!("Compressed block {} to {}", block_idx, self.compressor.buffer.len());
            block_data = Bytes::copy_from_slice(self.compressor.buffer.as_slice());
            true
        } else {
            false
        };

        let block_hash = adler32_slice(block_data.as_ref());
        Ok(BlockResult {
            block_idx,
            disc_data,
            block_data,
            meta: BlockMetaGCZ { is_compressed, block_hash },
        })
    }
}

#[derive(Clone)]
pub struct DiscWriterGCZ {
    inner: DiscReader,
    header: GCZHeader,
    compression: Compression,
}

pub const DEFAULT_BLOCK_SIZE: u32 = 0x8000; // 32 KiB

// Level 0 will be converted to the default level in [`Compression::validate_level`]
pub const DEFAULT_COMPRESSION: Compression = Compression::Deflate(0);
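A standalone sketch of the GCZ block map encoding used by the reader and writer above: bit 63 of an entry marks an uncompressed block, and a block's stored size is the distance from its offset to the next entry's offset (or to the total compressed size for the last block). The function name is illustrative only.

fn decode_entry(block_map: &[u64], idx: usize, compressed_size: u64) -> (u64, bool, u64) {
    const FLAG: u64 = 1 << 63; // set = stored uncompressed
    let raw = block_map[idx];
    let (offset, compressed) = (raw & !FLAG, raw & FLAG == 0);
    let next = block_map.get(idx + 1).copied().unwrap_or(compressed_size) & !FLAG;
    (offset, compressed, next - offset) // (file offset, is compressed, stored size)
}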
impl DiscWriterGCZ {
    pub fn new(inner: DiscReader, options: &FormatOptions) -> Result<Box<dyn DiscWriter>> {
        if options.format != Format::Gcz {
            return Err(Error::DiscFormat("Invalid format for GCZ writer".to_string()));
        }
        if !matches!(options.compression, Compression::Deflate(_)) {
            return Err(Error::DiscFormat(format!(
                "Unsupported compression for GCZ: {:?}",
                options.compression
            )));
        }

        let block_size = options.block_size;
        if block_size < SECTOR_SIZE as u32 || block_size % SECTOR_SIZE as u32 != 0 {
            return Err(Error::DiscFormat("Invalid block size for GCZ".to_string()));
        }

        let disc_header = inner.header();
        let disc_size = inner.disc_size();
        let block_count = disc_size.div_ceil(block_size as u64) as u32;

        // Generate header
        let header = GCZHeader {
            magic: GCZ_MAGIC,
            disc_type: if disc_header.is_wii() { 1 } else { 0 }.into(),
            compressed_size: 0.into(), // Written when finalized
            disc_size: disc_size.into(),
            block_size: block_size.into(),
            block_count: block_count.into(),
        };

        Ok(Box::new(Self { inner, header, compression: options.compression }))
    }
}

impl DiscWriter for DiscWriterGCZ {
    fn process(
        &self,
        data_callback: &mut DataCallback,
        options: &ProcessOptions,
    ) -> Result<DiscFinalization> {
        let disc_size = self.header.disc_size.get();
        let block_size = self.header.block_size.get();
        let block_count = self.header.block_count.get();

        // Create hashers
        let digest = DigestManager::new(options);

        // Generate block map and hashes
        let mut block_map = <[U64]>::new_box_zeroed_with_elems(block_count as usize)?;
        let mut block_hashes = <[U32]>::new_box_zeroed_with_elems(block_count as usize)?;

        let header_data_size = size_of::<GCZHeader>()
            + size_of_val(block_map.as_ref())
            + size_of_val(block_hashes.as_ref());
        let mut header_data = BytesMut::with_capacity(header_data_size);
        header_data.put_slice(self.header.as_bytes());
        header_data.resize(header_data_size, 0);
        data_callback(header_data.freeze(), 0, disc_size).context("Failed to write GCZ header")?;

        let mut input_position = 0;
        let mut data_position = 0;
        par_process(
            || BlockProcessorGCZ {
                inner: self.inner.clone(),
                header: self.header.clone(),
                compressor: Compressor::new(self.compression, block_size as usize),
            },
            block_count,
            options.processor_threads,
            |block| {
                // Update hashers
                let disc_data_len = block.disc_data.len() as u64;
                digest.send(block.disc_data);

                // Update block map and hash
                if block.meta.is_compressed {
                    block_map[block.block_idx as usize] = data_position.into();
                } else {
                    block_map[block.block_idx as usize] = (data_position | (1 << 63)).into();
                }
                block_hashes[block.block_idx as usize] = block.meta.block_hash.into();

                // Write block data
                input_position += disc_data_len;
                data_position += block.block_data.len() as u64;
                data_callback(block.block_data, input_position, disc_size)
                    .with_context(|| format!("Failed to write block {}", block.block_idx))?;
                Ok(())
            },
        )?;

        // Write updated header, block map and hashes
        let mut header = self.header.clone();
        header.compressed_size = data_position.into();
        let mut header_data = BytesMut::with_capacity(header_data_size);
        header_data.extend_from_slice(header.as_bytes());
        header_data.extend_from_slice(block_map.as_bytes());
        header_data.extend_from_slice(block_hashes.as_bytes());

        let mut finalization =
            DiscFinalization { header: header_data.freeze(), ..Default::default() };
        finalization.apply_digests(&digest.finish());
        Ok(finalization)
    }

    fn progress_bound(&self) -> u64 { self.header.disc_size.get() }

    fn weight(&self) -> DiscWriterWeight { DiscWriterWeight::Heavy }
}
@ -1,68 +1,97 @@
use std::{
    io,
    io::{Read, Seek, SeekFrom},
    io::{BufRead, Read, Seek, SeekFrom},
};

use crate::{
    disc::SECTOR_SIZE,
    io::{
        block::{Block, BlockIO, DiscStream, PartitionInfo},
        Format,
    common::Format,
    disc::{
        reader::DiscReader,
        writer::{DataCallback, DiscWriter},
        SECTOR_SIZE,
    },
    DiscMeta, Result, ResultContext,
    io::block::{Block, BlockKind, BlockReader},
    read::{DiscMeta, DiscStream},
    util::digest::DigestManager,
    write::{DiscFinalization, DiscWriterWeight, ProcessOptions},
    Result, ResultContext,
};

#[derive(Clone)]
pub struct DiscIOISO {
pub struct BlockReaderISO {
    inner: Box<dyn DiscStream>,
    stream_len: u64,
    disc_size: u64,
}

impl DiscIOISO {
impl BlockReaderISO {
    pub fn new(mut inner: Box<dyn DiscStream>) -> Result<Box<Self>> {
        let stream_len = inner.seek(SeekFrom::End(0)).context("Determining stream length")?;
        inner.seek(SeekFrom::Start(0)).context("Seeking to start")?;
        Ok(Box::new(Self { inner, stream_len }))
        let disc_size = inner.seek(SeekFrom::End(0)).context("Determining stream length")?;
        Ok(Box::new(Self { inner, disc_size }))
    }
}

impl BlockIO for DiscIOISO {
    fn read_block_internal(
        &mut self,
        out: &mut [u8],
        block: u32,
        partition: Option<&PartitionInfo>,
    ) -> io::Result<Block> {
        let offset = block as u64 * SECTOR_SIZE as u64;
        if offset >= self.stream_len {
impl BlockReader for BlockReaderISO {
    fn read_block(&mut self, out: &mut [u8], sector: u32) -> io::Result<Block> {
        let pos = sector as u64 * SECTOR_SIZE as u64;
        if pos >= self.disc_size {
            // End of file
            return Ok(Block::Zero);
            return Ok(Block::sector(sector, BlockKind::None));
        }

        self.inner.seek(SeekFrom::Start(offset))?;
        if offset + SECTOR_SIZE as u64 > self.stream_len {
        self.inner.seek(SeekFrom::Start(pos))?;
        if pos + SECTOR_SIZE as u64 > self.disc_size {
            // If the last block is not a full sector, fill the rest with zeroes
            let read = (self.stream_len - offset) as usize;
            let read = (self.disc_size - pos) as usize;
            self.inner.read_exact(&mut out[..read])?;
            out[read..].fill(0);
        } else {
            self.inner.read_exact(out)?;
        }

        match partition {
            Some(partition) if partition.has_encryption => Ok(Block::PartEncrypted),
            _ => Ok(Block::Raw),
        }
        Ok(Block::sector(sector, BlockKind::Raw))
    }

    fn block_size_internal(&self) -> u32 { SECTOR_SIZE as u32 }
    fn block_size(&self) -> u32 { SECTOR_SIZE as u32 }

    fn meta(&self) -> DiscMeta {
        DiscMeta {
            format: Format::Iso,
            lossless: true,
            disc_size: Some(self.stream_len),
            disc_size: Some(self.disc_size),
            ..Default::default()
        }
    }
}

impl DiscWriter for DiscReader {
    fn process(
        &self,
        data_callback: &mut DataCallback,
        options: &ProcessOptions,
    ) -> Result<DiscFinalization> {
        let mut reader = self.clone();
        let digest = DigestManager::new(options);
        loop {
            let pos = reader.position();
            let data = reader
                .fill_buf_internal()
                .with_context(|| format!("Reading disc data at offset {pos}"))?;
            let len = data.len();
            if len == 0 {
                break;
            }
            // Update hashers
            digest.send(data.clone());
            data_callback(data, pos + len as u64, reader.disc_size())
                .context("Failed to write disc data")?;
            reader.consume(len);
        }
        let mut finalization = DiscFinalization::default();
        finalization.apply_digests(&digest.finish());
        Ok(finalization)
    }

    fn progress_bound(&self) -> u64 { self.disc_size() }

    fn weight(&self) -> DiscWriterWeight { DiscWriterWeight::Light }
}
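The write loop above follows the standard fill_buf/consume pattern from std::io::BufRead; a minimal self-contained sketch of the same idea (names here are illustrative, not from the crate):

use std::io::{self, BufRead, Write};

// Repeatedly borrow the reader's internal buffer, hand it to a sink, then
// mark it consumed; an empty buffer signals EOF.
fn stream_all(reader: &mut impl BufRead, sink: &mut impl Write) -> io::Result<u64> {
    let mut total = 0u64;
    loop {
        let data = reader.fill_buf()?;
        if data.is_empty() {
            break; // EOF
        }
        let len = data.len();
        sink.write_all(data)?;
        reader.consume(len);
        total += len as u64;
    }
    Ok(total)
}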
@@ -0,0 +1,51 @@
use std::{io, path::Path, sync::Arc};

use memmap2::Mmap;

use crate::{util::impl_read_for_bufread, Result, ResultContext};

pub struct MappedFileReader {
    inner: Arc<Mmap>,
    pos: usize,
}

impl Clone for MappedFileReader {
    fn clone(&self) -> Self { Self { inner: self.inner.clone(), pos: 0 } }
}

impl MappedFileReader {
    #[expect(unused)]
    pub fn new(path: &Path) -> Result<Self> {
        let file = std::fs::File::open(path)
            .with_context(|| format!("Failed to open file {}", path.display()))?;
        let inner = unsafe { Mmap::map(&file) }
            .with_context(|| format!("Failed to map file {}", path.display()))?;
        Ok(Self { inner: Arc::new(inner), pos: 0 })
    }
}

impl io::BufRead for MappedFileReader {
    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        if self.pos < self.inner.len() {
            Ok(&self.inner[self.pos..])
        } else {
            Ok(&[])
        }
    }

    fn consume(&mut self, amt: usize) { self.pos = self.pos.saturating_add(amt); }
}

impl_read_for_bufread!(MappedFileReader);

impl io::Seek for MappedFileReader {
    fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
        let pos = match pos {
            io::SeekFrom::Start(pos) => pos,
            io::SeekFrom::End(pos) => (self.inner.len() as u64).saturating_add_signed(pos),
            io::SeekFrom::Current(off) => (self.pos as u64).saturating_add_signed(off),
        };
        self.pos = pos.try_into().map_err(|_| io::ErrorKind::UnexpectedEof)?;
        Ok(pos)
    }
}
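Since `MappedFileReader` implements `BufRead`, it composes with `fill_buf`/`consume` copy loops such as the `nod::util::buf_copy` referenced in the lib.rs docs later in this diff. A sketch of that loop shape (illustrative, not the crate's exact code):

```rust
use std::io::{self, BufRead, Write};

// Copy from any BufRead (e.g. MappedFileReader) into a Write, borrowing the
// reader's internal buffer instead of allocating an intermediate one.
fn buf_copy<R, W>(reader: &mut R, writer: &mut W) -> io::Result<u64>
where
    R: BufRead + ?Sized,
    W: Write + ?Sized,
{
    let mut copied = 0u64;
    loop {
        let buf = reader.fill_buf()?;
        if buf.is_empty() {
            break; // end of stream
        }
        let len = buf.len();
        writer.write_all(buf)?;
        reader.consume(len);
        copied += len as u64;
    }
    Ok(copied)
}
```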
@@ -1,142 +1,14 @@
//! Disc file format related logic (CISO, NFS, WBFS, WIA, etc.)

use std::fmt;

pub(crate) mod block;
pub(crate) mod ciso;
#[cfg(feature = "compress-zlib")]
pub(crate) mod gcz;
pub(crate) mod iso;
pub(crate) mod mapped;
pub(crate) mod nfs;
pub(crate) mod nkit;
pub(crate) mod split;
pub(crate) mod tgc;
pub(crate) mod wbfs;
pub(crate) mod wia;

/// SHA-1 hash bytes
pub type HashBytes = [u8; 20];

/// AES key bytes
pub type KeyBytes = [u8; 16];

/// Magic bytes
pub type MagicBytes = [u8; 4];

/// The disc file format.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum Format {
    /// ISO / GCM (GameCube master disc)
    #[default]
    Iso,
    /// CISO (Compact ISO)
    Ciso,
    /// GCZ
    Gcz,
    /// NFS (Wii U VC)
    Nfs,
    /// RVZ
    Rvz,
    /// WBFS
    Wbfs,
    /// WIA
    Wia,
    /// TGC
    Tgc,
}

impl fmt::Display for Format {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Format::Iso => write!(f, "ISO"),
            Format::Ciso => write!(f, "CISO"),
            Format::Gcz => write!(f, "GCZ"),
            Format::Nfs => write!(f, "NFS"),
            Format::Rvz => write!(f, "RVZ"),
            Format::Wbfs => write!(f, "WBFS"),
            Format::Wia => write!(f, "WIA"),
            Format::Tgc => write!(f, "TGC"),
        }
    }
}

/// The disc file format's compression algorithm.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum Compression {
    /// No compression
    #[default]
    None,
    /// BZIP2
    Bzip2,
    /// Deflate (GCZ only)
    Deflate,
    /// LZMA
    Lzma,
    /// LZMA2
    Lzma2,
    /// Purge (WIA only)
    Purge,
    /// Zstandard
    Zstandard,
}

impl fmt::Display for Compression {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Compression::None => write!(f, "None"),
            Compression::Bzip2 => write!(f, "BZIP2"),
            Compression::Deflate => write!(f, "Deflate"),
            Compression::Lzma => write!(f, "LZMA"),
            Compression::Lzma2 => write!(f, "LZMA2"),
            Compression::Purge => write!(f, "Purge"),
            Compression::Zstandard => write!(f, "Zstandard"),
        }
    }
}

/// Extra metadata about the underlying disc file format.
#[derive(Debug, Clone, Default)]
pub struct DiscMeta {
    /// The disc file format.
    pub format: Format,
    /// The format's compression algorithm.
    pub compression: Compression,
    /// If the format uses blocks, the block size in bytes.
    pub block_size: Option<u32>,
    /// Whether Wii partitions are stored decrypted in the format.
    pub decrypted: bool,
    /// Whether the format omits Wii partition data hashes.
    pub needs_hash_recovery: bool,
    /// Whether the format supports recovering the original disc data losslessly.
    pub lossless: bool,
    /// The original disc's size in bytes, if stored by the format.
    pub disc_size: Option<u64>,
    /// The original disc's CRC32 hash, if stored by the format.
    pub crc32: Option<u32>,
    /// The original disc's MD5 hash, if stored by the format.
    pub md5: Option<[u8; 16]>,
    /// The original disc's SHA-1 hash, if stored by the format.
    pub sha1: Option<[u8; 20]>,
    /// The original disc's XXH64 hash, if stored by the format.
    pub xxhash64: Option<u64>,
}

/// Encrypts data in-place using AES-128-CBC with the given key and IV.
/// Requires the data length to be a multiple of the AES block size (16 bytes).
pub fn aes_cbc_encrypt(key: &KeyBytes, iv: &KeyBytes, data: &mut [u8]) {
    use aes::cipher::{block_padding::NoPadding, BlockEncryptMut, KeyIvInit};
    <cbc::Encryptor<aes::Aes128>>::new(key.into(), iv.into())
        .encrypt_padded_mut::<NoPadding>(data, data.len())
        .unwrap();
}

/// Decrypts data in-place using AES-128-CBC with the given key and IV.
/// Requires the data length to be a multiple of the AES block size (16 bytes).
pub fn aes_cbc_decrypt(key: &KeyBytes, iv: &KeyBytes, data: &mut [u8]) {
    use aes::cipher::{block_padding::NoPadding, BlockDecryptMut, KeyIvInit};
    <cbc::Decryptor<aes::Aes128>>::new(key.into(), iv.into())
        .decrypt_padded_mut::<NoPadding>(data)
        .unwrap();
}
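A usage sketch for the two helpers above (assuming they are in scope); the buffer length must be a multiple of the 16-byte AES block, and the same key/IV pair inverts the operation:

```rust
// Round-trip a 32-byte buffer through the in-place CBC helpers.
fn roundtrip() {
    let key: KeyBytes = [0u8; 16];
    let iv: KeyBytes = [0u8; 16];
    let original = *b"0123456789abcdef0123456789abcdef";
    let mut buf = original;
    aes_cbc_encrypt(&key, &iv, &mut buf);
    assert_ne!(buf, original); // ciphertext differs from plaintext
    aes_cbc_decrypt(&key, &iv, &mut buf);
    assert_eq!(buf, original); // decrypt(encrypt(x)) == x
}
```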
@@ -4,22 +4,21 @@ use std::{
    io::{BufReader, Read, Seek, SeekFrom},
    mem::size_of,
    path::{Component, Path, PathBuf},
    sync::Arc,
};

use zerocopy::{big_endian::U32, FromBytes, FromZeros, Immutable, IntoBytes, KnownLayout};

use crate::{
    array_ref_mut,
    common::{Format, KeyBytes, MagicBytes},
    disc::SECTOR_SIZE,
    io::{
        aes_cbc_decrypt,
        block::{Block, BlockIO, PartitionInfo, NFS_MAGIC},
        block::{Block, BlockKind, BlockReader, NFS_MAGIC},
        split::SplitFileReader,
        Format, KeyBytes, MagicBytes,
    },
    static_assert,
    util::read::read_from,
    DiscMeta, Error, Result, ResultContext,
    read::DiscMeta,
    util::{aes::aes_cbc_decrypt, array_ref_mut, read::read_arc, static_assert},
    Error, Result, ResultContext,
};

pub const NFS_END_MAGIC: MagicBytes = *b"SGGE";

@@ -84,19 +83,19 @@ impl NFSHeader {
}

#[derive(Clone)]
pub struct DiscIONFS {
pub struct BlockReaderNFS {
    inner: SplitFileReader,
    header: NFSHeader,
    header: Arc<NFSHeader>,
    raw_size: u64,
    disc_size: u64,
    key: KeyBytes,
}

impl DiscIONFS {
impl BlockReaderNFS {
    pub fn new(directory: &Path) -> Result<Box<Self>> {
        let mut disc_io = Box::new(Self {
            inner: SplitFileReader::empty(),
            header: NFSHeader::new_zeroed(),
            header: Arc::new(NFSHeader::new_zeroed()),
            raw_size: 0,
            disc_size: 0,
            key: [0; 16],

@@ -106,18 +105,13 @@
    }
}

impl BlockIO for DiscIONFS {
    fn read_block_internal(
        &mut self,
        out: &mut [u8],
        sector: u32,
        partition: Option<&PartitionInfo>,
    ) -> io::Result<Block> {
impl BlockReader for BlockReaderNFS {
    fn read_block(&mut self, out: &mut [u8], sector: u32) -> io::Result<Block> {
        // Calculate physical sector
        let phys_sector = self.header.phys_sector(sector);
        if phys_sector == u32::MAX {
            // Logical zero sector
            return Ok(Block::Zero);
            return Ok(Block::sector(sector, BlockKind::Raw));
        }

        // Read sector

@@ -130,15 +124,10 @@ impl BlockIO for DiscIONFS {
        *array_ref_mut!(iv, 12, 4) = sector.to_be_bytes();
        aes_cbc_decrypt(&self.key, &iv, out);

        match partition {
            Some(partition) if partition.has_encryption => {
                Ok(Block::PartDecrypted { has_hashes: true })
            }
            _ => Ok(Block::Raw),
        }
        Ok(Block::sector(sector, BlockKind::PartDecrypted { hash_block: true }))
    }

    fn block_size_internal(&self) -> u32 { SECTOR_SIZE as u32 }
    fn block_size(&self) -> u32 { SECTOR_SIZE as u32 }

    fn meta(&self) -> DiscMeta {
        DiscMeta { format: Format::Nfs, decrypted: true, ..Default::default() }

@@ -168,7 +157,7 @@ fn get_nfs(directory: &Path, num: u32) -> Result<PathBuf> {
    }
}

impl DiscIONFS {
impl BlockReaderNFS {
    pub fn load_files(&mut self, directory: &Path) -> Result<()> {
        {
            // Load key file

@@ -201,7 +190,7 @@ impl DiscIONFS {
        let mut file = BufReader::new(
            File::open(&path).with_context(|| format!("Opening file {}", path.display()))?,
        );
        let header: NFSHeader = read_from(&mut file)
        let header: Arc<NFSHeader> = read_arc(&mut file)
            .with_context(|| format!("Reading NFS header from file {}", path.display()))?;
        header.validate()?;
        // log::debug!("{:?}", header);
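The decryption above builds a per-sector IV by writing the big-endian sector index into bytes 12..16; assuming the elided setup zero-initializes the IV (not shown in this hunk), the construction is equivalent to this sketch, using plain slice ops in place of `array_ref_mut!`:

```rust
// Per-sector AES-CBC IV for NFS: 12 zero bytes, then the sector index
// in big-endian (assumption: the elided lines zero the IV first).
fn nfs_sector_iv(sector: u32) -> [u8; 16] {
    let mut iv = [0u8; 16];
    iv[12..16].copy_from_slice(&sector.to_be_bytes());
    iv
}
```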
@@ -1,13 +1,15 @@
use std::{
    io,
    io::{Read, Seek, SeekFrom},
    io::{Read, Seek, SeekFrom, Write},
};

use tracing::warn;

use crate::{
    common::MagicBytes,
    disc::DL_DVD_SIZE,
    io::MagicBytes,
    read::DiscMeta,
    util::read::{read_from, read_u16_be, read_u32_be, read_u64_be, read_vec},
    DiscMeta,
};

#[allow(unused)]

@@ -56,19 +58,32 @@ const fn calc_header_size(version: u8, flags: u16, key_len: u32) -> usize {
    size
}

#[allow(unused)]
#[derive(Debug, Clone)]
pub struct NKitHeader {
    pub version: u8,
    pub flags: u16,
    pub size: Option<u64>,
    pub crc32: Option<u32>,
    pub md5: Option<[u8; 16]>,
    pub sha1: Option<[u8; 20]>,
    pub xxhash64: Option<u64>,
    pub xxh64: Option<u64>,
    /// Bitstream of blocks that are junk data
    pub junk_bits: Option<Vec<u8>>,
    pub block_size: u32,
    pub junk_bits: Option<JunkBits>,
    pub encrypted: bool,
}

impl Default for NKitHeader {
    fn default() -> Self {
        Self {
            version: 2,
            size: None,
            crc32: None,
            md5: None,
            sha1: None,
            xxh64: None,
            junk_bits: None,
            encrypted: false,
        }
    }
}

const VERSION_PREFIX: [u8; 7] = *b"NKIT v";

@@ -82,7 +97,7 @@ impl NKitHeader {
        match NKitHeader::read_from(reader, block_size, has_junk_bits) {
            Ok(header) => Some(header),
            Err(e) => {
                log::warn!("Failed to read NKit header: {}", e);
                warn!("Failed to read NKit header: {}", e);
                None
            }
        }

@@ -136,25 +151,20 @@ impl NKitHeader {
        let sha1 = (flags & NKitHeaderFlags::Sha1 as u16 != 0)
            .then(|| read_from::<[u8; 20], _>(&mut inner))
            .transpose()?;
        let xxhash64 = (flags & NKitHeaderFlags::Xxhash64 as u16 != 0)
        let xxh64 = (flags & NKitHeaderFlags::Xxhash64 as u16 != 0)
            .then(|| read_u64_be(&mut inner))
            .transpose()?;

        let junk_bits = if has_junk_bits {
            let n = DL_DVD_SIZE.div_ceil(block_size as u64).div_ceil(8);
            Some(read_vec(reader, n as usize)?)
        } else {
            None
        };
        let junk_bits =
            if has_junk_bits { Some(JunkBits::read_from(reader, block_size)?) } else { None };

        Ok(Self { version, flags, size, crc32, md5, sha1, xxhash64, junk_bits, block_size })
        let encrypted = flags & NKitHeaderFlags::Encrypted as u16 != 0;

        Ok(Self { version, size, crc32, md5, sha1, xxh64, junk_bits, encrypted })
    }

    pub fn is_junk_block(&self, block: u32) -> Option<bool> {
        self.junk_bits
            .as_ref()
            .and_then(|v| v.get((block / 8) as usize))
            .map(|&b| b & (1 << (7 - (block & 7))) != 0)
        self.junk_bits.as_ref().map(|v| v.get(block))
    }

    pub fn apply(&self, meta: &mut DiscMeta) {

@@ -164,6 +174,128 @@ impl NKitHeader {
        meta.crc32 = self.crc32;
        meta.md5 = self.md5;
        meta.sha1 = self.sha1;
        meta.xxhash64 = self.xxhash64;
        meta.xxh64 = self.xxh64;
    }

    fn calc_flags(&self) -> u16 {
        let mut flags = 0;
        if self.size.is_some() {
            flags |= NKitHeaderFlags::Size as u16;
        }
        if self.crc32.is_some() {
            flags |= NKitHeaderFlags::Crc32 as u16;
        }
        if self.md5.is_some() {
            flags |= NKitHeaderFlags::Md5 as u16;
        }
        if self.sha1.is_some() {
            flags |= NKitHeaderFlags::Sha1 as u16;
        }
        if self.xxh64.is_some() {
            flags |= NKitHeaderFlags::Xxhash64 as u16;
        }
        if self.encrypted {
            flags |= NKitHeaderFlags::Encrypted as u16;
        }
        flags
    }

    pub fn write_to<W>(&self, w: &mut W) -> io::Result<()>
    where W: Write + ?Sized {
        w.write_all(&VERSION_PREFIX)?;
        w.write_all(&[b'0' + self.version])?;
        let flags = self.calc_flags();
        match self.version {
            1 => {}
            2 => {
                let header_size = calc_header_size(self.version, flags, 0) as u16;
                w.write_all(&header_size.to_be_bytes())?;
                w.write_all(&flags.to_be_bytes())?;
            }
            version => {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidData,
                    format!("Unsupported NKit header version: {}", version),
                ));
            }
        };
        if let Some(size) = self.size {
            w.write_all(&size.to_be_bytes())?;
        }
        if let Some(crc32) = self.crc32 {
            w.write_all(&crc32.to_be_bytes())?;
        } else if self.version == 1 {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "Missing CRC32 in NKit v1 header",
            ));
        }
        if let Some(md5) = self.md5 {
            w.write_all(&md5)?;
        } else if self.version == 1 {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "Missing MD5 in NKit v1 header",
            ));
        }
        if let Some(sha1) = self.sha1 {
            w.write_all(&sha1)?;
        } else if self.version == 1 {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "Missing SHA1 in NKit v1 header",
            ));
        }
        if let Some(xxh64) = self.xxh64 {
            w.write_all(&xxh64.to_be_bytes())?;
        } else if self.version == 1 {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "Missing XXHash64 in NKit header",
            ));
        }
        if let Some(junk_bits) = &self.junk_bits {
            junk_bits.write_to(w)?;
        }
        Ok(())
    }
}

#[derive(Debug, Clone)]
pub struct JunkBits(Vec<u8>);

impl JunkBits {
    pub fn new(block_size: u32) -> Self { Self(vec![0; Self::len(block_size)]) }

    pub fn read_from<R>(reader: &mut R, block_size: u32) -> io::Result<Self>
    where R: Read + ?Sized {
        Ok(Self(read_vec(reader, Self::len(block_size))?))
    }

    pub fn write_to<W>(&self, w: &mut W) -> io::Result<()>
    where W: Write + ?Sized {
        w.write_all(&self.0)
    }

    pub fn set(&mut self, block: u32, is_junk: bool) {
        let Some(byte) = self.0.get_mut((block / 8) as usize) else {
            return;
        };
        if is_junk {
            *byte |= 1 << (7 - (block & 7));
        } else {
            *byte &= !(1 << (7 - (block & 7)));
        }
    }

    pub fn get(&self, block: u32) -> bool {
        let Some(&byte) = self.0.get((block / 8) as usize) else {
            return false;
        };
        byte & (1 << (7 - (block & 7))) != 0
    }

    fn len(block_size: u32) -> usize {
        DL_DVD_SIZE.div_ceil(block_size as u64).div_ceil(8) as usize
    }
}
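A usage sketch for `JunkBits` as defined above: one bit per block, packed MSB-first within each byte, matching the shifts in `set`/`get` (the block size here is a hypothetical example):

```rust
// Exercise the JunkBits bit-packing: block 0 is byte 0 bit 7 (MSB),
// block 9 is byte 1 bit 6, and so on.
fn junk_bits_demo() {
    let mut bits = JunkBits::new(0x8000);
    bits.set(0, true);
    bits.set(9, true);
    assert!(bits.get(0) && bits.get(9));
    assert!(!bits.get(1));
    bits.set(0, false);
    assert!(!bits.get(0));
}
```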
@@ -1,11 +1,12 @@
use std::{
    cmp::min,
    fs::File,
    io,
    io::{BufReader, Read, Seek, SeekFrom},
    path::{Path, PathBuf},
};

use tracing::instrument;

use crate::{ErrorContext, Result, ResultContext};

#[derive(Debug)]

@@ -101,10 +102,9 @@ impl SplitFileReader {
    }

    pub fn len(&self) -> u64 { self.files.last().map_or(0, |f| f.begin + f.size) }
}

impl Read for SplitFileReader {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
    #[instrument(name = "SplitFileReader::check_open_file", skip_all)]
    fn check_open_file(&mut self) -> io::Result<Option<&mut Split<BufReader<File>>>> {
        if self.open_file.is_none() || !self.open_file.as_ref().unwrap().contains(self.pos) {
            self.open_file = if let Some(split) = self.files.iter().find(|f| f.contains(self.pos)) {
                let mut file = BufReader::new(File::open(&split.inner)?);

@@ -115,10 +115,18 @@ impl Read for SplitFileReader {
                None
            };
        }
        let Some(split) = self.open_file.as_mut() else {
        Ok(self.open_file.as_mut())
    }
}

impl Read for SplitFileReader {
    #[instrument(name = "SplitFileReader::read", skip_all)]
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let pos = self.pos;
        let Some(split) = self.check_open_file()? else {
            return Ok(0);
        };
        let to_read = min(buf.len(), (split.begin + split.size - self.pos) as usize);
        let to_read = buf.len().min((split.begin + split.size - pos) as usize);
        let read = split.inner.read(&mut buf[..to_read])?;
        self.pos += read as u64;
        Ok(read)

@@ -126,6 +134,7 @@ impl Read for SplitFileReader {
}

impl Seek for SplitFileReader {
    #[instrument(name = "SplitFileReader::seek", skip_all)]
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        self.pos = match pos {
            SeekFrom::Start(pos) => pos,
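The read path above clamps each read to the current split's extent, so a single `read` never crosses a file boundary; a simplified sketch of that bounds math (field names mirror `Split`, no I/O):

```rust
// Per-split bounds, as used by check_open_file (contains) and read (clamp).
struct Span {
    begin: u64,
    size: u64,
}

impl Span {
    fn contains(&self, pos: u64) -> bool { pos >= self.begin && pos < self.begin + self.size }

    // Bytes still servable from this split at `pos`; the caller's buffer
    // length is min'd against this before reading.
    fn remaining(&self, pos: u64) -> u64 { self.begin + self.size - pos }
}
```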
@@ -1,19 +1,31 @@
use std::{
    io,
    io::{Read, Seek, SeekFrom},
    mem::size_of,
    io::{BufRead, Read, Seek, SeekFrom},
    sync::Arc,
};

use zerocopy::{big_endian::U32, FromBytes, Immutable, IntoBytes, KnownLayout};
use bytes::{BufMut, Bytes, BytesMut};
use zerocopy::{big_endian::U32, FromBytes, FromZeros, Immutable, IntoBytes, KnownLayout};

use crate::{
    disc::SECTOR_SIZE,
    io::{
        block::{Block, BlockIO, DiscStream, PartitionInfo, TGC_MAGIC},
        Format, MagicBytes,
    build::gc::{insert_junk_data, FileCallback, GCPartitionStream, WriteInfo, WriteKind},
    common::{Compression, Format, MagicBytes, PartitionKind},
    disc::{
        fst::Fst,
        gcn::{read_dol, read_fst},
        reader::DiscReader,
        writer::{DataCallback, DiscWriter},
        DiscHeader, PartitionHeader, SECTOR_SIZE,
    },
    util::read::{read_box_slice, read_from},
    DiscHeader, DiscMeta, Error, Node, PartitionHeader, Result, ResultContext,
    io::block::{Block, BlockKind, BlockReader, TGC_MAGIC},
    read::{DiscMeta, DiscStream, PartitionOptions, PartitionReader},
    util::{
        align_up_32, array_ref,
        read::{read_arc, read_arc_slice, read_from, read_with_zero_fill},
        static_assert,
    },
    write::{DiscFinalization, DiscWriterWeight, FormatOptions, ProcessOptions},
    Error, Result, ResultContext,
};

/// TGC header (big endian)

@@ -46,21 +58,21 @@ struct TGCHeader {
    banner_offset: U32,
    /// Size of the banner
    banner_size: U32,
    /// Original user data offset in the GCM
    gcm_user_offset: U32,
    /// Start of user files in the original GCM
    gcm_files_start: U32,
}

static_assert!(size_of::<TGCHeader>() == 0x38);

const GCM_HEADER_SIZE: usize = 0x100000;

#[derive(Clone)]
pub struct DiscIOTGC {
    inner: Box<dyn DiscStream>,
    stream_len: u64,
    header: TGCHeader,
    fst: Box<[u8]>,
pub struct BlockReaderTGC {
    inner: GCPartitionStream<FileCallbackTGC>,
}

impl DiscIOTGC {
    pub fn new(mut inner: Box<dyn DiscStream>) -> Result<Box<Self>> {
        let stream_len = inner.seek(SeekFrom::End(0)).context("Determining stream length")?;
impl BlockReaderTGC {
    pub fn new(mut inner: Box<dyn DiscStream>) -> Result<Box<dyn BlockReader>> {
        inner.seek(SeekFrom::Start(0)).context("Seeking to start")?;

        // Read header

@@ -68,89 +80,253 @@ impl DiscIOTGC {
        if header.magic != TGC_MAGIC {
            return Err(Error::DiscFormat("Invalid TGC magic".to_string()));
        }
        let disc_size = (header.gcm_files_start.get() + header.user_size.get()) as u64;

        // Read FST and adjust offsets
        // Read disc header and partition header
        inner
            .seek(SeekFrom::Start(header.fst_offset.get() as u64))
            .context("Seeking to TGC FST")?;
        let mut fst = read_box_slice(inner.as_mut(), header.fst_size.get() as usize)
            .context("Reading TGC FST")?;
        let (root_node, _) = Node::ref_from_prefix(&fst)
            .map_err(|_| Error::DiscFormat("Invalid TGC FST".to_string()))?;
        let node_count = root_node.length() as usize;
        let (nodes, _) = <[Node]>::mut_from_prefix_with_elems(&mut fst, node_count)
            .map_err(|_| Error::DiscFormat("Invalid TGC FST".to_string()))?;
        for node in nodes {
            .seek(SeekFrom::Start(header.header_offset.get() as u64))
            .context("Seeking to GCM header")?;
        let raw_header =
            read_arc::<[u8; GCM_HEADER_SIZE], _>(inner.as_mut()).context("Reading GCM header")?;

        let (disc_header, remain) = DiscHeader::ref_from_prefix(raw_header.as_ref())
            .expect("Invalid disc header alignment");
        let disc_header = disc_header.clone();
        let (partition_header, _) =
            PartitionHeader::ref_from_prefix(remain).expect("Invalid partition header alignment");
        let partition_header = partition_header.clone();

        // Read DOL
        inner.seek(SeekFrom::Start(header.dol_offset.get() as u64)).context("Seeking to DOL")?;
        let raw_dol = read_arc_slice::<u8, _>(inner.as_mut(), header.dol_size.get() as usize)
            .context("Reading DOL")?;

        // Read FST
        inner.seek(SeekFrom::Start(header.fst_offset.get() as u64)).context("Seeking to FST")?;
        let raw_fst = read_arc_slice::<u8, _>(inner.as_mut(), header.fst_size.get() as usize)
            .context("Reading FST")?;
        let fst = Fst::new(&raw_fst)?;

        let mut write_info = Vec::with_capacity(5 + fst.num_files());
        write_info.push(WriteInfo {
            kind: WriteKind::Static(raw_header, "sys/header.bin"),
            size: GCM_HEADER_SIZE as u64,
            offset: 0,
        });
        write_info.push(WriteInfo {
            kind: WriteKind::Static(raw_dol, "sys/main.dol"),
            size: header.dol_size.get() as u64,
            offset: partition_header.dol_offset(false),
        });
        write_info.push(WriteInfo {
            kind: WriteKind::Static(raw_fst.clone(), "sys/fst.bin"),
            size: header.fst_size.get() as u64,
            offset: partition_header.fst_offset(false),
        });

        // Collect files
        for (_, node, path) in fst.iter() {
            if node.is_dir() {
                continue;
            }
            write_info.push(WriteInfo {
                kind: WriteKind::File(path),
                size: node.length() as u64,
                offset: node.offset(false),
            });
        }
        write_info.sort_unstable_by(|a, b| a.offset.cmp(&b.offset).then(a.size.cmp(&b.size)));
        let write_info = insert_junk_data(write_info, &partition_header);

        let file_callback = FileCallbackTGC::new(inner, raw_fst, header);
        let disc_id = *array_ref![disc_header.game_id, 0, 4];
        let disc_num = disc_header.disc_num;
        Ok(Box::new(Self {
            inner: GCPartitionStream::new(
                file_callback,
                Arc::from(write_info),
                disc_size,
                disc_id,
                disc_num,
            ),
        }))
    }
}

impl BlockReader for BlockReaderTGC {
    fn read_block(&mut self, out: &mut [u8], sector: u32) -> io::Result<Block> {
        let count = (out.len() / SECTOR_SIZE) as u32;
        self.inner.set_position(sector as u64 * SECTOR_SIZE as u64);
        let read = read_with_zero_fill(&mut self.inner, out)?;
        Ok(Block::sectors(sector, count, if read == 0 { BlockKind::None } else { BlockKind::Raw }))
    }

    fn block_size(&self) -> u32 { SECTOR_SIZE as u32 }

    fn meta(&self) -> DiscMeta {
        DiscMeta { format: Format::Tgc, disc_size: Some(self.inner.len()), ..Default::default() }
    }
}

#[derive(Clone)]
struct FileCallbackTGC {
    inner: Box<dyn DiscStream>,
    fst: Arc<[u8]>,
    header: TGCHeader,
}

impl FileCallbackTGC {
    fn new(inner: Box<dyn DiscStream>, fst: Arc<[u8]>, header: TGCHeader) -> Self {
        Self { inner, fst, header }
    }
}

impl FileCallback for FileCallbackTGC {
    fn read_file(&mut self, out: &mut [u8], name: &str, offset: u64) -> io::Result<()> {
        let fst = Fst::new(&self.fst).map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
        let (_, node) = fst.find(name).ok_or_else(|| {
            io::Error::new(io::ErrorKind::NotFound, format!("File not found in FST: {}", name))
        })?;
        if !node.is_file() {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                format!("Path is a directory: {}", name),
            ));
        }
        // Calculate file offset in TGC
        let file_start = (node.offset(false) as u32 - self.header.gcm_files_start.get())
            + self.header.user_offset.get();
        self.inner.seek(SeekFrom::Start(file_start as u64 + offset))?;
        self.inner.read_exact(out)?;
        Ok(())
    }
}

#[derive(Clone)]
pub struct DiscWriterTGC {
    inner: Box<dyn PartitionReader>,
    header: TGCHeader,
    header_data: Bytes,
    output_size: u64,
}

impl DiscWriterTGC {
    pub fn new(reader: DiscReader, options: &FormatOptions) -> Result<Box<dyn DiscWriter>> {
        if options.format != Format::Tgc {
            return Err(Error::DiscFormat("Invalid format for TGC writer".to_string()));
        }
        if options.compression != Compression::None {
            return Err(Error::DiscFormat("TGC does not support compression".to_string()));
        }

        let mut inner =
            reader.open_partition_kind(PartitionKind::Data, &PartitionOptions::default())?;

        // Read GCM header
        let mut raw_header = <[u8; GCM_HEADER_SIZE]>::new_box_zeroed()?;
        inner.read_exact(raw_header.as_mut()).context("Reading GCM header")?;
        let (_, remain) = DiscHeader::ref_from_prefix(raw_header.as_ref())
            .expect("Invalid disc header alignment");
        let (partition_header, _) =
            PartitionHeader::ref_from_prefix(remain).expect("Invalid partition header alignment");

        // Read DOL
        let raw_dol = read_dol(inner.as_mut(), partition_header, false)?;
        let raw_fst = read_fst(inner.as_mut(), partition_header, false)?;

        // Parse FST
        let fst = Fst::new(&raw_fst)?;
        let mut gcm_files_start = u32::MAX;
        for (_, node, _) in fst.iter() {
            if node.is_file() {
                node.offset = node.offset - header.gcm_user_offset
                    + (header.user_offset - header.header_offset);
                let start = node.offset(false) as u32;
                if start < gcm_files_start {
                    gcm_files_start = start;
                }
            }
        }

        Ok(Box::new(Self { inner, stream_len, header, fst }))
        // Layout system files
        let gcm_header_offset = SECTOR_SIZE as u32;
        let fst_offset = gcm_header_offset + GCM_HEADER_SIZE as u32;
        let dol_offset = align_up_32(fst_offset + partition_header.fst_size.get(), 32);
        let user_size =
            partition_header.user_offset.get() + partition_header.user_size.get() - gcm_files_start;
        let user_end =
            align_up_32(dol_offset + raw_dol.len() as u32 + user_size, SECTOR_SIZE as u32);
        let user_offset = user_end - user_size;

        let header = TGCHeader {
            magic: TGC_MAGIC,
            version: 0.into(),
            header_offset: gcm_header_offset.into(),
            header_size: (GCM_HEADER_SIZE as u32).into(),
            fst_offset: fst_offset.into(),
            fst_size: partition_header.fst_size,
            fst_max_size: partition_header.fst_max_size,
            dol_offset: dol_offset.into(),
            dol_size: (raw_dol.len() as u32).into(),
            user_offset: user_offset.into(),
            user_size: user_size.into(),
            banner_offset: 0.into(),
            banner_size: 0.into(),
            gcm_files_start: gcm_files_start.into(),
        };
        let mut buffer = BytesMut::with_capacity(user_offset as usize);
        buffer.put_slice(header.as_bytes());
        buffer.put_bytes(0, gcm_header_offset as usize - buffer.len());

        // Write GCM header
        buffer.put_slice(raw_header.as_ref());
        buffer.put_bytes(0, fst_offset as usize - buffer.len());

        // Write FST
        buffer.put_slice(raw_fst.as_ref());
        buffer.put_bytes(0, dol_offset as usize - buffer.len());

        // Write DOL
        buffer.put_slice(raw_dol.as_ref());
        buffer.put_bytes(0, user_offset as usize - buffer.len());

        let header_data = buffer.freeze();
        Ok(Box::new(Self { inner, header, header_data, output_size: user_end as u64 }))
    }
}

impl BlockIO for DiscIOTGC {
    fn read_block_internal(
        &mut self,
        out: &mut [u8],
        block: u32,
        partition: Option<&PartitionInfo>,
    ) -> io::Result<Block> {
        let offset = self.header.header_offset.get() as u64 + block as u64 * SECTOR_SIZE as u64;
        if offset >= self.stream_len {
            // End of file
            return Ok(Block::Zero);
impl DiscWriter for DiscWriterTGC {
    fn process(
        &self,
        data_callback: &mut DataCallback,
        _options: &ProcessOptions,
    ) -> Result<DiscFinalization> {
        let mut data_position = self.header.user_offset.get() as u64;
        data_callback(self.header_data.clone(), data_position, self.output_size)
            .context("Failed to write TGC header")?;

        // Write user data serially
        let mut inner = self.inner.clone();
        inner
            .seek(SeekFrom::Start(self.header.gcm_files_start.get() as u64))
            .context("Seeking to GCM files start")?;
        loop {
            // TODO use DiscReader::fill_buf_internal
            let buf = inner
                .fill_buf()
                .with_context(|| format!("Reading disc data at offset {data_position}"))?;
            let len = buf.len();
            if len == 0 {
                break;
            }
            data_position += len as u64;
            data_callback(Bytes::copy_from_slice(buf), data_position, self.output_size)
                .context("Failed to write disc data")?;
            inner.consume(len);
        }

        self.inner.seek(SeekFrom::Start(offset))?;
        if offset + SECTOR_SIZE as u64 > self.stream_len {
            // If the last block is not a full sector, fill the rest with zeroes
            let read = (self.stream_len - offset) as usize;
            self.inner.read_exact(&mut out[..read])?;
            out[read..].fill(0);
        } else {
            self.inner.read_exact(out)?;
        }

        // Adjust internal GCM header
        if block == 0 {
            let partition_header = PartitionHeader::mut_from_bytes(
                &mut out[size_of::<DiscHeader>()
                    ..size_of::<DiscHeader>() + size_of::<PartitionHeader>()],
            )
            .unwrap();
            partition_header.dol_offset = self.header.dol_offset - self.header.header_offset;
            partition_header.fst_offset = self.header.fst_offset - self.header.header_offset;
        }

        // Copy modified FST to output
        if offset + out.len() as u64 > self.header.fst_offset.get() as u64
            && offset < self.header.fst_offset.get() as u64 + self.header.fst_size.get() as u64
        {
            let out_offset = (self.header.fst_offset.get() as u64).saturating_sub(offset) as usize;
            let fst_offset = offset.saturating_sub(self.header.fst_offset.get() as u64) as usize;
            let copy_len =
                (out.len() - out_offset).min(self.header.fst_size.get() as usize - fst_offset);
            out[out_offset..out_offset + copy_len]
                .copy_from_slice(&self.fst[fst_offset..fst_offset + copy_len]);
        }

        match partition {
            Some(partition) if partition.has_encryption => Ok(Block::PartEncrypted),
            _ => Ok(Block::Raw),
        }
        Ok(DiscFinalization::default())
    }

    fn block_size_internal(&self) -> u32 { SECTOR_SIZE as u32 }
    fn progress_bound(&self) -> u64 { self.output_size }

    fn meta(&self) -> DiscMeta {
        DiscMeta {
            format: Format::Tgc,
            lossless: true,
            disc_size: Some(self.stream_len - self.header.header_offset.get() as u64),
            ..Default::default()
        }
    }
    fn weight(&self) -> DiscWriterWeight { DiscWriterWeight::Light }
}
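A worked sketch of the layout arithmetic above, assuming `SECTOR_SIZE` is 0x8000 (the GameCube/Wii 32 KiB sector) and using the `GCM_HEADER_SIZE` (0x100000) from this file with an example FST size; this `align_up_32` body is a typical implementation shown for clarity, not necessarily the crate's:

```rust
// Round `value` up to the next multiple of a power-of-two `align`.
fn align_up_32(value: u32, align: u32) -> u32 { (value + align - 1) & !(align - 1) }

fn tgc_layout_demo() {
    let gcm_header_offset = 0x8000u32; // SECTOR_SIZE
    let fst_offset = gcm_header_offset + 0x100000; // GCM_HEADER_SIZE
    let fst_size = 0x1234u32; // example value
    let dol_offset = align_up_32(fst_offset + fst_size, 32);
    assert_eq!(fst_offset, 0x108000);
    assert_eq!(dol_offset, 0x109240); // 0x109234 rounded up to a 32-byte boundary
}
```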
@@ -2,17 +2,34 @@ use std::{
    io,
    io::{Read, Seek, SeekFrom},
    mem::size_of,
    sync::Arc,
};

use zerocopy::{big_endian::*, FromBytes, Immutable, IntoBytes, KnownLayout};
use bytes::{BufMut, Bytes, BytesMut};
use zerocopy::{big_endian::*, FromBytes, FromZeros, Immutable, IntoBytes, KnownLayout};

use crate::{
    io::{
        block::{Block, BlockIO, DiscStream, PartitionInfo, WBFS_MAGIC},
        nkit::NKitHeader,
        DiscMeta, Format, MagicBytes,
    common::{Compression, Format, MagicBytes},
    disc::{
        reader::DiscReader,
        writer::{
            check_block, par_process, read_block, BlockProcessor, BlockResult, CheckBlockResult,
            DataCallback, DiscWriter,
        },
        SECTOR_SIZE,
    },
    util::read::{read_box_slice, read_from},
    io::{
        block::{Block, BlockKind, BlockReader, WBFS_MAGIC},
        nkit::{JunkBits, NKitHeader},
    },
    read::{DiscMeta, DiscStream},
    util::{
        array_ref,
        digest::DigestManager,
        lfg::LaggedFibonacci,
        read::{read_arc_slice, read_box_slice, read_from},
    },
    write::{DiscFinalization, DiscWriterWeight, FormatOptions, ProcessOptions},
    Error, Result, ResultContext,
};

@@ -23,7 +40,8 @@ struct WBFSHeader {
    num_sectors: U32,
    sector_size_shift: u8,
    block_size_shift: u8,
    _pad: [u8; 2],
    version: u8,
    _pad: u8,
}

impl WBFSHeader {

@@ -36,19 +54,20 @@ impl WBFSHeader {

const DISC_HEADER_SIZE: usize = 0x100;
const NUM_WII_SECTORS: u32 = 143432 * 2; // Double layer discs
const NKIT_HEADER_OFFSET: u64 = 0x10000;

#[derive(Clone)]
pub struct DiscIOWBFS {
pub struct BlockReaderWBFS {
    inner: Box<dyn DiscStream>,
    /// WBFS header
    header: WBFSHeader,
    /// Map of Wii LBAs to WBFS LBAs
    block_map: Box<[U16]>,
    block_map: Arc<[U16]>,
    /// Optional NKit header
    nkit_header: Option<NKitHeader>,
}

impl DiscIOWBFS {
impl BlockReaderWBFS {
    pub fn new(mut inner: Box<dyn DiscStream>) -> Result<Box<Self>> {
        inner.seek(SeekFrom::Start(0)).context("Seeking to start")?;
        let header: WBFSHeader = read_from(inner.as_mut()).context("Reading WBFS header")?;

@@ -81,39 +100,36 @@ impl DiscIOWBFS {
        inner
            .seek(SeekFrom::Start(header.sector_size() as u64 + DISC_HEADER_SIZE as u64))
            .context("Seeking to WBFS LBA table")?; // Skip header
        let block_map: Box<[U16]> = read_box_slice(inner.as_mut(), header.max_blocks() as usize)
        let block_map: Arc<[U16]> = read_arc_slice(inner.as_mut(), header.max_blocks() as usize)
            .context("Reading WBFS LBA table")?;

        // Read NKit header if present (always at 0x10000)
        inner.seek(SeekFrom::Start(0x10000)).context("Seeking to NKit header")?;
        inner.seek(SeekFrom::Start(NKIT_HEADER_OFFSET)).context("Seeking to NKit header")?;
        let nkit_header = NKitHeader::try_read_from(inner.as_mut(), header.block_size(), true);

        Ok(Box::new(Self { inner, header, block_map, nkit_header }))
    }
}

impl BlockIO for DiscIOWBFS {
    fn read_block_internal(
        &mut self,
        out: &mut [u8],
        block: u32,
        partition: Option<&PartitionInfo>,
    ) -> io::Result<Block> {
impl BlockReader for BlockReaderWBFS {
    fn read_block(&mut self, out: &mut [u8], sector: u32) -> io::Result<Block> {
        let block_size = self.header.block_size();
        if block >= self.header.max_blocks() {
            return Ok(Block::Zero);
        let block_idx = ((sector as u64 * SECTOR_SIZE as u64) / block_size as u64) as u32;
        if block_idx >= self.header.max_blocks() {
            // Out of bounds
            return Ok(Block::new(block_idx, block_size, BlockKind::None));
        }

        // Find the block in the map
        let phys_block = self.block_map[block as usize].get();
        let phys_block = self.block_map[block_idx as usize].get();
        if phys_block == 0 {
            // Check if block is junk data
            if self.nkit_header.as_ref().and_then(|h| h.is_junk_block(block)).unwrap_or(false) {
                return Ok(Block::Junk);
            if self.nkit_header.as_ref().and_then(|h| h.is_junk_block(block_idx)).unwrap_or(false) {
                return Ok(Block::new(block_idx, block_size, BlockKind::Junk));
            }

            // Otherwise, read zeroes
            return Ok(Block::Zero);
            return Ok(Block::new(block_idx, block_size, BlockKind::Zero));
        }

        // Read block

@@ -121,13 +137,10 @@ impl BlockIO for DiscIOWBFS {
        self.inner.seek(SeekFrom::Start(block_start))?;
        self.inner.read_exact(out)?;

        match partition {
            Some(partition) if partition.has_encryption => Ok(Block::PartEncrypted),
            _ => Ok(Block::Raw),
        }
        Ok(Block::new(block_idx, block_size, BlockKind::Raw))
    }

    fn block_size_internal(&self) -> u32 { self.header.block_size() }
    fn block_size(&self) -> u32 { self.header.block_size() }

    fn meta(&self) -> DiscMeta {
        let mut result = DiscMeta {

@@ -141,3 +154,228 @@ impl BlockIO for DiscIOWBFS {
        result
    }
}

struct BlockProcessorWBFS {
    inner: DiscReader,
    header: WBFSHeader,
    decrypted_block: Box<[u8]>,
    lfg: LaggedFibonacci,
    disc_id: [u8; 4],
    disc_num: u8,
}

impl Clone for BlockProcessorWBFS {
    fn clone(&self) -> Self {
        let block_size = self.header.block_size() as usize;
        Self {
            inner: self.inner.clone(),
            header: self.header.clone(),
            decrypted_block: <[u8]>::new_box_zeroed_with_elems(block_size).unwrap(),
            lfg: LaggedFibonacci::default(),
            disc_id: self.disc_id,
            disc_num: self.disc_num,
        }
    }
}

impl BlockProcessor for BlockProcessorWBFS {
    type BlockMeta = CheckBlockResult;

    fn process_block(&mut self, block_idx: u32) -> io::Result<BlockResult<Self::BlockMeta>> {
        let block_size = self.header.block_size() as usize;
        let input_position = block_idx as u64 * block_size as u64;
        self.inner.seek(SeekFrom::Start(input_position))?;
        let (block_data, disc_data) = read_block(&mut self.inner, block_size)?;

        // Check if block is zeroed or junk
        let result = match check_block(
            disc_data.as_ref(),
            &mut self.decrypted_block,
            input_position,
            self.inner.partitions(),
            &mut self.lfg,
            self.disc_id,
            self.disc_num,
        )? {
            CheckBlockResult::Normal => {
                BlockResult { block_idx, disc_data, block_data, meta: CheckBlockResult::Normal }
            }
            CheckBlockResult::Zeroed => BlockResult {
                block_idx,
                disc_data,
                block_data: Bytes::new(),
                meta: CheckBlockResult::Zeroed,
            },
            CheckBlockResult::Junk => BlockResult {
                block_idx,
                disc_data,
                block_data: Bytes::new(),
                meta: CheckBlockResult::Junk,
            },
        };
        Ok(result)
    }
}

#[derive(Clone)]
pub struct DiscWriterWBFS {
    inner: DiscReader,
    header: WBFSHeader,
    disc_table: Box<[u8]>,
    block_count: u16,
}

pub const DEFAULT_BLOCK_SIZE: u32 = 0x200000; // 2 MiB

impl DiscWriterWBFS {
    pub fn new(mut inner: DiscReader, options: &FormatOptions) -> Result<Box<dyn DiscWriter>> {
        if options.format != Format::Wbfs {
            return Err(Error::DiscFormat("Invalid format for WBFS writer".to_string()));
        }
        if options.compression != Compression::None {
            return Err(Error::DiscFormat("WBFS does not support compression".to_string()));
        }
        let block_size = options.block_size;
        if block_size < SECTOR_SIZE as u32 || block_size % SECTOR_SIZE as u32 != 0 {
            return Err(Error::DiscFormat("Invalid block size for WBFS".to_string()));
        }
        let sector_size = 512u32;

        let disc_size = inner.disc_size();
        let block_count = disc_size.div_ceil(block_size as u64);
        if block_count > u16::MAX as u64 {
            return Err(Error::DiscFormat("Block size too small".to_string()));
        }
        let block_count = block_count as u16;

        // Create header
        let header = WBFSHeader {
            magic: WBFS_MAGIC,
            num_sectors: 0.into(), // Written during finalization
            sector_size_shift: sector_size.trailing_zeros() as u8,
            block_size_shift: block_size.trailing_zeros() as u8,
            version: 1,
            _pad: 0,
        };

        // Create disc table
        let mut disc_table =
            <[u8]>::new_box_zeroed_with_elems(sector_size as usize - size_of::<WBFSHeader>())?;
        disc_table[0] = 1;

        let mut header_size = size_of::<WBFSHeader>();
        header_size += size_of_val(disc_table.as_ref());
        header_size += DISC_HEADER_SIZE;
        header_size += header.max_blocks() as usize * size_of::<U16>();
        if header_size > block_size as usize {
            return Err(Error::Other("WBFS info too large for block".to_string()));
        }

        inner.seek(SeekFrom::Start(0)).context("Seeking to start")?;
        Ok(Box::new(Self { inner, header, disc_table, block_count }))
    }
}

impl DiscWriter for DiscWriterWBFS {
    fn process(
        &self,
        data_callback: &mut DataCallback,
        options: &ProcessOptions,
    ) -> Result<DiscFinalization> {
        let block_size = self.header.block_size();
        let max_blocks = self.header.max_blocks();
        let mut block_map = <[U16]>::new_box_zeroed_with_elems(max_blocks as usize)?;

        let disc_size = self.inner.disc_size();
        let mut header_data = BytesMut::with_capacity(block_size as usize);
        header_data.put_slice(self.header.as_bytes());
        header_data.put_slice(self.disc_table.as_ref());
        header_data.put_slice(&self.inner.header().as_bytes()[..DISC_HEADER_SIZE]);
        header_data.put_slice(block_map.as_bytes());
        header_data.resize(block_size as usize, 0);
        data_callback(header_data.freeze(), 0, disc_size).context("Failed to write header")?;

        // Determine junk data values
        let disc_header = self.inner.header();
        let disc_id = *array_ref![disc_header.game_id, 0, 4];
        let disc_num = disc_header.disc_num;

        // Create hashers
        let digest = DigestManager::new(options);
        let mut junk_bits = JunkBits::new(block_size);
        let mut input_position = 0;

        let mut phys_block = 1;
        par_process(
            || BlockProcessorWBFS {
                inner: self.inner.clone(),
                header: self.header.clone(),
                decrypted_block: <[u8]>::new_box_zeroed_with_elems(block_size as usize).unwrap(),
                lfg: LaggedFibonacci::default(),
                disc_id,
                disc_num,
            },
            self.block_count as u32,
            options.processor_threads,
            |block| -> Result<()> {
                // Update hashers
                let disc_data_len = block.disc_data.len() as u64;
                digest.send(block.disc_data);

                // Check if block is zeroed or junk
                match block.meta {
                    CheckBlockResult::Normal => {
                        block_map[block.block_idx as usize] = phys_block.into();
                        phys_block += 1;
                    }
                    CheckBlockResult::Zeroed => {}
                    CheckBlockResult::Junk => {
                        junk_bits.set(block.block_idx, true);
                    }
                }

                input_position += disc_data_len;
                data_callback(block.block_data.clone(), input_position, disc_size)
                    .with_context(|| format!("Failed to write block {}", block.block_idx))?;
                Ok(())
            },
        )?;

        // Collect hash results
        let digest_results = digest.finish();
        let mut nkit_header = NKitHeader {
            version: 2,
            size: Some(disc_size),
            crc32: None,
            md5: None,
            sha1: None,
            xxh64: None,
            junk_bits: Some(junk_bits),
            encrypted: true,
        };
        nkit_header.apply_digests(&digest_results);

        // Update header
        let mut header = self.header.clone();
        header.num_sectors = (((phys_block as u64 * header.block_size() as u64)
            / header.sector_size() as u64) as u32)
            .into();
        let mut header_data = BytesMut::with_capacity(block_size as usize);
        header_data.put_slice(header.as_bytes());
        header_data.put_slice(&self.disc_table);
        header_data.put_slice(&self.inner.header().as_bytes()[..DISC_HEADER_SIZE]);
        header_data.put_slice(block_map.as_bytes());
        header_data.resize(NKIT_HEADER_OFFSET as usize, 0);
        let mut w = header_data.writer();
        nkit_header.write_to(&mut w).context("Writing NKit header")?;
        let header_data = w.into_inner().freeze();

        let mut finalization = DiscFinalization { header: header_data, ..Default::default() };
        finalization.apply_digests(&digest_results);
        Ok(finalization)
    }

    fn progress_bound(&self) -> u64 { self.inner.disc_size() }

    fn weight(&self) -> DiscWriterWeight { DiscWriterWeight::Medium }
}
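A sketch of the WBFS size bookkeeping above: sector and block sizes are stored as power-of-two shifts, and `num_sectors` is derived from the blocks actually written during finalization:

```rust
// Verify the shift encoding and the num_sectors derivation with the values
// used in this file (512-byte sectors, 2 MiB DEFAULT_BLOCK_SIZE).
fn wbfs_size_demo() {
    let sector_size = 512u32;
    let block_size = 0x200000u32;
    assert_eq!(sector_size.trailing_zeros(), 9); // sector_size_shift
    assert_eq!(block_size.trailing_zeros(), 21); // block_size_shift

    // One 2 MiB block spans 4096 of the 512-byte WBFS sectors.
    let phys_blocks = 42u64; // example: occupied blocks after finalization
    let num_sectors = phys_blocks * block_size as u64 / sector_size as u64;
    assert_eq!(num_sectors, 42 * 4096);
}
```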
1257  nod/src/io/wia.rs (file diff suppressed because it is too large)
333  nod/src/lib.rs
@ -1,4 +1,5 @@
|
|||
#![warn(missing_docs, clippy::missing_inline_in_public_items)]
|
||||
#![allow(clippy::new_ret_no_self)]
|
||||
#![warn(missing_docs)]
|
||||
//! Library for traversing & reading Nintendo Optical Disc (GameCube and Wii) images.
|
||||
//!
|
||||
//! Originally based on the C++ library [nod](https://github.com/AxioDL/nod),
|
||||
|
@ -11,6 +12,7 @@
|
|||
//! - CISO (+ NKit 2 lossless)
|
||||
//! - NFS (Wii U VC)
|
||||
//! - GCZ
|
||||
//! - TGC
|
||||
//!
|
||||
//! # Examples
|
||||
//!
|
||||
|
@ -19,17 +21,21 @@
|
|||
//! ```no_run
|
||||
//! use std::io::Read;
|
||||
//!
|
||||
//! use nod::{
|
||||
//! common::PartitionKind,
|
||||
//! read::{DiscOptions, DiscReader, PartitionOptions},
|
||||
//! };
|
||||
//!
|
||||
//! // Open a disc image and the first data partition.
|
||||
//! let disc = nod::Disc::new("path/to/file.iso")
|
||||
//! .expect("Failed to open disc");
|
||||
//! let mut partition = disc.open_partition_kind(nod::PartitionKind::Data)
|
||||
//! let disc =
|
||||
//! DiscReader::new("path/to/file.iso", &DiscOptions::default()).expect("Failed to open disc");
|
||||
//! let mut partition = disc
|
||||
//! .open_partition_kind(PartitionKind::Data, &PartitionOptions::default())
|
||||
//! .expect("Failed to open data partition");
|
||||
//!
|
||||
//! // Read partition metadata and the file system table.
|
||||
//! let meta = partition.meta()
|
||||
//! .expect("Failed to read partition metadata");
|
||||
//! let fst = meta.fst()
|
||||
//! .expect("File system table is invalid");
|
||||
//! let meta = partition.meta().expect("Failed to read partition metadata");
|
||||
//! let fst = meta.fst().expect("File system table is invalid");
|
||||
//!
|
||||
//! // Find a file by path and read it into a string.
|
||||
//! if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
|
||||
|
@ -46,38 +52,106 @@
|
|||
//! Converting a disc image to raw ISO:
|
||||
//!
|
||||
//! ```no_run
|
||||
//! // Enable `PartitionEncryptionMode::Original` to ensure the output is a valid ISO.
|
||||
//! let options = nod::OpenOptions { partition_encryption: nod::PartitionEncryptionMode::Original };
|
||||
//! let mut disc = nod::Disc::new_with_options("path/to/file.rvz", &options)
|
||||
//! .expect("Failed to open disc");
|
||||
//! use nod::read::{DiscOptions, DiscReader, PartitionEncryption};
|
||||
//!
|
||||
//! // Read directly from the open disc and write to the output file.
|
||||
//! let mut out = std::fs::File::create("output.iso")
|
||||
//! .expect("Failed to create output file");
|
||||
//! std::io::copy(&mut disc, &mut out)
|
||||
//! .expect("Failed to write data");
|
||||
//! let options = DiscOptions {
|
||||
//! partition_encryption: PartitionEncryption::Original,
|
||||
//! // Use 4 threads to preload data as the disc is read. This can speed up sequential reads,
|
||||
//! // especially when the disc image format uses compression.
|
||||
//! preloader_threads: 4,
|
||||
//! };
|
||||
//! // Open a disc image.
|
||||
//! let mut disc = DiscReader::new("path/to/file.rvz", &options).expect("Failed to open disc");
|
||||
//!
|
||||
//! // Create a new output file.
|
||||
//! let mut out = std::fs::File::create("output.iso").expect("Failed to create output file");
|
||||
//! // Read directly from the DiscReader and write to the output file.
|
||||
//! // NOTE: Any copy method that accepts `Read` and `Write` can be used here,
|
||||
//! // such as `std::io::copy`. This example utilizes `BufRead` for efficiency,
|
||||
//! // since `DiscReader` has its own internal buffer.
|
||||
//! nod::util::buf_copy(&mut disc, &mut out).expect("Failed to write data");
|
||||
//! ```
|
||||
//!
|
||||
//! Converting a disc image to RVZ:
|
||||
//!
|
||||
//! ```no_run
|
||||
//! use std::fs::File;
|
||||
//! use std::io::{Seek, Write};
|
||||
//! use nod::common::{Compression, Format};
|
||||
//! use nod::read::{DiscOptions, DiscReader, PartitionEncryption};
|
||||
//! use nod::write::{DiscWriter, DiscWriterWeight, FormatOptions, ProcessOptions};
|
||||
//!
|
||||
//! let open_options = DiscOptions {
|
||||
//! partition_encryption: PartitionEncryption::Original,
|
||||
//! // Use 4 threads to preload data as the disc is read. This can speed up sequential reads,
|
||||
//! // especially when the disc image format uses compression.
|
||||
//! preloader_threads: 4,
|
||||
//! };
|
||||
//! // Open a disc image.
|
||||
//! let disc = DiscReader::new("path/to/file.iso", &open_options)
|
||||
//! .expect("Failed to open disc");
|
||||
//! // Create a new output file.
|
||||
//! let mut output_file = File::create("output.rvz")
|
||||
//! .expect("Failed to create output file");
|
||||
//!
|
||||
//! let options = FormatOptions {
|
||||
//! format: Format::Rvz,
|
||||
//! compression: Compression::Zstandard(19),
|
||||
//! block_size: Format::Rvz.default_block_size(),
|
||||
//! };
|
||||
//! // Create a disc writer with the desired output format.
|
||||
//! let mut writer = DiscWriter::new(disc, &options)
|
||||
//! .expect("Failed to create writer");
|
||||
//!
|
||||
//! // Ideally we'd base this on the actual number of CPUs available.
|
||||
//! // This is just an example.
|
||||
//! let num_threads = match writer.weight() {
|
||||
//! DiscWriterWeight::Light => 0,
|
||||
//! DiscWriterWeight::Medium => 4,
|
||||
//! DiscWriterWeight::Heavy => 12,
|
||||
//! };
|
||||
//! let process_options = ProcessOptions {
|
||||
//! processor_threads: num_threads,
|
||||
//! // Enable checksum calculation for the _original_ disc data.
|
||||
//! // Digests will be stored in the output file for verification, if supported.
|
||||
//! // They will also be returned in the finalization result.
|
||||
//! digest_crc32: true,
|
||||
//! digest_md5: false, // MD5 is slow, skip it
|
||||
//! digest_sha1: true,
|
||||
//! digest_xxh64: true,
|
||||
//! };
|
||||
//! // Start processing the disc image.
|
||||
//! let finalization = writer.process(
|
||||
//! |data, _progress, _total| {
|
||||
//! output_file.write_all(data.as_ref())?;
|
||||
//! // One could display progress here, if desired.
|
||||
//! Ok(())
|
||||
//! },
|
||||
//! &process_options
|
||||
//! )
|
||||
//! .expect("Failed to process disc image");
|
||||
//!
|
||||
//! // Some disc writers calculate data during processing.
|
||||
//! // If the finalization returns header data, seek to the beginning of the file and write it.
|
||||
//! if !finalization.header.is_empty() {
|
||||
//! output_file.seek(std::io::SeekFrom::Start(0))
|
||||
//! .expect("Failed to seek");
|
||||
//! output_file.write_all(finalization.header.as_ref())
|
||||
//! .expect("Failed to write header");
|
||||
//! }
|
||||
//! output_file.flush().expect("Failed to flush output file");
|
||||
//!
|
||||
//! // Display the calculated digests.
|
||||
//! println!("CRC32: {:08X}", finalization.crc32.unwrap());
|
||||
//! // ...
|
||||
|
||||
use std::{
|
||||
io::{BufRead, Read, Seek},
|
||||
path::Path,
|
||||
};
|
||||
|
||||
pub use disc::{
|
||||
ApploaderHeader, ContentMetadata, DiscHeader, DolHeader, FileStream, Fst, Node, NodeKind,
|
||||
OwnedFileStream, PartitionBase, PartitionHeader, PartitionKind, PartitionMeta, SignedHeader,
|
||||
Ticket, TicketLimit, TmdHeader, WindowedStream, BI2_SIZE, BOOT_SIZE, DL_DVD_SIZE, GCN_MAGIC,
|
||||
MINI_DVD_SIZE, REGION_SIZE, SECTOR_SIZE, SL_DVD_SIZE, WII_MAGIC,
|
||||
};
|
||||
pub use io::{
|
||||
block::{DiscStream, PartitionInfo},
|
||||
Compression, DiscMeta, Format, KeyBytes, MagicBytes,
|
||||
};
|
||||
pub use util::lfg::LaggedFibonacci;
|
||||
|
||||
mod disc;
|
||||
mod io;
|
||||
mod util;
|
||||
pub mod build;
|
||||
pub mod common;
|
||||
pub mod disc;
|
||||
pub(crate) mod io;
|
||||
pub mod read;
|
||||
pub mod util;
|
||||
pub mod write;
|
||||
|
||||
/// Error types for nod.
|
||||
#[derive(thiserror::Error, Debug)]
|
||||
|
@ -91,9 +165,6 @@ pub enum Error {
|
|||
/// An unknown error.
|
||||
#[error("error: {0}")]
|
||||
Other(String),
|
||||
/// An error occurred while allocating memory.
|
||||
#[error("allocation failed")]
|
||||
Alloc(zerocopy::AllocError),
|
||||
}
|
||||
|
||||
impl From<&str> for Error {
|
||||
|
@ -108,7 +179,12 @@ impl From<String> for Error {
|
|||
|
||||
impl From<zerocopy::AllocError> for Error {
|
||||
#[inline]
|
||||
fn from(e: zerocopy::AllocError) -> Error { Error::Alloc(e) }
|
||||
fn from(_: zerocopy::AllocError) -> Error {
|
||||
Error::Io(
|
||||
"allocation failed".to_string(),
|
||||
std::io::Error::from(std::io::ErrorKind::OutOfMemory),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper result type for [`Error`].
|
||||
|
@ -149,178 +225,3 @@ where E: ErrorContext
|
|||
self.map_err(|e| e.context(f()))
|
||||
}
|
||||
}
|
||||
|
||||
/// Wii partition encryption mode.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
|
||||
pub enum PartitionEncryptionMode {
|
||||
/// Partition data is read as it's stored in the underlying disc format.
|
||||
/// For example, WIA/RVZ partitions are stored decrypted, so this avoids
|
||||
/// rebuilding the partition encryption and hash data if it will only be
|
||||
/// read via [`PartitionBase`]. If it's desired to read a full disc image
|
||||
/// via [`Disc`], use [`PartitionEncryptionMode::Original`] instead.
|
||||
#[default]
|
||||
AsIs,
|
||||
/// Partition encryption and hashes are rebuilt to match its original state,
|
||||
/// if necessary. This is used for converting or verifying a disc image.
|
||||
Original,
|
||||
/// Partition data will be encrypted if reading a decrypted disc image.
|
||||
/// Modifies the disc header to mark partition data as encrypted.
|
||||
ForceEncrypted,
|
||||
/// Partition data will be decrypted if reading an encrypted disc image.
|
||||
/// Modifies the disc header to mark partition data as decrypted.
|
||||
ForceDecrypted,
|
||||
}
|
||||

/// Options for opening a disc image.
#[derive(Default, Debug, Clone)]
pub struct OpenOptions {
    /// Wii: Partition encryption mode. By default, partitions are read as they
    /// are stored in the underlying disc format, avoiding extra work when the
    /// underlying format stores them decrypted (e.g. WIA/RVZ).
    ///
    /// This can be changed to [`PartitionEncryptionMode::Original`] to rebuild
    /// partition encryption and hashes to match their original state for conversion
    /// or verification.
    pub partition_encryption: PartitionEncryptionMode,
}

/// Options for opening a partition.
#[derive(Default, Debug, Clone)]
pub struct PartitionOptions {
    /// Wii: Validate data hashes while reading the partition, if available.
    /// To ensure hashes are present, regardless of the underlying disc format,
    /// set [`OpenOptions::partition_encryption`] to [`PartitionEncryptionMode::Original`].
    pub validate_hashes: bool,
}

/// An open disc image and read stream.
///
/// This is the primary entry point for reading disc images.
pub struct Disc {
    reader: disc::reader::DiscReader,
}

impl Disc {
    /// Opens a disc image from a file path.
    #[inline]
    pub fn new<P: AsRef<Path>>(path: P) -> Result<Disc> {
        Disc::new_with_options(path, &OpenOptions::default())
    }

    /// Opens a disc image from a file path with custom options.
    #[inline]
    pub fn new_with_options<P: AsRef<Path>>(path: P, options: &OpenOptions) -> Result<Disc> {
        let io = io::block::open(path.as_ref())?;
        let reader = disc::reader::DiscReader::new(io, options)?;
        Ok(Disc { reader })
    }

    /// Opens a disc image from a read stream.
    #[inline]
    pub fn new_stream(stream: Box<dyn DiscStream>) -> Result<Disc> {
        Disc::new_stream_with_options(stream, &OpenOptions::default())
    }

    /// Opens a disc image from a read stream with custom options.
    #[inline]
    pub fn new_stream_with_options(
        stream: Box<dyn DiscStream>,
        options: &OpenOptions,
    ) -> Result<Disc> {
        let io = io::block::new(stream)?;
        let reader = disc::reader::DiscReader::new(io, options)?;
        Ok(Disc { reader })
    }

    /// Detects the format of a disc image from a read stream.
    #[inline]
    pub fn detect<R>(stream: &mut R) -> std::io::Result<Option<Format>>
    where R: Read + ?Sized {
        io::block::detect(stream)
    }

    /// The disc's primary header.
    #[inline]
    pub fn header(&self) -> &DiscHeader { self.reader.header() }

    /// The Wii disc's region information.
    ///
    /// **GameCube**: This will return `None`.
    #[inline]
    pub fn region(&self) -> Option<&[u8; REGION_SIZE]> { self.reader.region() }

    /// Returns extra metadata included in the disc file format, if any.
    #[inline]
    pub fn meta(&self) -> DiscMeta { self.reader.meta() }

    /// The disc's size in bytes, or an estimate if not stored by the format.
    #[inline]
    pub fn disc_size(&self) -> u64 { self.reader.disc_size() }

    /// A list of Wii partitions on the disc.
    ///
    /// **GameCube**: This will return an empty slice.
    #[inline]
    pub fn partitions(&self) -> &[PartitionInfo] { self.reader.partitions() }

    /// Opens a decrypted partition read stream for the specified partition index.
    ///
    /// **GameCube**: `index` must always be 0.
    #[inline]
    pub fn open_partition(&self, index: usize) -> Result<Box<dyn PartitionBase>> {
        self.open_partition_with_options(index, &PartitionOptions::default())
    }

    /// Opens a decrypted partition read stream for the specified partition index
    /// with custom options.
    ///
    /// **GameCube**: `index` must always be 0.
    #[inline]
    pub fn open_partition_with_options(
        &self,
        index: usize,
        options: &PartitionOptions,
    ) -> Result<Box<dyn PartitionBase>> {
        self.reader.open_partition(index, options)
    }

    /// Opens a decrypted partition read stream for the first partition matching
    /// the specified kind.
    ///
    /// **GameCube**: `kind` must always be [`PartitionKind::Data`].
    #[inline]
    pub fn open_partition_kind(&self, kind: PartitionKind) -> Result<Box<dyn PartitionBase>> {
        self.reader.open_partition_kind(kind, &PartitionOptions::default())
    }

    /// Opens a decrypted partition read stream for the first partition matching
    /// the specified kind with custom options.
    ///
    /// **GameCube**: `kind` must always be [`PartitionKind::Data`].
    #[inline]
    pub fn open_partition_kind_with_options(
        &self,
        kind: PartitionKind,
        options: &PartitionOptions,
    ) -> Result<Box<dyn PartitionBase>> {
        self.reader.open_partition_kind(kind, options)
    }
}

impl BufRead for Disc {
    #[inline]
    fn fill_buf(&mut self) -> std::io::Result<&[u8]> { self.reader.fill_buf() }

    #[inline]
    fn consume(&mut self, amt: usize) { self.reader.consume(amt) }
}

impl Read for Disc {
    #[inline]
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { self.reader.read(buf) }
}

impl Seek for Disc {
    #[inline]
    fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result<u64> { self.reader.seek(pos) }
}
@@ -0,0 +1,376 @@
//! [`DiscReader`] and associated types.

use std::{
    io::{BufRead, Read, Seek},
    path::Path,
    sync::Arc,
};

use dyn_clone::DynClone;
use zerocopy::FromBytes;

use crate::{
    common::{Compression, Format, PartitionInfo, PartitionKind},
    disc,
    disc::{
        fst::{Fst, Node},
        wii::{ContentMetadata, Ticket, TmdHeader, H3_TABLE_SIZE, REGION_SIZE},
        ApploaderHeader, DiscHeader, DolHeader, PartitionHeader, BI2_SIZE, BOOT_SIZE,
    },
    io::block,
    util::WindowedReader,
    Result,
};

/// Wii partition encryption mode.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Hash)]
pub enum PartitionEncryption {
    /// Partition encryption and hashes are rebuilt to match their original state,
    /// if necessary. This is used for converting or verifying a disc image.
    #[default]
    Original,
    /// Partition data will be encrypted if reading a decrypted disc image.
    /// Modifies the disc header to mark partition data as encrypted.
    ForceEncrypted,
    /// Partition data will be decrypted if reading an encrypted disc image.
    /// Modifies the disc header to mark partition data as decrypted.
    ForceDecrypted,
    /// Partition data will be decrypted if reading an encrypted disc image.
    /// Modifies the disc header to mark partition data as decrypted.
    /// Hashes are removed from the partition data.
    ForceDecryptedNoHashes,
}

/// Options for opening a disc image.
#[derive(Default, Debug, Clone)]
pub struct DiscOptions {
    /// Wii: Partition encryption mode. This affects how partition data appears when
    /// reading directly from [`DiscReader`], and can be used to convert between
    /// encrypted and decrypted disc images.
    pub partition_encryption: PartitionEncryption,
    /// Number of threads to use for preloading data as the disc is read. This
    /// is particularly useful when reading the disc image sequentially, as it
    /// can perform decompression and rebuilding in parallel with the main
    /// read thread. The default value of 0 disables preloading.
    pub preloader_threads: usize,
}
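
// A minimal usage sketch of the options above (the path and thread count are
// illustrative assumptions, not values from this crate): open a disc with
// rebuilt encryption and preloading for sequential conversion work.
//
//     use nod::read::{DiscOptions, DiscReader, PartitionEncryption};
//
//     let options = DiscOptions {
//         partition_encryption: PartitionEncryption::Original,
//         preloader_threads: 4,
//     };
//     let disc = DiscReader::new("path/to/game.iso", &options)?;
//     println!("{} bytes", disc.disc_size());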

/// Options for opening a partition.
#[derive(Default, Debug, Clone)]
pub struct PartitionOptions {
    /// Wii: Validate data hashes while reading the partition, if available.
    pub validate_hashes: bool,
}

/// Required trait bounds for reading disc images.
pub trait DiscStream: Read + Seek + DynClone + Send + Sync {}

impl<T> DiscStream for T where T: Read + Seek + DynClone + Send + Sync + ?Sized {}

dyn_clone::clone_trait_object!(DiscStream);

/// An open disc image and read stream.
///
/// This is the primary entry point for reading disc images.
#[derive(Clone)]
pub struct DiscReader {
    inner: disc::reader::DiscReader,
}

impl DiscReader {
    /// Opens a disc image from a file path.
    #[inline]
    pub fn new<P: AsRef<Path>>(path: P, options: &DiscOptions) -> Result<DiscReader> {
        let io = block::open(path.as_ref())?;
        let inner = disc::reader::DiscReader::new(io, options)?;
        Ok(DiscReader { inner })
    }

    /// Opens a disc image from a read stream.
    #[inline]
    pub fn new_stream(stream: Box<dyn DiscStream>, options: &DiscOptions) -> Result<DiscReader> {
        let io = block::new(stream)?;
        let reader = disc::reader::DiscReader::new(io, options)?;
        Ok(DiscReader { inner: reader })
    }

    /// Detects the format of a disc image from a read stream.
    #[inline]
    pub fn detect<R>(stream: &mut R) -> std::io::Result<Option<Format>>
    where R: Read + ?Sized {
        block::detect(stream)
    }

    /// The disc's primary header.
    #[inline]
    pub fn header(&self) -> &DiscHeader { self.inner.header() }

    /// The Wii disc's region information.
    ///
    /// **GameCube**: This will return `None`.
    #[inline]
    pub fn region(&self) -> Option<&[u8; REGION_SIZE]> { self.inner.region() }

    /// Returns extra metadata included in the disc file format, if any.
    #[inline]
    pub fn meta(&self) -> DiscMeta { self.inner.meta() }

    /// The disc's size in bytes, or an estimate if not stored by the format.
    #[inline]
    pub fn disc_size(&self) -> u64 { self.inner.disc_size() }

    /// A list of Wii partitions on the disc.
    ///
    /// **GameCube**: This will return an empty slice.
    #[inline]
    pub fn partitions(&self) -> &[PartitionInfo] { self.inner.partitions() }

    /// Opens a decrypted partition read stream for the specified partition index.
    ///
    /// **GameCube**: `index` must always be 0.
    #[inline]
    pub fn open_partition(
        &self,
        index: usize,
        options: &PartitionOptions,
    ) -> Result<Box<dyn PartitionReader>> {
        self.inner.open_partition(index, options)
    }

    /// Opens a decrypted partition read stream for the first partition matching
    /// the specified kind.
    ///
    /// **GameCube**: `kind` must always be [`PartitionKind::Data`].
    #[inline]
    pub fn open_partition_kind(
        &self,
        kind: PartitionKind,
        options: &PartitionOptions,
    ) -> Result<Box<dyn PartitionReader>> {
        self.inner.open_partition_kind(kind, options)
    }

    pub(crate) fn into_inner(self) -> disc::reader::DiscReader { self.inner }
}

impl BufRead for DiscReader {
    #[inline]
    fn fill_buf(&mut self) -> std::io::Result<&[u8]> { self.inner.fill_buf() }

    #[inline]
    fn consume(&mut self, amt: usize) { self.inner.consume(amt) }
}

impl Read for DiscReader {
    #[inline]
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { self.inner.read(buf) }
}

impl Seek for DiscReader {
    #[inline]
    fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result<u64> { self.inner.seek(pos) }
}

/// Extra metadata about the underlying disc file format.
#[derive(Debug, Clone, Default)]
pub struct DiscMeta {
    /// The disc file format.
    pub format: Format,
    /// The format's compression algorithm.
    pub compression: Compression,
    /// If the format uses blocks, the block size in bytes.
    pub block_size: Option<u32>,
    /// Whether Wii partitions are stored decrypted in the format.
    pub decrypted: bool,
    /// Whether the format omits Wii partition data hashes.
    pub needs_hash_recovery: bool,
    /// Whether the format supports recovering the original disc data losslessly.
    pub lossless: bool,
    /// The original disc's size in bytes, if stored by the format.
    pub disc_size: Option<u64>,
    /// The original disc's CRC32 hash, if stored by the format.
    pub crc32: Option<u32>,
    /// The original disc's MD5 hash, if stored by the format.
    pub md5: Option<[u8; 16]>,
    /// The original disc's SHA-1 hash, if stored by the format.
    pub sha1: Option<[u8; 20]>,
    /// The original disc's XXH64 hash, if stored by the format.
    pub xxh64: Option<u64>,
}
/// An open disc partition.
pub trait PartitionReader: DynClone + BufRead + Seek + Send + Sync {
    /// Whether this is a Wii partition. (GameCube otherwise)
    fn is_wii(&self) -> bool;

    /// Reads the partition header and file system table.
    fn meta(&mut self) -> Result<PartitionMeta>;
}

/// A file reader borrowing a [`PartitionReader`].
pub type FileReader<'a> = WindowedReader<&'a mut dyn PartitionReader>;

/// A file reader owning a [`PartitionReader`].
pub type OwnedFileReader = WindowedReader<Box<dyn PartitionReader>>;

impl<'a> dyn PartitionReader + 'a {
    /// Seeks the partition stream to the specified file system node
    /// and returns a windowed stream.
    ///
    /// # Examples
    ///
    /// Basic usage:
    /// ```no_run
    /// use std::io::Read;
    ///
    /// use nod::read::{DiscOptions, DiscReader, PartitionKind, PartitionOptions};
    ///
    /// fn main() -> nod::Result<()> {
    ///     let disc = DiscReader::new("path/to/file.iso", &DiscOptions::default())?;
    ///     let mut partition =
    ///         disc.open_partition_kind(PartitionKind::Data, &PartitionOptions::default())?;
    ///     let meta = partition.meta()?;
    ///     let fst = meta.fst()?;
    ///     if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
    ///         let mut s = String::new();
    ///         partition
    ///             .open_file(node)
    ///             .expect("Failed to open file stream")
    ///             .read_to_string(&mut s)
    ///             .expect("Failed to read file");
    ///         println!("{}", s);
    ///     }
    ///     Ok(())
    /// }
    /// ```
    pub fn open_file(&mut self, node: Node) -> std::io::Result<FileReader> {
        if !node.is_file() {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                "Node is not a file".to_string(),
            ));
        }
        let is_wii = self.is_wii();
        FileReader::new(self, node.offset(is_wii), node.length() as u64)
    }
}

impl dyn PartitionReader {
    /// Consumes the partition instance and returns a windowed stream.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::io::Read;
    ///
    /// use nod::read::{DiscOptions, DiscReader, OwnedFileReader, PartitionKind, PartitionOptions};
    ///
    /// fn main() -> nod::Result<()> {
    ///     let disc = DiscReader::new("path/to/file.iso", &DiscOptions::default())?;
    ///     let mut partition =
    ///         disc.open_partition_kind(PartitionKind::Data, &PartitionOptions::default())?;
    ///     let meta = partition.meta()?;
    ///     let fst = meta.fst()?;
    ///     if let Some((_, node)) = fst.find("/disc.tgc") {
    ///         let file: OwnedFileReader = partition
    ///             .clone() // Clone the Box<dyn PartitionReader>
    ///             .into_open_file(node) // Get an OwnedFileReader
    ///             .expect("Failed to open file stream");
    ///         // Open the inner disc image using the owned stream
    ///         let inner_disc = DiscReader::new_stream(Box::new(file), &DiscOptions::default())
    ///             .expect("Failed to open inner disc");
    ///         // ...
    ///     }
    ///     Ok(())
    /// }
    /// ```
    pub fn into_open_file(self: Box<Self>, node: Node) -> std::io::Result<OwnedFileReader> {
        if !node.is_file() {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                "Node is not a file".to_string(),
            ));
        }
        let is_wii = self.is_wii();
        OwnedFileReader::new(self, node.offset(is_wii), node.length() as u64)
    }
}

dyn_clone::clone_trait_object!(PartitionReader);

/// Extra disc partition data. (DOL, FST, etc.)
#[derive(Clone, Debug)]
pub struct PartitionMeta {
    /// Disc and partition header (boot.bin)
    pub raw_boot: Arc<[u8; BOOT_SIZE]>,
    /// Debug and region information (bi2.bin)
    pub raw_bi2: Arc<[u8; BI2_SIZE]>,
    /// Apploader (apploader.bin)
    pub raw_apploader: Arc<[u8]>,
    /// Main binary (main.dol)
    pub raw_dol: Arc<[u8]>,
    /// File system table (fst.bin)
    pub raw_fst: Arc<[u8]>,
    /// Ticket (ticket.bin, Wii only)
    pub raw_ticket: Option<Arc<[u8]>>,
    /// TMD (tmd.bin, Wii only)
    pub raw_tmd: Option<Arc<[u8]>>,
    /// Certificate chain (cert.bin, Wii only)
    pub raw_cert_chain: Option<Arc<[u8]>>,
    /// H3 hash table (h3.bin, Wii only)
    pub raw_h3_table: Option<Arc<[u8; H3_TABLE_SIZE]>>,
}

impl PartitionMeta {
    /// A view into the disc header.
    #[inline]
    pub fn header(&self) -> &DiscHeader {
        DiscHeader::ref_from_bytes(&self.raw_boot[..size_of::<DiscHeader>()])
            .expect("Invalid header alignment")
    }

    /// A view into the partition header.
    #[inline]
    pub fn partition_header(&self) -> &PartitionHeader {
        PartitionHeader::ref_from_bytes(&self.raw_boot[size_of::<DiscHeader>()..])
            .expect("Invalid partition header alignment")
    }

    /// A view into the apploader header.
    #[inline]
    pub fn apploader_header(&self) -> &ApploaderHeader {
        ApploaderHeader::ref_from_prefix(&self.raw_apploader)
            .expect("Invalid apploader alignment")
            .0
    }

    /// A view into the file system table (FST).
    #[inline]
    pub fn fst(&self) -> Result<Fst, &'static str> { Fst::new(&self.raw_fst) }

    /// A view into the DOL header.
    #[inline]
    pub fn dol_header(&self) -> &DolHeader {
        DolHeader::ref_from_prefix(&self.raw_dol).expect("Invalid DOL alignment").0
    }

    /// A view into the ticket. (Wii only)
    #[inline]
    pub fn ticket(&self) -> Option<&Ticket> {
        let raw_ticket = self.raw_ticket.as_deref()?;
        Some(Ticket::ref_from_bytes(raw_ticket).expect("Invalid ticket alignment"))
    }

    /// A view into the TMD. (Wii only)
    #[inline]
    pub fn tmd_header(&self) -> Option<&TmdHeader> {
        let raw_tmd = self.raw_tmd.as_deref()?;
        Some(TmdHeader::ref_from_prefix(raw_tmd).expect("Invalid TMD alignment").0)
    }

    /// A view into the TMD content metadata. (Wii only)
    #[inline]
    pub fn content_metadata(&self) -> Option<&[ContentMetadata]> {
        let raw_cmd = &self.raw_tmd.as_deref()?[size_of::<TmdHeader>()..];
        Some(<[ContentMetadata]>::ref_from_bytes(raw_cmd).expect("Invalid CMD alignment"))
    }
}
@@ -0,0 +1,136 @@
use tracing::instrument;

use crate::{
    common::KeyBytes,
    disc::{
        wii::{HASHES_SIZE, SECTOR_DATA_SIZE},
        SECTOR_SIZE,
    },
    util::array_ref,
};

#[cfg(feature = "openssl")]
thread_local! {
    static ENC_CIPHER_CTX: std::cell::RefCell<openssl::cipher_ctx::CipherCtx> = {
        let cipher = openssl::cipher::Cipher::fetch(None, "AES-128-CBC", None).unwrap();
        let mut ctx = openssl::cipher_ctx::CipherCtx::new().unwrap();
        ctx.set_padding(false);
        ctx.encrypt_init(Some(&cipher), None, None).unwrap();
        std::cell::RefCell::new(ctx)
    };
    static DEC_CIPHER_CTX: std::cell::RefCell<openssl::cipher_ctx::CipherCtx> = {
        let cipher = openssl::cipher::Cipher::fetch(None, "AES-128-CBC", None).unwrap();
        let mut ctx = openssl::cipher_ctx::CipherCtx::new().unwrap();
        ctx.set_padding(false);
        ctx.decrypt_init(Some(&cipher), None, None).unwrap();
        std::cell::RefCell::new(ctx)
    };
}
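
// Design note (an observation, not from the original source): caching one
// CipherCtx per thread avoids re-creating an OpenSSL cipher context and
// re-fetching the AES-128-CBC implementation on every call; the
// *_init(None, Some(key), Some(iv)) calls below only rekey the cached context.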

/// Encrypts data in-place using AES-128-CBC with the given key and IV.
pub fn aes_cbc_encrypt(key: &KeyBytes, iv: &KeyBytes, data: &mut [u8]) {
    assert_eq!(data.len() % 16, 0);
    #[cfg(not(feature = "openssl"))]
    {
        use aes::cipher::{block_padding::NoPadding, BlockModeEncrypt, KeyIvInit};
        <cbc::Encryptor<aes::Aes128>>::new(key.into(), iv.into())
            .encrypt_padded::<NoPadding>(data, data.len())
            .unwrap();
    }
    #[cfg(feature = "openssl")]
    ENC_CIPHER_CTX.with_borrow_mut(|ctx| {
        ctx.encrypt_init(None, Some(key), Some(iv)).unwrap();
        let len = unsafe {
            // The openssl crate doesn't provide a safe API for using the same inbuf/outbuf.
            // However, this is valid with AES-CBC and no padding. Create a copy of the input
            // slice to appease the borrow checker.
            let input = std::slice::from_raw_parts(data.as_ptr(), data.len());
            ctx.cipher_update_unchecked(input, Some(data))
        }
        .unwrap();
        assert_eq!(len, data.len());
    });
}

/// Decrypts data in-place using AES-128-CBC with the given key and IV.
pub fn aes_cbc_decrypt(key: &KeyBytes, iv: &KeyBytes, data: &mut [u8]) {
    assert_eq!(data.len() % 16, 0);
    #[cfg(not(feature = "openssl"))]
    {
        use aes::cipher::{block_padding::NoPadding, BlockModeDecrypt, KeyIvInit};
        <cbc::Decryptor<aes::Aes128>>::new(key.into(), iv.into())
            .decrypt_padded::<NoPadding>(data)
            .unwrap();
    }
    #[cfg(feature = "openssl")]
    DEC_CIPHER_CTX.with_borrow_mut(|ctx| {
        ctx.decrypt_init(None, Some(key), Some(iv)).unwrap();
        let len = unsafe {
            // The openssl crate doesn't provide a safe API for using the same inbuf/outbuf.
            // However, this is valid with AES-CBC and no padding. Create a copy of the input
            // slice to appease the borrow checker.
            let input = std::slice::from_raw_parts(data.as_ptr(), data.len());
            ctx.cipher_update_unchecked(input, Some(data))
        }
        .unwrap();
        assert_eq!(len, data.len());
    });
}

/// Decrypts data buffer-to-buffer using AES-128-CBC with the given key and IV.
pub fn aes_cbc_decrypt_b2b(key: &KeyBytes, iv: &KeyBytes, data: &[u8], out: &mut [u8]) {
    assert_eq!(data.len() % 16, 0);
    assert_eq!(data.len(), out.len());
    #[cfg(not(feature = "openssl"))]
    {
        use aes::cipher::{block_padding::NoPadding, BlockModeDecrypt, KeyIvInit};
        <cbc::Decryptor<aes::Aes128>>::new(key.into(), iv.into())
            .decrypt_padded_b2b::<NoPadding>(data, out)
            .unwrap();
    }
    #[cfg(feature = "openssl")]
    DEC_CIPHER_CTX.with_borrow_mut(|ctx| {
        ctx.decrypt_init(None, Some(key), Some(iv)).unwrap();
        let len = unsafe { ctx.cipher_update_unchecked(data, Some(out)) }.unwrap();
        assert_eq!(len, out.len());
    });
}

/// Encrypts a Wii partition sector in-place.
#[instrument(skip_all)]
pub fn encrypt_sector(out: &mut [u8; SECTOR_SIZE], key: &KeyBytes) {
    aes_cbc_encrypt(key, &[0u8; 16], &mut out[..HASHES_SIZE]);
    // Data IV from encrypted hash block
    let iv = *array_ref![out, 0x3D0, 16];
    aes_cbc_encrypt(key, &iv, &mut out[HASHES_SIZE..]);
}

/// Decrypts a Wii partition sector in-place.
#[instrument(skip_all)]
pub fn decrypt_sector(out: &mut [u8; SECTOR_SIZE], key: &KeyBytes) {
    // Data IV from encrypted hash block
    let iv = *array_ref![out, 0x3D0, 16];
    aes_cbc_decrypt(key, &[0u8; 16], &mut out[..HASHES_SIZE]);
    aes_cbc_decrypt(key, &iv, &mut out[HASHES_SIZE..]);
}

/// Decrypts a Wii partition sector buffer-to-buffer.
#[instrument(skip_all)]
pub fn decrypt_sector_b2b(data: &[u8; SECTOR_SIZE], out: &mut [u8; SECTOR_SIZE], key: &KeyBytes) {
    // Data IV from encrypted hash block
    let iv = *array_ref![data, 0x3D0, 16];
    aes_cbc_decrypt_b2b(key, &[0u8; 16], &data[..HASHES_SIZE], &mut out[..HASHES_SIZE]);
    aes_cbc_decrypt_b2b(key, &iv, &data[HASHES_SIZE..], &mut out[HASHES_SIZE..]);
}

/// Decrypts a Wii partition sector data (excluding hashes) buffer-to-buffer.
#[instrument(skip_all)]
pub fn decrypt_sector_data_b2b(
    data: &[u8; SECTOR_SIZE],
    out: &mut [u8; SECTOR_DATA_SIZE],
    key: &KeyBytes,
) {
    // Data IV from encrypted hash block
    let iv = *array_ref![data, 0x3D0, 16];
    aes_cbc_decrypt_b2b(key, &iv, &data[HASHES_SIZE..], out);
}
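
// A sketch of the sector layout the functions above rely on (standard Wii
// disc format values, stated here for reference): each 0x8000-byte sector is
// a 0x400-byte H0/H1/H2 hash block followed by 0x7C00 bytes of user data.
// The hash block is encrypted with a zero IV, while the data block's IV is
// read from offset 0x3D0 of the *encrypted* hash block.
//
//     const SECTOR_SIZE: usize = 0x8000;
//     const HASHES_SIZE: usize = 0x400;
//     const SECTOR_DATA_SIZE: usize = SECTOR_SIZE - HASHES_SIZE; // 0x7C00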
@@ -1,95 +1,483 @@
/// Decodes the LZMA Properties byte (lc/lp/pb).
/// See `lzma_lzma_lclppb_decode` in `liblzma/lzma/lzma_decoder.c`.
#[cfg(feature = "compress-lzma")]
pub fn lzma_lclppb_decode(
    options: &mut liblzma::stream::LzmaOptions,
    byte: u8,
) -> std::io::Result<()> {
    let mut d = byte as u32;
    if d >= (9 * 5 * 5) {
        return Err(std::io::Error::new(
            std::io::ErrorKind::InvalidData,
            format!("Invalid LZMA props byte: {}", d),
        ));
    }
    options.literal_context_bits(d % 9);
    d /= 9;
    options.position_bits(d / 5);
    options.literal_position_bits(d % 5);
    Ok(())
use std::{io, io::Read};

use tracing::instrument;

use crate::{
    common::Compression,
    io::wia::{WIACompression, WIADisc},
    Error, Result,
};

pub struct Decompressor {
    pub kind: DecompressionKind,
    pub cache: DecompressorCache,
}

/// Decodes LZMA properties.
/// See `lzma_lzma_props_decode` in `liblzma/lzma/lzma_decoder.c`.
#[cfg(feature = "compress-lzma")]
pub fn lzma_props_decode(props: &[u8]) -> std::io::Result<liblzma::stream::LzmaOptions> {
    use crate::array_ref;
    if props.len() != 5 {
        return Err(std::io::Error::new(
            std::io::ErrorKind::InvalidData,
            format!("Invalid LZMA props length: {}", props.len()),
        ));
impl Clone for Decompressor {
    fn clone(&self) -> Self {
        Self { kind: self.kind.clone(), cache: DecompressorCache::default() }
    }
    let mut options = liblzma::stream::LzmaOptions::new();
    lzma_lclppb_decode(&mut options, props[0])?;
    options.dict_size(u32::from_le_bytes(*array_ref!(props, 1, 4)));
    Ok(options)
}

/// Decodes LZMA2 properties.
/// See `lzma_lzma2_props_decode` in `liblzma/lzma/lzma2_decoder.c`.
#[cfg(feature = "compress-lzma")]
pub fn lzma2_props_decode(props: &[u8]) -> std::io::Result<liblzma::stream::LzmaOptions> {
    use std::cmp::Ordering;
    if props.len() != 1 {
        return Err(std::io::Error::new(
            std::io::ErrorKind::InvalidData,
            format!("Invalid LZMA2 props length: {}", props.len()),
        ));
#[derive(Default)]
pub enum DecompressorCache {
    #[default]
    None,
    #[cfg(feature = "compress-zlib")]
    Deflate(Box<miniz_oxide::inflate::core::DecompressorOxide>),
    #[cfg(feature = "compress-zstd")]
    Zstandard(zstd_safe::DCtx<'static>),
}

impl Decompressor {
    pub fn new(kind: DecompressionKind) -> Self {
        Self { kind, cache: DecompressorCache::default() }
    }
    let d = props[0] as u32;
    let mut options = liblzma::stream::LzmaOptions::new();
    options.dict_size(match d.cmp(&40) {
        Ordering::Greater => {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidData,
                format!("Invalid LZMA2 props byte: {}", d),
#[instrument(name = "Decompressor::decompress", skip_all)]
|
||||
pub fn decompress(&mut self, buf: &[u8], out: &mut [u8]) -> io::Result<usize> {
|
||||
match &self.kind {
|
||||
DecompressionKind::None => {
|
||||
out.copy_from_slice(buf);
|
||||
Ok(buf.len())
|
||||
}
|
||||
#[cfg(feature = "compress-zlib")]
|
||||
DecompressionKind::Deflate => {
|
||||
let decompressor = match &mut self.cache {
|
||||
DecompressorCache::Deflate(decompressor) => decompressor,
|
||||
_ => {
|
||||
self.cache = DecompressorCache::Deflate(Box::new(
|
||||
miniz_oxide::inflate::core::DecompressorOxide::new(),
|
||||
));
|
||||
match &mut self.cache {
|
||||
DecompressorCache::Deflate(decompressor) => decompressor,
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
};
|
||||
decompressor.init();
|
||||
let (status, in_size, out_size) = miniz_oxide::inflate::core::decompress(
|
||||
decompressor.as_mut(),
|
||||
buf,
|
||||
out,
|
||||
0,
|
||||
miniz_oxide::inflate::core::inflate_flags::TINFL_FLAG_PARSE_ZLIB_HEADER
|
||||
| miniz_oxide::inflate::core::inflate_flags::TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF,
|
||||
);
|
||||
match status {
|
||||
miniz_oxide::inflate::TINFLStatus::Done => Ok(out_size),
|
||||
_ => Err(io::Error::new(
|
||||
io::ErrorKind::InvalidData,
|
||||
format!(
|
||||
"Deflate decompression status {:?} (in: {}, out: {})",
|
||||
status, in_size, out_size
|
||||
),
|
||||
)),
|
||||
}
|
||||
}
|
||||
#[cfg(feature = "compress-bzip2")]
|
||||
DecompressionKind::Bzip2 => {
|
||||
let mut decoder = bzip2::Decompress::new(false);
|
||||
let status = decoder.decompress(buf, out)?;
|
||||
match status {
|
||||
bzip2::Status::StreamEnd => Ok(decoder.total_out() as usize),
|
||||
_ => Err(io::Error::new(
|
||||
io::ErrorKind::InvalidData,
|
||||
format!("Bzip2 decompression status {:?}", status),
|
||||
)),
|
||||
}
|
||||
}
|
||||
#[cfg(feature = "compress-lzma")]
|
||||
DecompressionKind::Lzma(data) => {
|
||||
use lzma_util::{lzma_props_decode, new_lzma_decoder};
|
||||
let mut decoder = new_lzma_decoder(&lzma_props_decode(data)?)?;
|
||||
let status = decoder.process(buf, out, liblzma::stream::Action::Finish)?;
|
||||
match status {
|
||||
liblzma::stream::Status::StreamEnd => Ok(decoder.total_out() as usize),
|
||||
_ => Err(io::Error::new(
|
||||
io::ErrorKind::InvalidData,
|
||||
format!("LZMA decompression status {:?}", status),
|
||||
)),
|
||||
}
|
||||
}
|
||||
#[cfg(feature = "compress-lzma")]
|
||||
DecompressionKind::Lzma2(data) => {
|
||||
use lzma_util::{lzma2_props_decode, new_lzma2_decoder};
|
||||
let mut decoder = new_lzma2_decoder(&lzma2_props_decode(data)?)?;
|
||||
let status = decoder.process(buf, out, liblzma::stream::Action::Finish)?;
|
||||
match status {
|
||||
liblzma::stream::Status::StreamEnd => Ok(decoder.total_out() as usize),
|
||||
_ => Err(io::Error::new(
|
||||
io::ErrorKind::InvalidData,
|
||||
format!("LZMA2 decompression status {:?}", status),
|
||||
)),
|
||||
}
|
||||
}
|
||||
#[cfg(feature = "compress-zstd")]
|
||||
DecompressionKind::Zstandard => {
|
||||
let ctx = match &mut self.cache {
|
||||
DecompressorCache::Zstandard(ctx) => ctx,
|
||||
_ => {
|
||||
let ctx = zstd_safe::DCtx::create();
|
||||
self.cache = DecompressorCache::Zstandard(ctx);
|
||||
match &mut self.cache {
|
||||
DecompressorCache::Zstandard(ctx) => ctx,
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
};
|
||||
ctx.decompress(out, buf).map_err(zstd_util::map_error_code)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||

#[derive(Debug, Clone)]
pub enum DecompressionKind {
    None,
    #[cfg(feature = "compress-zlib")]
    Deflate,
    #[cfg(feature = "compress-bzip2")]
    Bzip2,
    #[cfg(feature = "compress-lzma")]
    Lzma(Box<[u8]>),
    #[cfg(feature = "compress-lzma")]
    Lzma2(Box<[u8]>),
    #[cfg(feature = "compress-zstd")]
    Zstandard,
}

impl DecompressionKind {
    pub fn from_wia(disc: &WIADisc) -> Result<Self> {
        let _data = &disc.compr_data[..disc.compr_data_len as usize];
        match disc.compression() {
            WIACompression::None => Ok(Self::None),
            #[cfg(feature = "compress-bzip2")]
            WIACompression::Bzip2 => Ok(Self::Bzip2),
            #[cfg(feature = "compress-lzma")]
            WIACompression::Lzma => Ok(Self::Lzma(Box::from(_data))),
            #[cfg(feature = "compress-lzma")]
            WIACompression::Lzma2 => Ok(Self::Lzma2(Box::from(_data))),
            #[cfg(feature = "compress-zstd")]
            WIACompression::Zstandard => Ok(Self::Zstandard),
            comp => Err(Error::DiscFormat(format!("Unsupported WIA/RVZ compression: {:?}", comp))),
        }
    }

    pub fn wrap<'a, R>(&mut self, reader: R) -> io::Result<Box<dyn Read + 'a>>
    where R: Read + 'a {
        Ok(match self {
            DecompressionKind::None => Box::new(reader),
            #[cfg(feature = "compress-zlib")]
            DecompressionKind::Deflate => unimplemented!("DecompressionKind::Deflate.wrap"),
            #[cfg(feature = "compress-bzip2")]
            DecompressionKind::Bzip2 => Box::new(bzip2::read::BzDecoder::new(reader)),
            #[cfg(feature = "compress-lzma")]
            DecompressionKind::Lzma(data) => {
                use lzma_util::{lzma_props_decode, new_lzma_decoder};
                let stream = new_lzma_decoder(&lzma_props_decode(data)?)?;
                Box::new(liblzma::read::XzDecoder::new_stream(reader, stream))
            }
            #[cfg(feature = "compress-lzma")]
            DecompressionKind::Lzma2(data) => {
                use lzma_util::{lzma2_props_decode, new_lzma2_decoder};
                let stream = new_lzma2_decoder(&lzma2_props_decode(data)?)?;
                Box::new(liblzma::read::XzDecoder::new_stream(reader, stream))
            }
            #[cfg(feature = "compress-zstd")]
            DecompressionKind::Zstandard => Box::new(zstd::stream::Decoder::new(reader)?),
        })
    }
}

pub struct Compressor {
    pub kind: Compression,
    pub cache: CompressorCache,
    pub buffer: Vec<u8>,
}

impl Clone for Compressor {
    fn clone(&self) -> Self {
        Self {
            kind: self.kind,
            cache: CompressorCache::default(),
            buffer: Vec::with_capacity(self.buffer.capacity()),
        }
    }
}

#[derive(Default)]
pub enum CompressorCache {
    #[default]
    None,
    #[cfg(feature = "compress-zlib")]
    Deflate(Box<miniz_oxide::deflate::core::CompressorOxide>),
    #[cfg(feature = "compress-zstd")]
    Zstandard(zstd_safe::CCtx<'static>),
}

impl Compressor {
    pub fn new(kind: Compression, buffer_size: usize) -> Self {
        Self { kind, cache: CompressorCache::default(), buffer: Vec::with_capacity(buffer_size) }
    }

    /// Compresses the given buffer into the internal `buffer`. The buffer's capacity will not
    /// be extended. Instead, if the compressed data is larger than the buffer's capacity, this
    /// function will bail and return `false`.
    #[instrument(name = "Compressor::compress", skip_all)]
    pub fn compress(&mut self, buf: &[u8]) -> io::Result<bool> {
        self.buffer.clear();
        match self.kind {
            #[cfg(feature = "compress-zlib")]
            Compression::Deflate(level) => {
                let compressor = match &mut self.cache {
                    CompressorCache::Deflate(compressor) => compressor,
                    _ => {
                        self.cache = CompressorCache::Deflate(Box::new(
                            miniz_oxide::deflate::core::CompressorOxide::new(
                                miniz_oxide::deflate::core::create_comp_flags_from_zip_params(
                                    level as i32,
                                    15,
                                    0,
                                ),
                            ),
                        ));
                        match &mut self.cache {
                            CompressorCache::Deflate(compressor) => compressor,
                            _ => unreachable!(),
                        }
                    }
                };
                self.buffer.resize(self.buffer.capacity(), 0);
                compressor.reset();
                let (status, _, out_size) = miniz_oxide::deflate::core::compress(
                    compressor.as_mut(),
                    buf,
                    self.buffer.as_mut_slice(),
                    miniz_oxide::deflate::core::TDEFLFlush::Finish,
                );
                self.buffer.truncate(out_size);
                Ok(status == miniz_oxide::deflate::core::TDEFLStatus::Done)
            }
            #[cfg(feature = "compress-bzip2")]
            Compression::Bzip2(level) => {
                let compression = bzip2::Compression::new(level as u32);
                let mut compress = bzip2::Compress::new(compression, 30);
                let status = compress.compress_vec(buf, &mut self.buffer, bzip2::Action::Finish)?;
                Ok(status == bzip2::Status::StreamEnd)
            }
            #[cfg(feature = "compress-lzma")]
            Compression::Lzma(level) => {
                let options = liblzma::stream::LzmaOptions::new_preset(level as u32)?;
                let mut encoder = lzma_util::new_lzma_encoder(&options)?;
                let status =
                    encoder.process_vec(buf, &mut self.buffer, liblzma::stream::Action::Finish)?;
                Ok(status == liblzma::stream::Status::StreamEnd)
            }
            #[cfg(feature = "compress-lzma")]
            Compression::Lzma2(level) => {
                let options = liblzma::stream::LzmaOptions::new_preset(level as u32)?;
                let mut encoder = lzma_util::new_lzma2_encoder(&options)?;
                let status =
                    encoder.process_vec(buf, &mut self.buffer, liblzma::stream::Action::Finish)?;
                Ok(status == liblzma::stream::Status::StreamEnd)
            }
            #[cfg(feature = "compress-zstd")]
            Compression::Zstandard(level) => {
                let ctx = match &mut self.cache {
                    CompressorCache::Zstandard(compressor) => compressor,
                    _ => {
                        let mut ctx = zstd_safe::CCtx::create();
                        ctx.init(level as i32).map_err(zstd_util::map_error_code)?;
                        self.cache = CompressorCache::Zstandard(ctx);
                        match &mut self.cache {
                            CompressorCache::Zstandard(compressor) => compressor,
                            _ => unreachable!(),
                        }
                    }
                };
                match ctx.compress2(&mut self.buffer, buf) {
                    Ok(_) => Ok(true),
                    // dstSize_tooSmall
                    Err(e) if e == -70isize as usize => Ok(false),
                    Err(e) => Err(zstd_util::map_error_code(e)),
                }
            }
            _ => Err(io::Error::new(
                io::ErrorKind::Other,
                format!("Unsupported compression: {:?}", self.kind),
            )),
        }
    }
}
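
// A minimal usage sketch of the bail-and-fall-back behavior above (the level,
// block size, and the write_* helpers are illustrative assumptions):
//
//     let mut compressor = Compressor::new(Compression::Zstandard(19), 0x20000);
//     if compressor.compress(&block)? {
//         write_compressed(&compressor.buffer);
//     } else {
//         write_raw(&block); // compressed output exceeded the buffer capacity
//     }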

#[cfg(feature = "compress-lzma")]
pub mod lzma_util {
    use std::{
        cmp::Ordering,
        io::{Error, ErrorKind, Result},
    };

    use liblzma::stream::{Filters, LzmaOptions, Stream};

    use crate::util::{array_ref, array_ref_mut, static_assert};

    /// Decodes the LZMA Properties byte (lc/lp/pb).
    /// See `lzma_lzma_lclppb_decode` in `liblzma/lzma/lzma_decoder.c`.
    pub fn lzma_lclppb_decode(options: &mut LzmaOptions, byte: u8) -> Result<()> {
        let mut d = byte as u32;
        if d >= (9 * 5 * 5) {
            return Err(Error::new(
                ErrorKind::InvalidData,
                format!("Invalid LZMA props byte: {}", d),
            ));
        }
            Ordering::Equal => u32::MAX,
            Ordering::Less => (2 | (d & 1)) << (d / 2 + 11),
        });
        Ok(options)
        options.literal_context_bits(d % 9);
        d /= 9;
        options.position_bits(d / 5);
        options.literal_position_bits(d % 5);
        Ok(())
    }
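
    // Worked example of the props byte round-trip: the default LZMA settings
    // lc=3, lp=0, pb=2 encode to (2 * 5 + 0) * 9 + 3 = 93 (0x5D); decoding 93
    // gives lc = 93 % 9 = 3, then d = 93 / 9 = 10, so pb = 10 / 5 = 2 and
    // lp = 10 % 5 = 0.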

    /// Encodes the LZMA Properties byte (lc/lp/pb).
    /// See `lzma_lzma_lclppb_encode` in `liblzma/lzma/lzma_encoder.c`.
    pub fn lzma_lclppb_encode(options: &LzmaOptions) -> Result<u8> {
        let options = get_options_sys(options);
        let byte = (options.pb * 5 + options.lp) * 9 + options.lc;
        if byte >= (9 * 5 * 5) {
            return Err(Error::new(
                ErrorKind::InvalidData,
                format!("Invalid LZMA props byte: {}", byte),
            ));
        }
        Ok(byte as u8)
    }

    /// Decodes LZMA properties.
    /// See `lzma_lzma_props_decode` in `liblzma/lzma/lzma_decoder.c`.
    pub fn lzma_props_decode(props: &[u8]) -> Result<LzmaOptions> {
        if props.len() != 5 {
            return Err(Error::new(
                ErrorKind::InvalidData,
                format!("Invalid LZMA props length: {}", props.len()),
            ));
        }
        let mut options = LzmaOptions::new();
        lzma_lclppb_decode(&mut options, props[0])?;
        options.dict_size(u32::from_le_bytes(*array_ref![props, 1, 4]));
        Ok(options)
    }

    /// Encodes LZMA properties.
    /// See `lzma_lzma_props_encode` in `liblzma/lzma/lzma_encoder.c`.
    pub fn lzma_props_encode(options: &LzmaOptions) -> Result<[u8; 5]> {
        let mut props = [0u8; 5];
        props[0] = lzma_lclppb_encode(options)?;
        *array_ref_mut![props, 1, 4] = get_options_sys(options).dict_size.to_le_bytes();
        Ok(props)
    }

    /// Decodes LZMA2 properties.
    /// See `lzma_lzma2_props_decode` in `liblzma/lzma/lzma2_decoder.c`.
    pub fn lzma2_props_decode(props: &[u8]) -> Result<LzmaOptions> {
        if props.len() != 1 {
            return Err(Error::new(
                ErrorKind::InvalidData,
                format!("Invalid LZMA2 props length: {}", props.len()),
            ));
        }
        let d = props[0] as u32;
        let mut options = LzmaOptions::new();
        options.dict_size(match d.cmp(&40) {
            Ordering::Greater => {
                return Err(Error::new(
                    ErrorKind::InvalidData,
                    format!("Invalid LZMA2 props byte: {}", d),
                ));
            }
            Ordering::Equal => u32::MAX,
            Ordering::Less => (2 | (d & 1)) << (d / 2 + 11),
        });
        Ok(options)
    }

    /// Encodes LZMA2 properties.
    /// See `lzma_lzma2_props_encode` in `liblzma/lzma/lzma2_encoder.c`.
    pub fn lzma2_props_encode(options: &LzmaOptions) -> Result<[u8; 1]> {
        let options = get_options_sys(options);
        let mut d = options.dict_size.max(liblzma_sys::LZMA_DICT_SIZE_MIN);

        // Round up to the next 2^n - 1 or 2^n + 2^(n - 1) - 1 depending
        // on which one is the next:
        d -= 1;
        d |= d >> 2;
        d |= d >> 3;
        d |= d >> 4;
        d |= d >> 8;
        d |= d >> 16;

        // Get the highest two bits using the proper encoding:
        if d == u32::MAX {
            d = 40;
        } else {
            d = get_dist_slot(d + 1) - 24;
        }

        Ok([d as u8])
    }
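
    // Worked example of the round-trip above: a 1 MiB dictionary (2^20)
    // rounds to 0xFFFFF, then get_dist_slot(0x100000) = 40, so the props
    // byte is 40 - 24 = 16; lzma2_props_decode maps 16 back via
    // (2 | (16 & 1)) << (16 / 2 + 11) = 2^20.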

    /// Creates a new raw LZMA decoder with the given options.
    pub fn new_lzma_decoder(options: &LzmaOptions) -> Result<Stream> {
        let mut filters = Filters::new();
        filters.lzma1(options);
        Stream::new_raw_decoder(&filters).map_err(Error::from)
    }

    /// Creates a new raw LZMA encoder with the given options.
    pub fn new_lzma_encoder(options: &LzmaOptions) -> Result<Stream> {
        let mut filters = Filters::new();
        filters.lzma1(options);
        Stream::new_raw_encoder(&filters).map_err(Error::from)
    }

    /// Creates a new raw LZMA2 decoder with the given options.
    pub fn new_lzma2_decoder(options: &LzmaOptions) -> Result<Stream> {
        let mut filters = Filters::new();
        filters.lzma2(options);
        Stream::new_raw_decoder(&filters).map_err(Error::from)
    }

    /// Creates a new raw LZMA2 encoder with the given options.
    pub fn new_lzma2_encoder(options: &LzmaOptions) -> Result<Stream> {
        let mut filters = Filters::new();
        filters.lzma2(options);
        Stream::new_raw_encoder(&filters).map_err(Error::from)
    }

    /// liblzma does not expose any accessors for `LzmaOptions`, so we have to
    /// cast it into the internal `lzma_options_lzma` struct.
    #[inline]
    fn get_options_sys(options: &LzmaOptions) -> &liblzma_sys::lzma_options_lzma {
        static_assert!(size_of::<LzmaOptions>() == size_of::<liblzma_sys::lzma_options_lzma>());
        unsafe { &*(options as *const LzmaOptions as *const liblzma_sys::lzma_options_lzma) }
    }

    /// See `get_dist_slot` in `liblzma/lzma/fastpos.h`.
    fn get_dist_slot(dist: u32) -> u32 {
        if dist <= 4 {
            dist
        } else {
            let i = dist.leading_zeros() ^ 31;
            (i + i) + ((dist >> (i - 1)) & 1)
        }
    }
}

/// Creates a new raw LZMA decoder with the given options.
#[cfg(feature = "compress-lzma")]
pub fn new_lzma_decoder<R>(
    reader: R,
    options: &liblzma::stream::LzmaOptions,
) -> std::io::Result<liblzma::read::XzDecoder<R>>
where
    R: std::io::Read,
{
    let mut filters = liblzma::stream::Filters::new();
    filters.lzma1(options);
    let stream =
        liblzma::stream::Stream::new_raw_decoder(&filters).map_err(std::io::Error::from)?;
    Ok(liblzma::read::XzDecoder::new_stream(reader, stream))
}
#[cfg(feature = "compress-zstd")]
mod zstd_util {
    use std::io;

/// Creates a new raw LZMA2 decoder with the given options.
#[cfg(feature = "compress-lzma")]
pub fn new_lzma2_decoder<R>(
    reader: R,
    options: &liblzma::stream::LzmaOptions,
) -> std::io::Result<liblzma::read::XzDecoder<R>>
where
    R: std::io::Read,
{
    let mut filters = liblzma::stream::Filters::new();
    filters.lzma2(options);
    let stream =
        liblzma::stream::Stream::new_raw_decoder(&filters).map_err(std::io::Error::from)?;
    Ok(liblzma::read::XzDecoder::new_stream(reader, stream))
    pub fn map_error_code(code: usize) -> io::Error {
        let msg = zstd_safe::get_error_name(code);
        io::Error::new(io::ErrorKind::Other, msg.to_string())
    }
}
@@ -0,0 +1,253 @@
use std::{thread, thread::JoinHandle};

use bytes::Bytes;
use crossbeam_channel::Sender;
use digest::Digest;
use tracing::instrument;

use crate::{
    io::nkit::NKitHeader,
    write::{DiscFinalization, ProcessOptions},
};

pub type DigestThread = (Sender<Bytes>, JoinHandle<DigestResult>);

pub fn digest_thread<H>() -> DigestThread
where H: Hasher + Send + 'static {
    let (tx, rx) = crossbeam_channel::bounded::<Bytes>(1);
    let handle = thread::Builder::new()
        .name(format!("Digest {}", H::NAME))
        .spawn(move || {
            let mut hasher = H::new();
            while let Ok(data) = rx.recv() {
                hasher.update(data.as_ref());
            }
            hasher.finalize()
        })
        .expect("Failed to spawn digest thread");
    (tx, handle)
}

pub struct DigestManager {
    threads: Vec<DigestThread>,
}

impl DigestManager {
    pub fn new(options: &ProcessOptions) -> Self {
        let mut threads = Vec::new();
        if options.digest_crc32 {
            threads.push(digest_thread::<crc32fast::Hasher>());
        }
        if options.digest_md5 {
            #[cfg(feature = "openssl")]
            threads.push(digest_thread::<ossl::HasherMD5>());
            #[cfg(not(feature = "openssl"))]
            threads.push(digest_thread::<md5::Md5>());
        }
        if options.digest_sha1 {
            #[cfg(feature = "openssl")]
            threads.push(digest_thread::<ossl::HasherSHA1>());
            #[cfg(not(feature = "openssl"))]
            threads.push(digest_thread::<sha1::Sha1>());
        }
        if options.digest_xxh64 {
            threads.push(digest_thread::<xxhash_rust::xxh64::Xxh64>());
        }
        DigestManager { threads }
    }

    #[instrument(name = "DigestManager::send", skip_all)]
    pub fn send(&self, data: Bytes) {
        let mut sent = 0usize;
        // Non-blocking send to all threads
        for (idx, (tx, _)) in self.threads.iter().enumerate() {
            if tx.try_send(data.clone()).is_ok() {
                sent |= 1 << idx;
            }
        }
        // Blocking send to any remaining threads
        for (idx, (tx, _)) in self.threads.iter().enumerate() {
            if sent & (1 << idx) == 0 {
                tx.send(data.clone()).expect("Failed to send data to digest thread");
            }
        }
    }

    #[instrument(name = "DigestManager::finish", skip_all)]
    pub fn finish(self) -> DigestResults {
        let mut results = DigestResults { crc32: None, md5: None, sha1: None, xxh64: None };
        for (tx, handle) in self.threads {
            drop(tx); // Close channel
            match handle.join().unwrap() {
                DigestResult::Crc32(v) => results.crc32 = Some(v),
                DigestResult::Md5(v) => results.md5 = Some(v),
                DigestResult::Sha1(v) => results.sha1 = Some(v),
                DigestResult::Xxh64(v) => results.xxh64 = Some(v),
            }
        }
        results
    }
}
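
// A minimal usage sketch (`hash_all` is hypothetical, not part of this file):
// fan a stream out to every enabled digest thread, then collect the results.
//
//     fn hash_all(mut reader: impl std::io::Read, options: &ProcessOptions)
//         -> std::io::Result<DigestResults>
//     {
//         let digest = DigestManager::new(options);
//         let mut buf = vec![0u8; 0x8000];
//         loop {
//             let n = reader.read(&mut buf)?;
//             if n == 0 { break; }
//             digest.send(Bytes::copy_from_slice(&buf[..n]));
//         }
//         Ok(digest.finish())
//     }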

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DigestResult {
    Crc32(u32),
    Md5([u8; 16]),
    Sha1([u8; 20]),
    Xxh64(u64),
}

pub trait Hasher {
    const NAME: &'static str;

    fn new() -> Self;
    fn finalize(self) -> DigestResult;
    fn update(&mut self, data: &[u8]);
}

impl Hasher for md5::Md5 {
    const NAME: &'static str = "MD5";

    fn new() -> Self { Digest::new() }

    fn finalize(self) -> DigestResult { DigestResult::Md5(Digest::finalize(self).into()) }

    #[allow(unused_braces)] // https://github.com/rust-lang/rust/issues/116347
    #[instrument(name = "md5::Md5::update", skip_all)]
    fn update(&mut self, data: &[u8]) { Digest::update(self, data) }
}

impl Hasher for sha1::Sha1 {
    const NAME: &'static str = "SHA-1";

    fn new() -> Self { Digest::new() }

    fn finalize(self) -> DigestResult { DigestResult::Sha1(Digest::finalize(self).into()) }

    #[allow(unused_braces)] // https://github.com/rust-lang/rust/issues/116347
    #[instrument(name = "sha1::Sha1::update", skip_all)]
    fn update(&mut self, data: &[u8]) { Digest::update(self, data) }
}

impl Hasher for crc32fast::Hasher {
    const NAME: &'static str = "CRC32";

    fn new() -> Self { crc32fast::Hasher::new() }

    fn finalize(self) -> DigestResult { DigestResult::Crc32(crc32fast::Hasher::finalize(self)) }

    #[allow(unused_braces)] // https://github.com/rust-lang/rust/issues/116347
    #[instrument(name = "crc32fast::Hasher::update", skip_all)]
    fn update(&mut self, data: &[u8]) { crc32fast::Hasher::update(self, data) }
}

impl Hasher for xxhash_rust::xxh64::Xxh64 {
    const NAME: &'static str = "XXH64";

    fn new() -> Self { xxhash_rust::xxh64::Xxh64::new(0) }

    fn finalize(self) -> DigestResult {
        DigestResult::Xxh64(xxhash_rust::xxh64::Xxh64::digest(&self))
    }

    #[allow(unused_braces)] // https://github.com/rust-lang/rust/issues/116347
    #[instrument(name = "xxhash_rust::xxh64::Xxh64::update", skip_all)]
    fn update(&mut self, data: &[u8]) { xxhash_rust::xxh64::Xxh64::update(self, data) }
}

#[cfg(feature = "openssl")]
mod ossl {
    use tracing::instrument;

    use super::{DigestResult, Hasher};

    pub type HasherMD5 = HashWrapper<MessageDigestMD5>;
    pub type HasherSHA1 = HashWrapper<MessageDigestSHA1>;

    pub struct HashWrapper<T>
    where T: MessageDigest
    {
        hasher: openssl::hash::Hasher,
        _marker: std::marker::PhantomData<T>,
    }

    impl<T> HashWrapper<T>
    where T: MessageDigest
    {
        fn new() -> Self {
            Self {
                hasher: openssl::hash::Hasher::new(T::new()).unwrap(),
                _marker: Default::default(),
            }
        }
    }

    pub trait MessageDigest {
        fn new() -> openssl::hash::MessageDigest;
    }

    pub struct MessageDigestMD5;

    impl MessageDigest for MessageDigestMD5 {
        fn new() -> openssl::hash::MessageDigest { openssl::hash::MessageDigest::md5() }
    }

    pub struct MessageDigestSHA1;

    impl MessageDigest for MessageDigestSHA1 {
        fn new() -> openssl::hash::MessageDigest { openssl::hash::MessageDigest::sha1() }
    }

    impl Hasher for HasherMD5 {
        const NAME: &'static str = "MD5";

        fn new() -> Self { Self::new() }

        fn finalize(mut self) -> DigestResult {
            DigestResult::Md5((*self.hasher.finish().unwrap()).try_into().unwrap())
        }

        #[allow(unused_braces)] // https://github.com/rust-lang/rust/issues/116347
        #[instrument(name = "ossl::HasherMD5::update", skip_all)]
        fn update(&mut self, data: &[u8]) { self.hasher.update(data).unwrap() }
    }

    impl Hasher for HasherSHA1 {
        const NAME: &'static str = "SHA-1";

        fn new() -> Self { Self::new() }

        fn finalize(mut self) -> DigestResult {
            DigestResult::Sha1((*self.hasher.finish().unwrap()).try_into().unwrap())
        }

        #[allow(unused_braces)] // https://github.com/rust-lang/rust/issues/116347
        #[instrument(name = "ossl::HasherSHA1::update", skip_all)]
        fn update(&mut self, data: &[u8]) { self.hasher.update(data).unwrap() }
    }
}

pub struct DigestResults {
    pub crc32: Option<u32>,
    pub md5: Option<[u8; 16]>,
    pub sha1: Option<[u8; 20]>,
    pub xxh64: Option<u64>,
}

impl DiscFinalization {
    pub(crate) fn apply_digests(&mut self, results: &DigestResults) {
        self.crc32 = results.crc32;
        self.md5 = results.md5;
        self.sha1 = results.sha1;
        self.xxh64 = results.xxh64;
    }
}

impl NKitHeader {
    pub(crate) fn apply_digests(&mut self, results: &DigestResults) {
        self.crc32 = results.crc32;
        self.md5 = results.md5;
        self.sha1 = results.sha1;
        self.xxh64 = results.xxh64;
    }
}
@@ -1,22 +1,30 @@
//! Lagged Fibonacci generator for GC / Wii partition junk data.

use std::{
    cmp::min,
    io,
    io::{Read, Write},
};

use bytes::Buf;
use zerocopy::{transmute_ref, IntoBytes};

use crate::disc::SECTOR_SIZE;

/// Value of `k` for the LFG.
pub const LFG_K: usize = 521;

/// Value of `j` for the LFG.
pub const LFG_J: usize = 32;

/// Number of 32-bit words in the seed.
pub const SEED_SIZE: usize = 17;

/// Lagged Fibonacci generator for GC / Wii partition junk data.
///
/// References (license CC0-1.0):
/// https://github.com/dolphin-emu/dolphin/blob/a0f555648c27ec0c928f6b1e1fcad5e2d7c4d0c4/docs/WiaAndRvz.md
/// https://github.com/dolphin-emu/dolphin/blob/a0f555648c27ec0c928f6b1e1fcad5e2d7c4d0c4/Source/Core/DiscIO/LaggedFibonacciGenerator.cpp
/// - [WiaAndRvz.md](https://github.com/dolphin-emu/dolphin/blob/a0f555648c27ec0c928f6b1e1fcad5e2d7c4d0c4/docs/WiaAndRvz.md)
/// - [LaggedFibonacciGenerator.cpp](https://github.com/dolphin-emu/dolphin/blob/a0f555648c27ec0c928f6b1e1fcad5e2d7c4d0c4/Source/Core/DiscIO/LaggedFibonacciGenerator.cpp)
pub struct LaggedFibonacci {
    buffer: [u32; LFG_K],
    position: usize,
@@ -46,7 +54,6 @@ impl LaggedFibonacci {
    /// Initializes the LFG with the standard seed for a given disc ID, disc number, and sector.
    /// The partition offset is used to determine the sector and how many bytes to skip within the
    /// sector.
    #[allow(clippy::missing_inline_in_public_items)]
    pub fn init_with_seed(&mut self, disc_id: [u8; 4], disc_num: u8, partition_offset: u64) {
        let seed = u32::from_be_bytes([
            disc_id[2],
@@ -73,7 +80,6 @@ impl LaggedFibonacci {

    /// Initializes the LFG with the seed read from a reader. The seed is assumed to be big-endian.
    /// This is used for rebuilding junk data in WIA/RVZ files.
    #[allow(clippy::missing_inline_in_public_items)]
    pub fn init_with_reader<R>(&mut self, reader: &mut R) -> io::Result<()>
    where R: Read + ?Sized {
        reader.read_exact(self.buffer[..SEED_SIZE].as_mut_bytes())?;
@@ -85,6 +91,22 @@ impl LaggedFibonacci {
        Ok(())
    }

    /// Initializes the LFG with the seed read from a [`Buf`]. The seed is assumed to be big-endian.
    /// This is used for rebuilding junk data in WIA/RVZ files.
    pub fn init_with_buf(&mut self, reader: &mut impl Buf) -> io::Result<()> {
        let out = self.buffer[..SEED_SIZE].as_mut_bytes();
        if reader.remaining() < out.len() {
            return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "Filling LFG seed"));
        }
        reader.copy_to_slice(out);
        for x in self.buffer[..SEED_SIZE].iter_mut() {
            *x = u32::from_be(*x);
        }
        self.position = 0;
        self.init();
        Ok(())
    }

    /// Advances the LFG by one step.
    fn forward(&mut self) {
        for i in 0..LFG_J {
@@ -96,7 +118,6 @@ impl LaggedFibonacci {
|
|||
}
|
||||
|
||||
/// Skips `n` bytes of junk data.
|
||||
#[allow(clippy::missing_inline_in_public_items)]
|
||||
pub fn skip(&mut self, n: usize) {
|
||||
self.position += n;
|
||||
while self.position >= LFG_K * 4 {
|
||||
|
@@ -105,8 +126,22 @@ impl LaggedFibonacci {
|
|||
}
|
||||
}
|
||||
|
||||
// pub fn backward(&mut self) {
|
||||
// for i in (LFG_J..LFG_K).rev() {
|
||||
// self.buffer[i] ^= self.buffer[i - LFG_J];
|
||||
// }
|
||||
// for i in (0..LFG_J).rev() {
|
||||
// self.buffer[i] ^= self.buffer[i + LFG_K - LFG_J];
|
||||
// }
|
||||
// }
|
||||
|
||||
// pub fn get_seed(&mut self, seed: &mut [u8; SEED_SIZE]) {
|
||||
// for i in 0..SEED_SIZE {
|
||||
// seed[i] = self.buffer[i].to_be_bytes()[3];
|
||||
// }
|
||||
// }
|
||||
|
||||
/// Fills the buffer with junk data.
|
||||
#[allow(clippy::missing_inline_in_public_items)]
|
||||
pub fn fill(&mut self, mut buf: &mut [u8]) {
|
||||
while !buf.is_empty() {
|
||||
let len = min(buf.len(), LFG_K * 4 - self.position);
|
||||
|
@@ -122,7 +157,6 @@ impl LaggedFibonacci {
|
|||
}
|
||||
|
||||
/// Writes junk data to the output stream.
|
||||
#[allow(clippy::missing_inline_in_public_items)]
|
||||
pub fn write<W>(&mut self, w: &mut W, mut len: u64) -> io::Result<()>
|
||||
where W: Write + ?Sized {
|
||||
while len > 0 {
|
||||
|
@@ -141,7 +175,6 @@ impl LaggedFibonacci {
|
|||
|
||||
/// The junk data on GC / Wii discs is reinitialized every 32KB. This function handles the
|
||||
/// wrapping logic and reinitializes the LFG at sector boundaries.
|
||||
#[allow(clippy::missing_inline_in_public_items)]
|
||||
pub fn fill_sector_chunked(
|
||||
&mut self,
|
||||
mut buf: &mut [u8],
|
||||
|
@@ -161,7 +194,6 @@ impl LaggedFibonacci {
|
|||
|
||||
/// The junk data on GC / Wii discs is reinitialized every 32KB. This function handles the
|
||||
/// wrapping logic and reinitializes the LFG at sector boundaries.
|
||||
#[allow(clippy::missing_inline_in_public_items)]
|
||||
pub fn write_sector_chunked<W>(
|
||||
&mut self,
|
||||
w: &mut W,
|
||||
|
@@ -182,6 +214,33 @@ impl LaggedFibonacci {
|
|||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Checks if the data matches the junk data generated by the LFG. This function handles the
|
||||
/// wrapping logic and reinitializes the LFG at sector boundaries.
|
||||
pub fn check_sector_chunked(
|
||||
&mut self,
|
||||
mut buf: &[u8],
|
||||
disc_id: [u8; 4],
|
||||
disc_num: u8,
|
||||
mut partition_offset: u64,
|
||||
) -> bool {
|
||||
if buf.is_empty() {
|
||||
return false;
|
||||
}
|
||||
let mut lfg_buf = [0u8; SECTOR_SIZE];
|
||||
while !buf.is_empty() {
|
||||
self.init_with_seed(disc_id, disc_num, partition_offset);
|
||||
let len =
|
||||
(SECTOR_SIZE - (partition_offset % SECTOR_SIZE as u64) as usize).min(buf.len());
|
||||
self.fill(&mut lfg_buf[..len]);
|
||||
if buf[..len] != lfg_buf[..len] {
|
||||
return false;
|
||||
}
|
||||
buf = &buf[len..];
|
||||
partition_offset += len as u64;
|
||||
}
|
||||
true
|
||||
}
|
||||
}
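// Usage sketch (illustrative disc ID and offset, not part of this diff):
// verify that a candidate region is LFG junk rather than real data.
//
//     let mut lfg = LaggedFibonacci::default();
//     let is_junk = lfg.check_sector_chunked(&data, *b"GALE", 0, partition_offset);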
|
||||
|
||||
#[cfg(test)]
|
||||
|
|
|
@@ -1,20 +1,139 @@
|
|||
use std::ops::{Div, Rem};
|
||||
//! Utility functions and types.
|
||||
|
||||
use std::{
|
||||
io,
|
||||
io::{Read, Seek, SeekFrom},
|
||||
ops::{Div, Rem},
|
||||
};
|
||||
|
||||
use io::{BufRead, Write};
|
||||
|
||||
pub(crate) mod aes;
|
||||
pub(crate) mod compress;
|
||||
pub(crate) mod lfg;
|
||||
pub(crate) mod digest;
|
||||
pub mod lfg;
|
||||
pub(crate) mod read;
|
||||
pub(crate) mod take_seek;
|
||||
|
||||
/// Copies from a buffered reader to a writer without extra allocations.
|
||||
pub fn buf_copy<R, W>(reader: &mut R, writer: &mut W) -> io::Result<u64>
|
||||
where
|
||||
R: BufRead + ?Sized,
|
||||
W: Write + ?Sized,
|
||||
{
|
||||
let mut copied = 0;
|
||||
loop {
|
||||
let buf = reader.fill_buf()?;
|
||||
let len = buf.len();
|
||||
if len == 0 {
|
||||
break;
|
||||
}
|
||||
writer.write_all(buf)?;
|
||||
reader.consume(len);
|
||||
copied += len as u64;
|
||||
}
|
||||
Ok(copied)
|
||||
}
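// Usage sketch (hypothetical paths, not part of this diff): stream a file
// through `buf_copy` without an intermediate buffer allocation.
//
//     let mut reader = std::io::BufReader::new(std::fs::File::open("in.bin")?);
//     let mut writer = std::fs::File::create("out.bin")?;
//     let copied = buf_copy(&mut reader, &mut writer)?;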
|
||||
|
||||
/// A reader with a fixed window.
|
||||
#[derive(Clone)]
|
||||
pub struct WindowedReader<T>
|
||||
where T: BufRead + Seek
|
||||
{
|
||||
base: T,
|
||||
pos: u64,
|
||||
begin: u64,
|
||||
end: u64,
|
||||
}
|
||||
|
||||
impl<T> WindowedReader<T>
|
||||
where T: BufRead + Seek
|
||||
{
|
||||
/// Creates a new windowed stream with offset and size.
|
||||
///
|
||||
/// Seeks the underlying stream immediately.
|
||||
#[inline]
|
||||
pub fn new(mut base: T, offset: u64, size: u64) -> io::Result<Self> {
|
||||
base.seek(SeekFrom::Start(offset))?;
|
||||
Ok(Self { base, pos: offset, begin: offset, end: offset + size })
|
||||
}
|
||||
|
||||
/// Returns the length of the window.
|
||||
#[inline]
|
||||
#[allow(clippy::len_without_is_empty)]
|
||||
pub fn len(&self) -> u64 { self.end - self.begin }
|
||||
}
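// Usage sketch (illustrative offsets, not part of this diff): expose bytes
// [0x100, 0x300) of a stream as an independent window. Reads stop at the
// window end, and seeks are clamped to it.
//
//     let file = std::io::BufReader::new(std::fs::File::open("disc.iso")?);
//     let mut window = WindowedReader::new(file, 0x100, 0x200)?;
//     assert_eq!(window.len(), 0x200);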
|
||||
|
||||
impl<T> Read for WindowedReader<T>
|
||||
where T: BufRead + Seek
|
||||
{
|
||||
#[inline]
|
||||
fn read(&mut self, out: &mut [u8]) -> io::Result<usize> {
|
||||
let buf = self.fill_buf()?;
|
||||
let len = buf.len().min(out.len());
|
||||
out[..len].copy_from_slice(&buf[..len]);
|
||||
self.consume(len);
|
||||
Ok(len)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> BufRead for WindowedReader<T>
|
||||
where T: BufRead + Seek
|
||||
{
|
||||
#[inline]
|
||||
fn fill_buf(&mut self) -> io::Result<&[u8]> {
|
||||
let limit = self.end.saturating_sub(self.pos);
|
||||
if limit == 0 {
|
||||
return Ok(&[]);
|
||||
}
|
||||
let buf = self.base.fill_buf()?;
|
||||
let max = (buf.len() as u64).min(limit) as usize;
|
||||
Ok(&buf[..max])
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn consume(&mut self, amt: usize) {
|
||||
self.base.consume(amt);
|
||||
self.pos += amt as u64;
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Seek for WindowedReader<T>
|
||||
where T: BufRead + Seek
|
||||
{
|
||||
#[inline]
|
||||
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
|
||||
let mut pos = match pos {
|
||||
SeekFrom::Start(p) => self.begin + p,
|
||||
SeekFrom::End(p) => self.end.saturating_add_signed(p),
|
||||
SeekFrom::Current(p) => self.pos.saturating_add_signed(p),
|
||||
};
|
||||
if pos < self.begin {
|
||||
pos = self.begin;
|
||||
} else if pos > self.end {
|
||||
pos = self.end;
|
||||
}
|
||||
let result = self.base.seek(SeekFrom::Start(pos))?;
|
||||
self.pos = result;
|
||||
Ok(result - self.begin)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn stream_position(&mut self) -> io::Result<u64> { Ok(self.pos) }
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub(crate) fn div_rem<T>(x: T, y: T) -> (T, T)
|
||||
where T: Div<Output = T> + Rem<Output = T> + Copy {
|
||||
let quot = x / y;
|
||||
let rem = x % y;
|
||||
(quot, rem)
|
||||
(x / y, x % y)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub(crate) fn align_up_32(n: u32, align: u32) -> u32 { (n + align - 1) & !(align - 1) }
|
||||
|
||||
#[inline]
|
||||
pub(crate) fn align_up_64(n: u64, align: u64) -> u64 { (n + align - 1) & !(align - 1) }
|
||||
|
||||
/// Creates a fixed-size array reference from a slice.
|
||||
#[macro_export]
|
||||
macro_rules! array_ref {
|
||||
($slice:expr, $offset:expr, $size:expr) => {{
|
||||
#[inline(always)]
|
||||
|
@@ -24,9 +143,9 @@ macro_rules! array_ref {
|
|||
to_array(&$slice[$offset..$offset + $size])
|
||||
}};
|
||||
}
|
||||
pub(crate) use array_ref;
|
||||
|
||||
/// Creates a mutable fixed-size array reference from a slice.
|
||||
#[macro_export]
|
||||
macro_rules! array_ref_mut {
|
||||
($slice:expr, $offset:expr, $size:expr) => {{
|
||||
#[inline(always)]
|
||||
|
@@ -36,11 +155,28 @@ macro_rules! array_ref_mut {
|
|||
to_array(&mut $slice[$offset..$offset + $size])
|
||||
}};
|
||||
}
|
||||
pub(crate) use array_ref_mut;
|
||||
|
||||
/// Compile-time assertion.
|
||||
#[macro_export]
|
||||
macro_rules! static_assert {
|
||||
($condition:expr) => {
|
||||
const _: () = core::assert!($condition);
|
||||
};
|
||||
}
|
||||
pub(crate) use static_assert;
|
||||
|
||||
macro_rules! impl_read_for_bufread {
|
||||
($ty:ident) => {
|
||||
impl std::io::Read for $ty {
|
||||
fn read(&mut self, out: &mut [u8]) -> std::io::Result<usize> {
|
||||
use std::io::BufRead;
|
||||
let buf = self.fill_buf()?;
|
||||
let len = buf.len().min(out.len());
|
||||
out[..len].copy_from_slice(&buf[..len]);
|
||||
self.consume(len);
|
||||
Ok(len)
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
pub(crate) use impl_read_for_bufread;
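// Usage sketch (hypothetical type, not part of this diff): any type with a
// `BufRead`-style `fill_buf`/`consume` pair can reuse this macro to derive
// its `Read` implementation:
//
//     impl_read_for_bufread!(MyBlockDecoder);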
|
||||
|
|
|
@@ -1,4 +1,4 @@
|
|||
use std::{io, io::Read};
|
||||
use std::{io, io::Read, sync::Arc};
|
||||
|
||||
use zerocopy::{FromBytes, FromZeros, IntoBytes};
|
||||
|
||||
|
@@ -36,6 +46,16 @@ where
|
|||
Ok(ret)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn read_arc<T, R>(reader: &mut R) -> io::Result<Arc<T>>
|
||||
where
|
||||
T: FromBytes + IntoBytes,
|
||||
R: Read + ?Sized,
|
||||
{
|
||||
// TODO use Arc::new_zeroed once it's stable
|
||||
read_box(reader).map(Arc::from)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn read_box_slice<T, R>(reader: &mut R, count: usize) -> io::Result<Box<[T]>>
|
||||
where
|
||||
|
@@ -48,6 +58,16 @@ where
|
|||
Ok(ret)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn read_arc_slice<T, R>(reader: &mut R, count: usize) -> io::Result<Arc<[T]>>
|
||||
where
|
||||
T: FromBytes + IntoBytes,
|
||||
R: Read + ?Sized,
|
||||
{
|
||||
// TODO use Arc::new_zeroed once it's stable
|
||||
read_box_slice(reader, count).map(Arc::from)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn read_u16_be<R>(reader: &mut R) -> io::Result<u16>
|
||||
where R: Read + ?Sized {
|
||||
|
@@ -71,3 +91,26 @@ where R: Read + ?Sized {
|
|||
reader.read_exact(&mut buf)?;
|
||||
Ok(u64::from_be_bytes(buf))
|
||||
}
|
||||
|
||||
pub fn read_with_zero_fill<R>(r: &mut R, mut buf: &mut [u8]) -> io::Result<usize>
|
||||
where R: Read + ?Sized {
|
||||
let mut total = 0;
|
||||
while !buf.is_empty() {
|
||||
let read = r.read(buf)?;
|
||||
if read == 0 {
|
||||
// Fill remaining block with zeroes
|
||||
buf.fill(0);
|
||||
break;
|
||||
}
|
||||
buf = &mut buf[read..];
|
||||
total += read;
|
||||
}
|
||||
Ok(total)
|
||||
}
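// Usage sketch (hypothetical reader, not part of this diff): read one full
// 32 KiB sector from a possibly-short stream, zero-padding the tail.
//
//     let mut sector = [0u8; 0x8000];
//     let valid = read_with_zero_fill(&mut reader, &mut sector)?;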
|
||||
|
||||
pub fn box_to_bytes<T>(b: Box<T>) -> Box<[u8]>
|
||||
where T: IntoBytes {
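    // Added note (illustrative, not from the diff): `T: IntoBytes` guarantees
    // the value has no uninitialized padding, so reinterpreting the allocation
    // as `size_of::<T>()` raw bytes is well-defined; `Box::into_raw` /
    // `Box::from_raw` transfer ownership of the same allocation, so no copy
    // is made.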
|
||||
let p = Box::into_raw(b);
|
||||
let sp = unsafe { std::slice::from_raw_parts_mut(p as *mut u8, size_of::<T>()) };
|
||||
unsafe { Box::from_raw(sp) }
|
||||
}
|
||||
|
|
|
@@ -1,127 +0,0 @@
|
|||
// From https://github.com/jam1garner/binrw/blob/e96a1320287ec83d1f471525ffa380800ec9e124/binrw/src/io/take_seek.rs
|
||||
// MIT License
|
||||
//
|
||||
// Copyright (c) jam1garner and other contributors
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in all
|
||||
// copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
// SOFTWARE.
|
||||
#![allow(dead_code)]
|
||||
//! Types for seekable reader adapters which limit the number of bytes read from
|
||||
//! the underlying reader.
|
||||
|
||||
use std::io::{Read, Result, Seek, SeekFrom};
|
||||
|
||||
/// Read adapter which limits the bytes read from an underlying reader, with
|
||||
/// seek support.
|
||||
///
|
||||
/// This struct is generally created by importing the [`TakeSeekExt`] extension
|
||||
/// and calling [`take_seek`] on a reader.
|
||||
///
|
||||
/// [`take_seek`]: TakeSeekExt::take_seek
|
||||
#[derive(Debug)]
|
||||
pub struct TakeSeek<T> {
|
||||
inner: T,
|
||||
pos: u64,
|
||||
end: u64,
|
||||
}
|
||||
|
||||
impl<T> TakeSeek<T> {
|
||||
/// Gets a reference to the underlying reader.
|
||||
pub fn get_ref(&self) -> &T { &self.inner }
|
||||
|
||||
/// Gets a mutable reference to the underlying reader.
|
||||
///
|
||||
/// Care should be taken to avoid modifying the internal I/O state of the
|
||||
/// underlying reader as doing so may corrupt the internal limit of this
|
||||
/// `TakeSeek`.
|
||||
pub fn get_mut(&mut self) -> &mut T { &mut self.inner }
|
||||
|
||||
/// Consumes this wrapper, returning the wrapped value.
|
||||
pub fn into_inner(self) -> T { self.inner }
|
||||
|
||||
/// Returns the number of bytes that can be read before this instance will
|
||||
/// return EOF.
|
||||
///
|
||||
/// # Note
|
||||
///
|
||||
/// This instance may reach EOF after reading fewer bytes than indicated by
|
||||
/// this method if the underlying [`Read`] instance reaches EOF.
|
||||
pub fn limit(&self) -> u64 { self.end.saturating_sub(self.pos) }
|
||||
}
|
||||
|
||||
impl<T: Seek> TakeSeek<T> {
|
||||
/// Sets the number of bytes that can be read before this instance will
|
||||
/// return EOF. This is the same as constructing a new `TakeSeek` instance,
|
||||
/// so the amount of bytes read and the previous limit value don’t matter
|
||||
/// when calling this method.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if the inner stream returns an error from `stream_position`.
|
||||
pub fn set_limit(&mut self, limit: u64) {
|
||||
let pos = self.inner.stream_position().expect("cannot get position for `set_limit`");
|
||||
self.pos = pos;
|
||||
self.end = pos + limit;
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Read> Read for TakeSeek<T> {
|
||||
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
|
||||
let limit = self.limit();
|
||||
|
||||
// Don't call into inner reader at all at EOF because it may still block
|
||||
if limit == 0 {
|
||||
return Ok(0);
|
||||
}
|
||||
|
||||
// Lint: It is impossible for this cast to truncate because the value
|
||||
// being cast is the minimum of two values, and one of the value types
|
||||
// is already `usize`.
|
||||
#[allow(clippy::cast_possible_truncation)]
|
||||
let max = (buf.len() as u64).min(limit) as usize;
|
||||
let n = self.inner.read(&mut buf[0..max])?;
|
||||
self.pos += n as u64;
|
||||
Ok(n)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Seek> Seek for TakeSeek<T> {
|
||||
fn seek(&mut self, pos: SeekFrom) -> Result<u64> {
|
||||
self.pos = self.inner.seek(pos)?;
|
||||
Ok(self.pos)
|
||||
}
|
||||
|
||||
fn stream_position(&mut self) -> Result<u64> { Ok(self.pos) }
|
||||
}
|
||||
|
||||
/// An extension trait that implements `take_seek()` for compatible streams.
|
||||
pub trait TakeSeekExt {
|
||||
/// Creates an adapter which will read at most `limit` bytes from the
|
||||
/// wrapped stream.
|
||||
fn take_seek(self, limit: u64) -> TakeSeek<Self>
|
||||
where Self: Sized;
|
||||
}
|
||||
|
||||
impl<T: Read + Seek> TakeSeekExt for T {
|
||||
fn take_seek(mut self, limit: u64) -> TakeSeek<Self>
|
||||
where Self: Sized {
|
||||
let pos = self.stream_position().expect("cannot get position for `take_seek`");
|
||||
|
||||
TakeSeek { inner: self, pos, end: pos + limit }
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,163 @@
|
|||
//! [`DiscWriter`] and associated types.
|
||||
|
||||
use bytes::Bytes;
|
||||
|
||||
use crate::{
|
||||
common::{Compression, Format},
|
||||
disc,
|
||||
read::DiscReader,
|
||||
Error, Result,
|
||||
};
|
||||
|
||||
/// Options for writing a disc image.
|
||||
#[derive(Default, Debug, Clone)]
|
||||
pub struct FormatOptions {
|
||||
/// The disc format to write.
|
||||
pub format: Format,
|
||||
/// The compression algorithm to use for the output format, if supported.
|
||||
///
|
||||
/// If unsure, use [`Format::default_compression`] to get the default compression for the format.
|
||||
pub compression: Compression,
|
||||
/// Block size to use.
|
||||
///
|
||||
/// If unsure, use [`Format::default_block_size`] to get the default block size for the format.
|
||||
pub block_size: u32,
|
||||
}
|
||||
|
||||
impl FormatOptions {
|
||||
/// Creates options for the specified format.
|
||||
/// Uses the default compression and block size for the format.
|
||||
#[inline]
|
||||
pub fn new(format: Format) -> FormatOptions {
|
||||
FormatOptions {
|
||||
format,
|
||||
compression: format.default_compression(),
|
||||
block_size: format.default_block_size(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Options for processing a disc image writer.
|
||||
#[derive(Default, Debug, Clone)]
|
||||
pub struct ProcessOptions {
|
||||
/// If the output format supports multithreaded processing, this sets the number of threads to
|
||||
/// use for processing data. This is particularly useful for formats that compress data or
|
||||
/// perform other transformations. The default value of 0 disables multithreading.
|
||||
pub processor_threads: usize,
|
||||
/// Enables CRC32 checksum calculation for the disc data.
|
||||
///
|
||||
/// If the output format supports it, this will be stored in the disc data. (NKit 2 compatible)
|
||||
/// Each digest calculation will run on a separate thread, unaffected by the processor thread
|
||||
/// count.
|
||||
pub digest_crc32: bool,
|
||||
/// Enables MD5 checksum calculation for the disc data. (Slow!)
|
||||
///
|
||||
/// If the output format supports it, this will be stored in the disc data. (NKit 2 compatible)
|
||||
/// Each digest calculation will run on a separate thread, unaffected by the processor thread
|
||||
/// count.
|
||||
pub digest_md5: bool,
|
||||
/// Enables SHA-1 checksum calculation for the disc data.
|
||||
///
|
||||
/// If the output format supports it, this will be stored in the disc data. (NKit 2 compatible)
|
||||
/// Each digest calculation will run on a separate thread, unaffected by the processor thread
|
||||
/// count.
|
||||
pub digest_sha1: bool,
|
||||
/// Enables XXH64 checksum calculation for the disc data.
|
||||
///
|
||||
/// If the output format supports it, this will be stored in the disc data. (NKit 2 compatible)
|
||||
/// Each digest calculation will run on a separate thread, unaffected by the processor thread
|
||||
/// count.
|
||||
pub digest_xxh64: bool,
|
||||
}
|
||||
|
||||
/// A constructed disc writer.
|
||||
///
|
||||
/// This is the primary entry point for writing disc images.
|
||||
#[derive(Clone)]
|
||||
pub struct DiscWriter {
|
||||
inner: Box<dyn disc::writer::DiscWriter>,
|
||||
}
|
||||
|
||||
impl DiscWriter {
|
||||
/// Creates a new disc writer with the specified format options.
|
||||
#[inline]
|
||||
pub fn new(disc: DiscReader, options: &FormatOptions) -> Result<DiscWriter> {
|
||||
let mut options = options.clone();
|
||||
options.compression.validate_level()?;
|
||||
let mut reader = disc.into_inner();
|
||||
reader.reset();
|
||||
let inner = match options.format {
|
||||
Format::Iso => {
|
||||
if options.compression != Compression::None {
|
||||
return Err(Error::Other("ISO/GCM does not support compression".to_string()));
|
||||
}
|
||||
Box::new(reader)
|
||||
}
|
||||
Format::Ciso => crate::io::ciso::DiscWriterCISO::new(reader, &options)?,
|
||||
#[cfg(feature = "compress-zlib")]
|
||||
Format::Gcz => crate::io::gcz::DiscWriterGCZ::new(reader, &options)?,
|
||||
Format::Tgc => crate::io::tgc::DiscWriterTGC::new(reader, &options)?,
|
||||
Format::Wbfs => crate::io::wbfs::DiscWriterWBFS::new(reader, &options)?,
|
||||
Format::Wia | Format::Rvz => crate::io::wia::DiscWriterWIA::new(reader, &options)?,
|
||||
format => return Err(Error::Other(format!("Unsupported write format: {format}"))),
|
||||
};
|
||||
Ok(DiscWriter { inner })
|
||||
}
|
||||
|
||||
/// Processes the disc writer to completion, calling the data callback, in order, for each block
|
||||
/// of data to write to the output file. The callback should write all data before returning, or
|
||||
/// return an error if writing fails.
|
||||
#[inline]
|
||||
pub fn process(
|
||||
&self,
|
||||
mut data_callback: impl FnMut(Bytes, u64, u64) -> std::io::Result<()> + Send,
|
||||
options: &ProcessOptions,
|
||||
) -> Result<DiscFinalization> {
|
||||
self.inner.process(&mut data_callback, options)
|
||||
}
|
||||
|
||||
/// Returns the progress upper bound for the disc writer. For most formats, this has no
|
||||
/// relation to the written disc size, but can be used to display progress.
|
||||
#[inline]
|
||||
pub fn progress_bound(&self) -> u64 { self.inner.progress_bound() }
|
||||
|
||||
/// Returns the weight of the disc writer, which can help determine the number of threads to
|
||||
/// dedicate to output processing. This may depend on the format's configuration, such as
|
||||
/// whether compression is enabled.
|
||||
#[inline]
|
||||
pub fn weight(&self) -> DiscWriterWeight { self.inner.weight() }
|
||||
}
|
||||
|
||||
/// Data returned by the disc writer after processing.
|
||||
///
|
||||
/// If header data is provided, the consumer should seek to the beginning of the output stream and
|
||||
/// write the header data, overwriting any existing data. Otherwise, the output disc will be
|
||||
/// invalid.
|
||||
#[derive(Default, Clone)]
|
||||
pub struct DiscFinalization {
|
||||
/// Header data to write to the beginning of the output stream, if any.
|
||||
pub header: Bytes,
|
||||
/// The calculated CRC32 checksum of the input disc data, if any.
|
||||
pub crc32: Option<u32>,
|
||||
/// The calculated MD5 hash of the input disc data, if any.
|
||||
pub md5: Option<[u8; 16]>,
|
||||
/// The calculated SHA-1 hash of the input disc data, if any.
|
||||
pub sha1: Option<[u8; 20]>,
|
||||
/// The calculated XXH64 hash of the input disc data, if any.
|
||||
pub xxh64: Option<u64>,
|
||||
}
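// End-to-end sketch (file names and options illustrative, not part of this
// diff), using the API exactly as declared above: process a disc and honor
// the finalization contract by rewriting the header afterwards.
//
//     use std::io::{Seek, SeekFrom, Write};
//     let disc = DiscReader::new("in.iso", &DiscOptions::default())?;
//     let writer = DiscWriter::new(disc, &FormatOptions::new(Format::Rvz))?;
//     let mut out = std::fs::File::create("out.rvz")?;
//     let finalization = writer.process(
//         |data, _pos, _total| out.write_all(&data),
//         &ProcessOptions { digest_crc32: true, ..Default::default() },
//     )?;
//     if !finalization.header.is_empty() {
//         out.seek(SeekFrom::Start(0))?;
//         out.write_all(&finalization.header)?;
//     }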
|
||||
|
||||
/// The weight of a disc writer, which can help determine the number of threads to use for
|
||||
/// processing.
|
||||
pub enum DiscWriterWeight {
|
||||
/// The writer performs little to no processing of the input data, and is mostly I/O bound.
|
||||
/// This means that this writer does not benefit from parallelization, and will ignore the
|
||||
/// number of threads specified.
|
||||
Light,
|
||||
/// The writer performs some processing of the input data, and is somewhat CPU bound. This means
|
||||
/// that this writer benefits from parallelization, but not as much as a heavy writer.
|
||||
Medium,
|
||||
/// The writer performs significant processing of the input data, and is mostly CPU bound. This
|
||||
/// means that this writer benefits from parallelization.
|
||||
Heavy,
|
||||
}
|
|
@@ -16,31 +16,30 @@ categories = ["command-line-utilities", "parser-implementations"]
|
|||
build = "build.rs"
|
||||
|
||||
[features]
|
||||
asm = ["md-5/asm", "nod/asm", "sha1/asm"]
|
||||
nightly = ["crc32fast/nightly"]
|
||||
openssl = ["nod/openssl"]
|
||||
openssl-vendored = ["nod/openssl-vendored"]
|
||||
tracy = ["dep:tracing-tracy"]
|
||||
|
||||
[dependencies]
|
||||
argp = "0.3"
|
||||
base16ct = "0.2"
|
||||
crc32fast = "1.4"
|
||||
digest = "0.10"
|
||||
digest = { workspace = true }
|
||||
enable-ansi-support = "0.2"
|
||||
hex = { version = "0.4", features = ["serde"] }
|
||||
indicatif = "0.17"
|
||||
itertools = "0.13"
|
||||
log = "0.4"
|
||||
md-5 = "0.10"
|
||||
md-5 = { workspace = true }
|
||||
nod = { version = "2.0.0-alpha", path = "../nod" }
|
||||
quick-xml = { version = "0.36", features = ["serialize"] }
|
||||
num_cpus = "1.16"
|
||||
quick-xml = { version = "0.37", features = ["serialize"] }
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
sha1 = "0.10"
|
||||
sha1 = { workspace = true }
|
||||
size = "0.4"
|
||||
supports-color = "3.0"
|
||||
tracing = "0.1"
|
||||
tracing = { workspace = true }
|
||||
tracing-attributes = "0.1"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
xxhash-rust = { version = "0.8", features = ["xxh64"] }
|
||||
zerocopy = { version = "0.8", features = ["alloc", "derive"] }
|
||||
tracing-tracy = { version = "0.11", features = ["flush-on-exit"], optional = true }
|
||||
zerocopy = { workspace = true }
|
||||
zstd = "0.13"
|
||||
|
||||
[target.'cfg(target_env = "musl")'.dependencies]
|
||||
|
@@ -48,7 +47,7 @@ mimalloc = "0.1"
|
|||
|
||||
[build-dependencies]
|
||||
hex = { version = "0.4", features = ["serde"] }
|
||||
quick-xml = { version = "0.36", features = ["serialize"] }
|
||||
quick-xml = { version = "0.37", features = ["serialize"] }
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
zerocopy = { version = "0.8", features = ["alloc", "derive"] }
|
||||
zstd = "0.13"
|
||||
|
|
|
@@ -1,9 +1,13 @@
|
|||
use std::path::PathBuf;
|
||||
use std::{ffi::OsStr, path::PathBuf};
|
||||
|
||||
use argp::FromArgs;
|
||||
use nod::OpenOptions;
|
||||
use nod::{
|
||||
common::Format,
|
||||
read::{DiscOptions, PartitionEncryption},
|
||||
write::FormatOptions,
|
||||
};
|
||||
|
||||
use crate::util::{redump, shared::convert_and_verify};
|
||||
use crate::util::{path_display, redump, shared::convert_and_verify};
|
||||
|
||||
#[derive(FromArgs, Debug)]
|
||||
/// Converts a disc image to ISO.
|
||||
|
@@ -27,6 +31,9 @@ pub struct Args {
|
|||
#[argp(switch)]
|
||||
/// encrypt Wii partition data
|
||||
encrypt: bool,
|
||||
#[argp(option, short = 'c')]
|
||||
/// compression format and level (e.g. "zstd:19")
|
||||
compress: Option<String>,
|
||||
}
|
||||
|
||||
pub fn run(args: Args) -> nod::Result<()> {
|
||||
|
@@ -34,15 +41,46 @@ pub fn run(args: Args) -> nod::Result<()> {
|
|||
println!("Loading dat files...");
|
||||
redump::load_dats(args.dat.iter().map(PathBuf::as_ref))?;
|
||||
}
|
||||
let options = OpenOptions {
|
||||
let options = DiscOptions {
|
||||
partition_encryption: match (args.decrypt, args.encrypt) {
|
||||
(true, false) => nod::PartitionEncryptionMode::ForceDecrypted,
|
||||
(false, true) => nod::PartitionEncryptionMode::ForceEncrypted,
|
||||
(false, false) => nod::PartitionEncryptionMode::Original,
|
||||
(true, false) => PartitionEncryption::ForceDecrypted,
|
||||
(false, true) => PartitionEncryption::ForceEncrypted,
|
||||
(false, false) => PartitionEncryption::Original,
|
||||
(true, true) => {
|
||||
return Err(nod::Error::Other("Both --decrypt and --encrypt specified".to_string()))
|
||||
}
|
||||
},
|
||||
preloader_threads: 4,
|
||||
};
|
||||
convert_and_verify(&args.file, Some(&args.out), args.md5, &options)
|
||||
let format = match args.out.extension() {
|
||||
Some(ext)
|
||||
if ext.eq_ignore_ascii_case(OsStr::new("iso"))
|
||||
|| ext.eq_ignore_ascii_case(OsStr::new("gcm")) =>
|
||||
{
|
||||
Format::Iso
|
||||
}
|
||||
Some(ext) if ext.eq_ignore_ascii_case(OsStr::new("ciso")) => Format::Ciso,
|
||||
Some(ext) if ext.eq_ignore_ascii_case(OsStr::new("gcz")) => Format::Gcz,
|
||||
Some(ext) if ext.eq_ignore_ascii_case(OsStr::new("nfs")) => Format::Nfs,
|
||||
Some(ext) if ext.eq_ignore_ascii_case(OsStr::new("rvz")) => Format::Rvz,
|
||||
Some(ext) if ext.eq_ignore_ascii_case(OsStr::new("wbfs")) => Format::Wbfs,
|
||||
Some(ext) if ext.eq_ignore_ascii_case(OsStr::new("wia")) => Format::Wia,
|
||||
Some(ext) if ext.eq_ignore_ascii_case(OsStr::new("tgc")) => Format::Tgc,
|
||||
Some(_) => {
|
||||
return Err(nod::Error::Other(format!(
|
||||
"Unknown file extension: {}",
|
||||
path_display(&args.out)
|
||||
)))
|
||||
}
|
||||
None => Format::Iso,
|
||||
};
|
||||
let mut compression = if let Some(compress) = args.compress {
|
||||
compress.parse()?
|
||||
} else {
|
||||
format.default_compression()
|
||||
};
|
||||
compression.validate_level()?;
|
||||
let format_options =
|
||||
FormatOptions { format, compression, block_size: format.default_block_size() };
|
||||
convert_and_verify(&args.file, Some(&args.out), args.md5, &options, &format_options)
|
||||
}
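// For example (hedged; the actual flags are those declared in `Args` above):
// `convert game.iso game.rvz -c zstd:19` infers `Format::Rvz` from the output
// extension, parses the requested compression, validates its level, and hands
// both to `convert_and_verify`.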
|
||||
|
|
|
@@ -1,24 +1,19 @@
|
|||
use std::{
|
||||
cmp::min,
|
||||
collections::BTreeMap,
|
||||
fmt,
|
||||
io::Read,
|
||||
path::{Path, PathBuf},
|
||||
sync::{mpsc::sync_channel, Arc},
|
||||
thread,
|
||||
};
|
||||
|
||||
use argp::FromArgs;
|
||||
use indicatif::{ProgressBar, ProgressState, ProgressStyle};
|
||||
use nod::{Disc, OpenOptions, PartitionEncryptionMode, Result, ResultContext};
|
||||
use zerocopy::FromZeros;
|
||||
|
||||
use crate::util::{
|
||||
digest::{digest_thread, DigestResult},
|
||||
redump,
|
||||
redump::GameResult,
|
||||
use nod::{
|
||||
read::{DiscOptions, DiscReader, PartitionEncryption},
|
||||
write::{DiscWriter, FormatOptions, ProcessOptions},
|
||||
Result, ResultContext,
|
||||
};
|
||||
|
||||
use crate::util::{redump, redump::GameResult};
|
||||
|
||||
#[derive(FromArgs, Debug)]
|
||||
/// Commands related to DAT files.
|
||||
#[argp(subcommand, name = "dat")]
|
||||
|
@@ -165,9 +160,9 @@ struct DiscHashes {
|
|||
}
|
||||
|
||||
fn load_disc(path: &Path, name: &str, full_verify: bool) -> Result<DiscHashes> {
|
||||
let options = OpenOptions { partition_encryption: PartitionEncryptionMode::Original };
|
||||
let mut disc = Disc::new_with_options(path, &options)?;
|
||||
let disc_size = disc.disc_size();
|
||||
let options =
|
||||
DiscOptions { partition_encryption: PartitionEncryption::Original, preloader_threads: 4 };
|
||||
let disc = DiscReader::new(path, &options)?;
|
||||
if !full_verify {
|
||||
let meta = disc.meta();
|
||||
if let (Some(crc32), Some(sha1)) = (meta.crc32, meta.sha1) {
|
||||
|
@@ -175,7 +170,8 @@ fn load_disc(path: &Path, name: &str, full_verify: bool) -> Result<DiscHashes> {
|
|||
}
|
||||
}
|
||||
|
||||
let pb = ProgressBar::new(disc_size).with_message(format!("{}:", name));
|
||||
let disc_writer = DiscWriter::new(disc, &FormatOptions::default())?;
|
||||
let pb = ProgressBar::new(disc_writer.progress_bound()).with_message(format!("{}:", name));
|
||||
pb.set_style(ProgressStyle::with_template("{msg} {spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {bytes}/{total_bytes} ({bytes_per_sec}, {eta})")
|
||||
.unwrap()
|
||||
.with_key("eta", |state: &ProgressState, w: &mut dyn fmt::Write| {
|
||||
|
@@ -183,47 +179,22 @@ fn load_disc(path: &Path, name: &str, full_verify: bool) -> Result<DiscHashes> {
|
|||
})
|
||||
.progress_chars("#>-"));
|
||||
|
||||
const BUFFER_SIZE: usize = 1015808; // LCM(0x8000, 0x7C00)
|
||||
let digest_threads = [digest_thread::<crc32fast::Hasher>(), digest_thread::<sha1::Sha1>()];
|
||||
|
||||
let (w_tx, w_rx) = sync_channel::<Arc<[u8]>>(1);
|
||||
let w_thread = thread::spawn(move || {
|
||||
let mut total_written = 0u64;
|
||||
while let Ok(data) = w_rx.recv() {
|
||||
let mut total_written = 0u64;
|
||||
let finalization = disc_writer.process(
|
||||
|data, pos, _| {
|
||||
total_written += data.len() as u64;
|
||||
pb.set_position(total_written);
|
||||
}
|
||||
pb.finish_and_clear();
|
||||
});
|
||||
pb.set_position(pos);
|
||||
Ok(())
|
||||
},
|
||||
&ProcessOptions {
|
||||
processor_threads: 12, // TODO
|
||||
digest_crc32: true,
|
||||
digest_md5: false,
|
||||
digest_sha1: true,
|
||||
digest_xxh64: false,
|
||||
},
|
||||
)?;
|
||||
pb.finish();
|
||||
|
||||
let mut total_read = 0u64;
|
||||
let mut buf = <[u8]>::new_box_zeroed_with_elems(BUFFER_SIZE)?;
|
||||
while total_read < disc_size {
|
||||
let read = min(BUFFER_SIZE as u64, disc_size - total_read) as usize;
|
||||
disc.read_exact(&mut buf[..read]).with_context(|| {
|
||||
format!("Reading {} bytes at disc offset {}", BUFFER_SIZE, total_read)
|
||||
})?;
|
||||
|
||||
let arc = Arc::<[u8]>::from(&buf[..read]);
|
||||
for (tx, _) in &digest_threads {
|
||||
tx.send(arc.clone()).map_err(|_| "Sending data to hash thread")?;
|
||||
}
|
||||
w_tx.send(arc).map_err(|_| "Sending data to write thread")?;
|
||||
total_read += read as u64;
|
||||
}
|
||||
drop(w_tx); // Close channel
|
||||
w_thread.join().unwrap();
|
||||
|
||||
let mut crc32 = None;
|
||||
let mut sha1 = None;
|
||||
for (tx, handle) in digest_threads {
|
||||
drop(tx); // Close channel
|
||||
match handle.join().unwrap() {
|
||||
DigestResult::Crc32(v) => crc32 = Some(v),
|
||||
DigestResult::Sha1(v) => sha1 = Some(v),
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(DiscHashes { crc32: crc32.unwrap(), sha1: sha1.unwrap() })
|
||||
Ok(DiscHashes { crc32: finalization.crc32.unwrap(), sha1: finalization.sha1.unwrap() })
|
||||
}
|
||||
|
|
|
@@ -1,5 +1,4 @@
|
|||
use std::{
|
||||
borrow::Cow,
|
||||
fs,
|
||||
fs::File,
|
||||
io::{BufRead, Write},
|
||||
|
@@ -7,15 +6,16 @@ };
|
|||
};
|
||||
|
||||
use argp::FromArgs;
|
||||
use itertools::Itertools;
|
||||
use nod::{
|
||||
Disc, Fst, Node, OpenOptions, PartitionBase, PartitionKind, PartitionMeta, PartitionOptions,
|
||||
common::PartitionKind,
|
||||
disc::fst::{Fst, Node},
|
||||
read::{DiscOptions, DiscReader, PartitionMeta, PartitionOptions, PartitionReader},
|
||||
ResultContext,
|
||||
};
|
||||
use size::{Base, Size};
|
||||
use zerocopy::IntoBytes;
|
||||
|
||||
use crate::util::{display, has_extension};
|
||||
use crate::util::{has_extension, path_display};
|
||||
|
||||
#[derive(FromArgs, Debug)]
|
||||
/// Extracts a disc image.
|
||||
|
@@ -53,77 +53,57 @@ pub fn run(args: Args) -> nod::Result<()> {
|
|||
} else {
|
||||
output_dir = args.file.with_extension("");
|
||||
}
|
||||
let disc = Disc::new_with_options(&args.file, &OpenOptions::default())?;
|
||||
let disc =
|
||||
DiscReader::new(&args.file, &DiscOptions { preloader_threads: 4, ..Default::default() })?;
|
||||
let header = disc.header();
|
||||
let is_wii = header.is_wii();
|
||||
let partition_options = PartitionOptions { validate_hashes: args.validate };
|
||||
let options = PartitionOptions { validate_hashes: args.validate };
|
||||
if let Some(partition) = args.partition {
|
||||
if partition.eq_ignore_ascii_case("all") {
|
||||
for info in disc.partitions() {
|
||||
let mut out_dir = output_dir.clone();
|
||||
out_dir.push(info.kind.dir_name().as_ref());
|
||||
let mut partition =
|
||||
disc.open_partition_with_options(info.index, &partition_options)?;
|
||||
let mut partition = disc.open_partition(info.index, &options)?;
|
||||
extract_partition(&disc, partition.as_mut(), &out_dir, is_wii, args.quiet)?;
|
||||
}
|
||||
} else if partition.eq_ignore_ascii_case("data") {
|
||||
let mut partition =
|
||||
disc.open_partition_kind_with_options(PartitionKind::Data, &partition_options)?;
|
||||
let mut partition = disc.open_partition_kind(PartitionKind::Data, &options)?;
|
||||
extract_partition(&disc, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
|
||||
} else if partition.eq_ignore_ascii_case("update") {
|
||||
let mut partition =
|
||||
disc.open_partition_kind_with_options(PartitionKind::Update, &partition_options)?;
|
||||
let mut partition = disc.open_partition_kind(PartitionKind::Update, &options)?;
|
||||
extract_partition(&disc, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
|
||||
} else if partition.eq_ignore_ascii_case("channel") {
|
||||
let mut partition =
|
||||
disc.open_partition_kind_with_options(PartitionKind::Channel, &partition_options)?;
|
||||
let mut partition = disc.open_partition_kind(PartitionKind::Channel, &options)?;
|
||||
extract_partition(&disc, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
|
||||
} else {
|
||||
let idx = partition.parse::<usize>().map_err(|_| "Invalid partition index")?;
|
||||
let mut partition = disc.open_partition_with_options(idx, &partition_options)?;
|
||||
let mut partition = disc.open_partition(idx, &options)?;
|
||||
extract_partition(&disc, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
|
||||
}
|
||||
} else {
|
||||
let mut partition =
|
||||
disc.open_partition_kind_with_options(PartitionKind::Data, &partition_options)?;
|
||||
let mut partition = disc.open_partition_kind(PartitionKind::Data, &options)?;
|
||||
extract_partition(&disc, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn extract_partition(
|
||||
disc: &Disc,
|
||||
partition: &mut dyn PartitionBase,
|
||||
disc: &DiscReader,
|
||||
partition: &mut dyn PartitionReader,
|
||||
out_dir: &Path,
|
||||
is_wii: bool,
|
||||
quiet: bool,
|
||||
) -> nod::Result<()> {
|
||||
let meta = partition.meta()?;
|
||||
extract_sys_files(disc, meta.as_ref(), out_dir, quiet)?;
|
||||
extract_sys_files(disc, &meta, out_dir, quiet)?;
|
||||
|
||||
// Extract FST
|
||||
let files_dir = out_dir.join("files");
|
||||
fs::create_dir_all(&files_dir)
|
||||
.with_context(|| format!("Creating directory {}", display(&files_dir)))?;
|
||||
.with_context(|| format!("Creating directory {}", path_display(&files_dir)))?;
|
||||
|
||||
let fst = Fst::new(&meta.raw_fst)?;
|
||||
let mut path_segments = Vec::<(Cow<str>, usize)>::new();
|
||||
for (idx, node, name) in fst.iter() {
|
||||
// Remove ended path segments
|
||||
let mut new_size = 0;
|
||||
for (_, end) in path_segments.iter() {
|
||||
if *end == idx {
|
||||
break;
|
||||
}
|
||||
new_size += 1;
|
||||
}
|
||||
path_segments.truncate(new_size);
|
||||
|
||||
// Add the new path segment
|
||||
let end = if node.is_dir() { node.length() as usize } else { idx + 1 };
|
||||
path_segments.push((name?, end));
|
||||
|
||||
let path = path_segments.iter().map(|(name, _)| name.as_ref()).join("/");
|
||||
for (_, node, path) in fst.iter() {
|
||||
if node.is_dir() {
|
||||
fs::create_dir_all(files_dir.join(&path))
|
||||
.with_context(|| format!("Creating directory {}", path))?;
|
||||
|
@@ -135,14 +115,14 @@
|
|||
}
|
||||
|
||||
fn extract_sys_files(
|
||||
disc: &Disc,
|
||||
disc: &DiscReader,
|
||||
data: &PartitionMeta,
|
||||
out_dir: &Path,
|
||||
quiet: bool,
|
||||
) -> nod::Result<()> {
|
||||
let sys_dir = out_dir.join("sys");
|
||||
fs::create_dir_all(&sys_dir)
|
||||
.with_context(|| format!("Creating directory {}", display(&sys_dir)))?;
|
||||
.with_context(|| format!("Creating directory {}", path_display(&sys_dir)))?;
|
||||
extract_file(data.raw_boot.as_ref(), &sys_dir.join("boot.bin"), quiet)?;
|
||||
extract_file(data.raw_bi2.as_ref(), &sys_dir.join("bi2.bin"), quiet)?;
|
||||
extract_file(data.raw_apploader.as_ref(), &sys_dir.join("apploader.img"), quiet)?;
|
||||
|
@@ -154,7 +134,7 @@ fn extract_sys_files(
|
|||
if disc_header.is_wii() {
|
||||
let disc_dir = out_dir.join("disc");
|
||||
fs::create_dir_all(&disc_dir)
|
||||
.with_context(|| format!("Creating directory {}", display(&disc_dir)))?;
|
||||
.with_context(|| format!("Creating directory {}", path_display(&disc_dir)))?;
|
||||
extract_file(&disc_header.as_bytes()[..0x100], &disc_dir.join("header.bin"), quiet)?;
|
||||
if let Some(region) = disc.region() {
|
||||
extract_file(region, &disc_dir.join("region.bin"), quiet)?;
|
||||
|
@@ -179,17 +159,18 @@ fn extract_file(bytes: &[u8], out_path: &Path, quiet: bool) -> nod::Result<()> {
|
|||
if !quiet {
|
||||
println!(
|
||||
"Extracting {} (size: {})",
|
||||
display(out_path),
|
||||
path_display(out_path),
|
||||
Size::from_bytes(bytes.len()).format().with_base(Base::Base10)
|
||||
);
|
||||
}
|
||||
fs::write(out_path, bytes).with_context(|| format!("Writing file {}", display(out_path)))?;
|
||||
fs::write(out_path, bytes)
|
||||
.with_context(|| format!("Writing file {}", path_display(out_path)))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn extract_node(
|
||||
node: Node,
|
||||
partition: &mut dyn PartitionBase,
|
||||
partition: &mut dyn PartitionReader,
|
||||
base_path: &Path,
|
||||
name: &str,
|
||||
is_wii: bool,
|
||||
|
@@ -199,12 +180,12 @@ fn extract_node(
|
|||
if !quiet {
|
||||
println!(
|
||||
"Extracting {} (size: {})",
|
||||
display(&file_path),
|
||||
path_display(&file_path),
|
||||
Size::from_bytes(node.length()).format().with_base(Base::Base10)
|
||||
);
|
||||
}
|
||||
let mut file = File::create(&file_path)
|
||||
.with_context(|| format!("Creating file {}", display(&file_path)))?;
|
||||
.with_context(|| format!("Creating file {}", path_display(&file_path)))?;
|
||||
let mut r = partition.open_file(node).with_context(|| {
|
||||
format!(
|
||||
"Opening file {} on disc for reading (offset {}, size {})",
|
||||
|
@@ -214,15 +195,17 @@
|
|||
)
|
||||
})?;
|
||||
loop {
|
||||
let buf =
|
||||
r.fill_buf().with_context(|| format!("Extracting file {}", display(&file_path)))?;
|
||||
let buf = r
|
||||
.fill_buf()
|
||||
.with_context(|| format!("Extracting file {}", path_display(&file_path)))?;
|
||||
let len = buf.len();
|
||||
if len == 0 {
|
||||
break;
|
||||
}
|
||||
file.write_all(buf).with_context(|| format!("Writing file {}", display(&file_path)))?;
|
||||
file.write_all(buf)
|
||||
.with_context(|| format!("Writing file {}", path_display(&file_path)))?;
|
||||
r.consume(len);
|
||||
}
|
||||
file.flush().with_context(|| format!("Flushing file {}", display(&file_path)))?;
|
||||
file.flush().with_context(|| format!("Flushing file {}", path_display(&file_path)))?;
|
||||
Ok(())
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,771 @@
|
|||
use std::{
|
||||
fs,
|
||||
fs::File,
|
||||
io,
|
||||
io::{BufRead, Read, Seek, SeekFrom, Write},
|
||||
path::{Path, PathBuf},
|
||||
str::from_utf8,
|
||||
time::Instant,
|
||||
};
|
||||
|
||||
use argp::FromArgs;
|
||||
use nod::{
|
||||
build::gc::{FileCallback, FileInfo, GCPartitionBuilder, PartitionOverrides},
|
||||
common::PartitionKind,
|
||||
disc::{
|
||||
fst::Fst, DiscHeader, PartitionHeader, BI2_SIZE, BOOT_SIZE, MINI_DVD_SIZE, SECTOR_SIZE,
|
||||
},
|
||||
read::{
|
||||
DiscOptions, DiscReader, PartitionEncryption, PartitionMeta, PartitionOptions,
|
||||
PartitionReader,
|
||||
},
|
||||
util::lfg::LaggedFibonacci,
|
||||
write::{DiscWriter, FormatOptions, ProcessOptions},
|
||||
ResultContext,
|
||||
};
|
||||
use tracing::{debug, error, info, warn};
|
||||
use zerocopy::{FromBytes, FromZeros};
|
||||
|
||||
use crate::util::{array_ref, redump, shared::convert_and_verify};
|
||||
|
||||
#[derive(FromArgs, Debug)]
|
||||
/// Generates a disc image.
|
||||
#[argp(subcommand, name = "gen")]
|
||||
pub struct Args {
|
||||
#[argp(positional)]
|
||||
/// Path to extracted disc image
|
||||
dir: PathBuf,
|
||||
#[argp(positional)]
|
||||
/// Output ISO file
|
||||
out: PathBuf,
|
||||
}
|
||||
|
||||
#[derive(FromArgs, Debug)]
|
||||
/// Test disc image generation.
|
||||
#[argp(subcommand, name = "gentest")]
|
||||
pub struct TestArgs {
|
||||
#[argp(positional)]
|
||||
/// Path to original disc images
|
||||
inputs: Vec<PathBuf>,
|
||||
#[argp(option, short = 'o')]
|
||||
/// Output ISO file
|
||||
output: Option<PathBuf>,
|
||||
#[argp(option, short = 't')]
|
||||
/// Output original ISO for comparison
|
||||
test_output: Option<PathBuf>,
|
||||
}
|
||||
|
||||
fn read_fixed<const N: usize>(path: &Path) -> nod::Result<Box<[u8; N]>> {
|
||||
let mut buf = <[u8; N]>::new_box_zeroed()?;
|
||||
File::open(path)
|
||||
.with_context(|| format!("Failed to open {}", path.display()))?
|
||||
.read_exact(buf.as_mut())
|
||||
.with_context(|| format!("Failed to read {}", path.display()))?;
|
||||
Ok(buf)
|
||||
}
|
||||
|
||||
fn read_all(path: &Path) -> nod::Result<Box<[u8]>> {
|
||||
let mut buf = Vec::new();
|
||||
File::open(path)
|
||||
.with_context(|| format!("Failed to open {}", path.display()))?
|
||||
.read_to_end(&mut buf)
|
||||
.with_context(|| format!("Failed to read {}", path.display()))?;
|
||||
Ok(buf.into_boxed_slice())
|
||||
}
|
||||
|
||||
struct FileWriteInfo {
|
||||
name: String,
|
||||
offset: u64,
|
||||
length: u64,
|
||||
}
|
||||
|
||||
fn file_size(path: &Path) -> nod::Result<u64> {
|
||||
Ok(fs::metadata(path)
|
||||
.with_context(|| format!("Failed to get metadata for {}", path.display()))?
|
||||
.len())
|
||||
}
|
||||
|
||||
fn check_file_size(path: &Path, expected: u64) -> nod::Result<()> {
|
||||
let actual = file_size(path)?;
|
||||
if actual != expected {
|
||||
return Err(nod::Error::DiscFormat(format!(
|
||||
"File {} has size {}, expected {}",
|
||||
path.display(),
|
||||
actual,
|
||||
expected
|
||||
)));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn run(args: Args) -> nod::Result<()> {
|
||||
let start = Instant::now();
|
||||
|
||||
// Validate file sizes
|
||||
let boot_path = args.dir.join("sys/boot.bin");
|
||||
check_file_size(&boot_path, BOOT_SIZE as u64)?;
|
||||
let bi2_path = args.dir.join("sys/bi2.bin");
|
||||
check_file_size(&bi2_path, BI2_SIZE as u64)?;
|
||||
let apploader_path = args.dir.join("sys/apploader.img");
|
||||
let apploader_size = file_size(&apploader_path)?;
|
||||
let dol_path = args.dir.join("sys/main.dol");
|
||||
let dol_size = file_size(&dol_path)?;
|
||||
|
||||
// Build metadata
|
||||
let mut file_infos = Vec::new();
|
||||
let boot_data: Box<[u8; BOOT_SIZE]> = read_fixed(&boot_path)?;
|
||||
let header = DiscHeader::ref_from_bytes(&boot_data[..size_of::<DiscHeader>()])
|
||||
.expect("Failed to read disc header");
|
||||
let junk_id = get_junk_id(header);
|
||||
let partition_header = PartitionHeader::ref_from_bytes(&boot_data[size_of::<DiscHeader>()..])
|
||||
.expect("Failed to read partition header");
|
||||
let fst_path = args.dir.join("sys/fst.bin");
|
||||
let fst_data = read_all(&fst_path)?;
|
||||
let fst = Fst::new(&fst_data).expect("Failed to parse FST");
|
||||
|
||||
file_infos.push(FileWriteInfo {
|
||||
name: "sys/boot.bin".to_string(),
|
||||
offset: 0,
|
||||
length: BOOT_SIZE as u64,
|
||||
});
|
||||
file_infos.push(FileWriteInfo {
|
||||
name: "sys/bi2.bin".to_string(),
|
||||
offset: BOOT_SIZE as u64,
|
||||
length: BI2_SIZE as u64,
|
||||
});
|
||||
file_infos.push(FileWriteInfo {
|
||||
name: "sys/apploader.img".to_string(),
|
||||
offset: BOOT_SIZE as u64 + BI2_SIZE as u64,
|
||||
length: apploader_size,
|
||||
});
|
||||
let fst_offset = partition_header.fst_offset(false);
|
||||
let dol_offset = partition_header.dol_offset(false);
|
||||
if dol_offset < fst_offset {
|
||||
file_infos.push(FileWriteInfo {
|
||||
name: "sys/main.dol".to_string(),
|
||||
offset: dol_offset,
|
||||
length: dol_size,
|
||||
});
|
||||
} else {
|
||||
let mut found = false;
|
||||
for (_, node, path) in fst.iter() {
|
||||
if !node.is_file() {
|
||||
continue;
|
||||
}
|
||||
let offset = node.offset(false);
|
||||
if offset == dol_offset {
|
||||
info!("Using DOL from FST: {}", path);
|
||||
found = true;
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return Err(nod::Error::DiscFormat("DOL not found in FST".to_string()));
|
||||
}
|
||||
}
|
||||
let fst_size = partition_header.fst_size(false);
|
||||
file_infos.push(FileWriteInfo {
|
||||
name: "sys/fst.bin".to_string(),
|
||||
offset: fst_offset,
|
||||
length: fst_size,
|
||||
});
|
||||
|
||||
// Collect files
|
||||
for (_, node, path) in fst.iter() {
|
||||
let length = node.length() as u64;
|
||||
if node.is_dir() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut file_path = args.dir.join("files");
|
||||
file_path.extend(path.split('/'));
|
||||
let metadata = match fs::metadata(&file_path) {
|
||||
Ok(meta) => meta,
|
||||
Err(e) if e.kind() == io::ErrorKind::NotFound => {
|
||||
warn!("File not found: {}", file_path.display());
|
||||
continue;
|
||||
}
|
||||
Err(e) => {
|
||||
return Err(e)
|
||||
.context(format!("Failed to get metadata for {}", file_path.display()))
|
||||
}
|
||||
};
|
||||
if metadata.is_dir() {
|
||||
return Err(nod::Error::Other(format!("Path {} is a directory", file_path.display())));
|
||||
}
|
||||
if metadata.len() != length {
|
||||
return Err(nod::Error::Other(format!(
|
||||
"File {} has size {}, expected {}",
|
||||
file_path.display(),
|
||||
metadata.len(),
|
||||
length
|
||||
)));
|
||||
}
|
||||
let offset = node.offset(false);
|
||||
file_infos.push(FileWriteInfo {
|
||||
name: file_path.into_os_string().into_string().unwrap(),
|
||||
offset,
|
||||
length,
|
||||
});
|
||||
}
|
||||
sort_files(&mut file_infos)?;
|
||||
|
||||
// Write files
|
||||
let mut out = File::create(&args.out)
|
||||
.with_context(|| format!("Failed to create {}", args.out.display()))?;
|
||||
info!("Writing disc image to {} ({} files)", args.out.display(), file_infos.len());
|
||||
let crc = write_files(
|
||||
&mut out,
|
||||
&file_infos,
|
||||
header,
|
||||
partition_header,
|
||||
junk_id,
|
||||
|out, name| match name {
|
||||
"sys/boot.bin" => out.write_all(boot_data.as_ref()),
|
||||
"sys/fst.bin" => out.write_all(fst_data.as_ref()),
|
||||
path => {
|
||||
let mut in_file = File::open(args.dir.join(path))?;
|
||||
io::copy(&mut in_file, out).map(|_| ())
|
||||
}
|
||||
},
|
||||
)?;
|
||||
out.flush().context("Failed to flush output file")?;
|
||||
info!("Generated disc image in {:?} (CRC32: {:08X})", start.elapsed(), crc);
|
||||
let redump_entry = redump::find_by_crc32(crc);
|
||||
if let Some(entry) = &redump_entry {
|
||||
println!("Redump: {} ✅", entry.name);
|
||||
} else {
|
||||
println!("Redump: Not found ❌");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn align_up<const N: u64>(n: u64) -> u64 { (n + N - 1) & !(N - 1) }
|
||||
|
||||
#[inline]
|
||||
fn gcm_align(n: u64) -> u64 { (n + 31) & !3 }
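// Worked example (illustration only): for n = 0x2401,
//   align_up::<4>(0x2401) = (0x2401 + 3) & !3  = 0x2404
//   gcm_align(0x2401)     = (0x2401 + 31) & !3 = 0x2420
// The 0x1C (28-byte) difference is the padding described in the junk-data
// comment further below.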
|
||||
|
||||
/// Files can be located on the inner rim of the disc (closer to the center) or the outer rim
|
||||
/// (closer to the edge). The inner rim is slower to read, so developers often configured certain
|
||||
/// files to be located on the outer rim. This function attempts to find a gap in the file offsets
|
||||
/// between the inner and outer rim, which we need to recreate junk data properly.
|
||||
fn find_file_gap(file_infos: &[FileWriteInfo], fst_end: u64) -> Option<u64> {
|
||||
let mut last_offset = 0;
|
||||
for info in file_infos {
|
||||
if last_offset > fst_end && info.offset > last_offset + SECTOR_SIZE as u64 {
|
||||
debug!("Found file gap at {:X} -> {:X}", last_offset, info.offset);
|
||||
return Some(last_offset);
|
||||
}
|
||||
last_offset = info.offset + info.length;
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn write_files<W>(
|
||||
w: &mut W,
|
||||
file_infos: &[FileWriteInfo],
|
||||
header: &DiscHeader,
|
||||
partition_header: &PartitionHeader,
|
||||
junk_id: Option<[u8; 4]>,
|
||||
mut callback: impl FnMut(&mut HashStream<&mut W>, &str) -> io::Result<()>,
|
||||
) -> nod::Result<u32>
|
||||
where
|
||||
W: Write + ?Sized,
|
||||
{
|
||||
let fst_end = partition_header.fst_offset(false) + partition_header.fst_size(false);
|
||||
let file_gap = find_file_gap(file_infos, fst_end);
|
||||
let mut lfg = LaggedFibonacci::default();
|
||||
let mut out = HashStream::new(w);
|
||||
let mut last_end = 0;
|
||||
for info in file_infos {
|
||||
if let Some(junk_id) = junk_id {
|
||||
let aligned_end = gcm_align(last_end);
|
||||
if info.offset > aligned_end && last_end >= fst_end {
|
||||
// Junk data is aligned to 4 bytes with a 28 byte padding (aka `(n + 31) & !3`)
|
||||
// but a few cases don't have the 28 byte padding. Namely, the junk data after the
|
||||
// FST, and the junk data in between the inner and outer rim files. This attempts to
|
||||
// determine the correct alignment, but is not 100% accurate.
|
||||
let junk_start =
|
||||
if file_gap == Some(last_end) { align_up::<4>(last_end) } else { aligned_end };
|
||||
debug!("Writing junk data at {:X} -> {:X}", junk_start, info.offset);
|
||||
write_junk_data(
|
||||
&mut lfg,
|
||||
&mut out,
|
||||
junk_id,
|
||||
header.disc_num,
|
||||
junk_start,
|
||||
info.offset,
|
||||
)?;
|
||||
}
|
||||
}
|
||||
debug!(
|
||||
"Writing file {} at {:X} -> {:X}",
|
||||
info.name,
|
||||
info.offset,
|
||||
info.offset + info.length
|
||||
);
|
||||
out.seek(SeekFrom::Start(info.offset))
|
||||
.with_context(|| format!("Seeking to offset {}", info.offset))?;
|
||||
if info.length > 0 {
|
||||
callback(&mut out, &info.name)
|
||||
.with_context(|| format!("Failed to write file {}", info.name))?;
|
||||
let cur = out.stream_position().context("Getting current position")?;
|
||||
if cur != info.offset + info.length {
|
||||
return Err(nod::Error::Other(format!(
|
||||
"Wrote {} bytes, expected {}",
|
||||
cur - info.offset,
|
||||
info.length
|
||||
)));
|
||||
}
|
||||
}
|
||||
last_end = info.offset + info.length;
|
||||
}
|
||||
if let Some(junk_id) = junk_id {
|
||||
let aligned_end = gcm_align(last_end);
|
||||
if aligned_end < MINI_DVD_SIZE && aligned_end >= fst_end {
|
||||
debug!("Writing junk data at {:X} -> {:X}", aligned_end, MINI_DVD_SIZE);
|
||||
write_junk_data(
|
||||
&mut lfg,
|
||||
&mut out,
|
||||
junk_id,
|
||||
header.disc_num,
|
||||
aligned_end,
|
||||
MINI_DVD_SIZE,
|
||||
)?;
|
||||
last_end = MINI_DVD_SIZE;
|
||||
}
|
||||
}
|
||||
out.write_zeroes(MINI_DVD_SIZE - last_end).context("Writing end of file")?;
|
||||
out.flush().context("Flushing output")?;
|
||||
Ok(out.finish())
|
||||
}
|
||||
|
||||
fn write_junk_data<W>(
|
||||
lfg: &mut LaggedFibonacci,
|
||||
out: &mut W,
|
||||
junk_id: [u8; 4],
|
||||
disc_num: u8,
|
||||
pos: u64,
|
||||
end: u64,
|
||||
) -> nod::Result<()>
|
||||
where
|
||||
W: Write + Seek + ?Sized,
|
||||
{
|
||||
out.seek(SeekFrom::Start(pos)).with_context(|| format!("Seeking to offset {}", pos))?;
|
||||
lfg.write_sector_chunked(out, end - pos, junk_id, disc_num, pos)
|
||||
.with_context(|| format!("Failed to write junk data at offset {}", pos))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn run_test(args: TestArgs) -> nod::Result<()> {
|
||||
let mut failed = vec![];
|
||||
for input in args.inputs {
|
||||
match in_memory_test(&input, args.output.as_deref(), args.test_output.as_deref()) {
|
||||
Ok(()) => {}
|
||||
Err(e) => {
|
||||
error!("Failed to generate disc image: {:?}", e);
|
||||
failed.push((input, e));
|
||||
}
|
||||
}
|
||||
}
|
||||
if !failed.is_empty() {
|
||||
error!("Failed to generate disc images:");
|
||||
for (input, e) in failed {
|
||||
error!(" {}: {:?}", input.display(), e);
|
||||
}
|
||||
std::process::exit(1);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Some games (mainly beta and sample discs) have junk data that doesn't match the game ID. This
|
||||
/// function returns the correct game ID to use, if an override is needed.
|
||||
fn get_override_junk_id(header: &DiscHeader) -> Option<[u8; 4]> {
|
||||
match &header.game_id {
|
||||
// Dairantou Smash Brothers DX (Japan) (Taikenban)
|
||||
b"DALJ01" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"DPIJ"),
|
||||
// 2002 FIFA World Cup (Japan) (Jitsuen-you Sample)
|
||||
b"DFIJ13" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"GFIJ"),
|
||||
// Disney's Magical Park (Japan) (Jitsuen-you Sample)
|
||||
b"DMTJ18" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"GMTJ"),
|
||||
// Star Wars - Rogue Squadron II (Japan) (Jitsuen-you Sample)
|
||||
b"DSWJ13" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"GSWJ"),
|
||||
// Homeland (Japan) (Rev 1) [T-En by DOL-Translations v20230606] [i]
|
||||
b"GHEE91" if header.disc_num == 0 && header.disc_version == 1 => Some(*b"GHEJ"),
|
||||
// Kururin Squash! (Japan) [T-En by DOL-Translations v2.0.0]
|
||||
b"GKQE01" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"GKQJ"),
|
||||
// Lupin III - Lost Treasure Under the Sea (Japan) (Disc 1) [T-En by DOL-Translations v0.5.0] [i] [n]
|
||||
b"GL3EE8" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"GL3J"),
|
||||
// Lupin III - Lost Treasure Under the Sea (Japan) (Disc 2) [T-En by DOL-Translations v0.5.0] [i] [n]
|
||||
b"GL3EE8" if header.disc_num == 1 && header.disc_version == 0 => Some(*b"GL3J"),
|
||||
// Taxi 3 - The Game (France) [T-En by DOL-Translations v20230801] [n]
|
||||
b"GXQP41" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"GXQF"),
|
||||
// Donkey Konga 3 - Tabehoudai! Haru Mogitate 50-kyoku (Japan) [T-En by DOL-Translations v0.1.1] [i]
|
||||
b"GY3E01" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"GY3J"),
|
||||
// Need for Speed - Underground (Europe) (Alt)
|
||||
b"PZHP69" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"GNDP"),
|
||||
_ => None,
|
||||
}
|
||||
}

fn get_junk_id(header: &DiscHeader) -> Option<[u8; 4]> {
    Some(match get_override_junk_id(header) {
        Some(id) => {
            info!("Using override junk ID: {:X?}", from_utf8(&id).unwrap());
            id
        }
        None => *array_ref!(header.game_id, 0, 4),
    })
}

fn sort_files(files: &mut [FileWriteInfo]) -> nod::Result<()> {
    files.sort_unstable_by_key(|info| (info.offset, info.length));
    for i in 1..files.len() {
        let prev = &files[i - 1];
        let cur = &files[i];
        if cur.offset < prev.offset + prev.length {
            return Err(nod::Error::Other(format!(
                "File {} ({:#X}-{:#X}) overlaps with {} ({:#X}-{:#X})",
                cur.name,
                cur.offset,
                cur.offset + cur.length,
                prev.name,
                prev.offset,
                prev.offset + prev.length
            )));
        }
    }
    Ok(())
}
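
// Illustrative sketch (not part of this commit): `sort_files` sorts extents by
// (offset, length) and rejects any pair that overlaps. The file names and
// offsets below are hypothetical.
#[allow(dead_code)]
fn example_overlap_check() {
    let mut files = vec![
        FileWriteInfo { name: "b.bin".to_string(), offset: 0x7FFF, length: 0x100 },
        FileWriteInfo { name: "a.bin".to_string(), offset: 0x0, length: 0x8000 },
    ];
    // "a.bin" spans 0x0..0x8000 and "b.bin" starts at 0x7FFF, so this errors.
    assert!(sort_files(&mut files).is_err());

    let mut files = vec![
        FileWriteInfo { name: "a.bin".to_string(), offset: 0x0, length: 0x8000 },
        FileWriteInfo { name: "b.bin".to_string(), offset: 0x8000, length: 0x100 },
    ];
    // Back-to-back extents do not overlap.
    assert!(sort_files(&mut files).is_ok());
}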

fn in_memory_test(
    path: &Path,
    output: Option<&Path>,
    test_output: Option<&Path>,
) -> nod::Result<()> {
    let start = Instant::now();
    info!("Opening disc image '{}'", path.display());
    let disc = DiscReader::new(path, &DiscOptions::default())?;
    info!(
        "Opened disc image '{}' (Disc {}, Revision {})",
        disc.header().game_title_str(),
        disc.header().disc_num + 1,
        disc.header().disc_version
    );
    let Some(orig_crc32) = disc.meta().crc32 else {
        return Err(nod::Error::Other("CRC32 not found in disc metadata".to_string()));
    };
    let mut partition =
        disc.open_partition_kind(PartitionKind::Data, &PartitionOptions::default())?;
    let meta = partition.meta()?;

    // Build metadata
    let mut file_infos = Vec::new();
    let header = meta.header();
    let junk_id = get_junk_id(header);
    let partition_header = meta.partition_header();
    let fst = meta.fst()?;

    file_infos.push(FileWriteInfo {
        name: "sys/boot.bin".to_string(),
        offset: 0,
        length: BOOT_SIZE as u64,
    });
    file_infos.push(FileWriteInfo {
        name: "sys/bi2.bin".to_string(),
        offset: BOOT_SIZE as u64,
        length: BI2_SIZE as u64,
    });
    file_infos.push(FileWriteInfo {
        name: "sys/apploader.img".to_string(),
        offset: BOOT_SIZE as u64 + BI2_SIZE as u64,
        length: meta.raw_apploader.len() as u64,
    });
    let fst_offset = partition_header.fst_offset(false);
    let dol_offset = partition_header.dol_offset(false);
    if dol_offset < fst_offset {
        file_infos.push(FileWriteInfo {
            name: "sys/main.dol".to_string(),
            offset: dol_offset,
            length: meta.raw_dol.len() as u64,
        });
    } else {
        let mut found = false;
        for (_, node, name) in fst.iter() {
            if !node.is_file() {
                continue;
            }
            let offset = node.offset(false);
            if offset == dol_offset {
                info!("Using DOL from FST: {}", name);
                found = true;
            }
        }
        if !found {
            return Err(nod::Error::Other("DOL not found in FST".to_string()));
        }
    }
    let fst_size = partition_header.fst_size(false);
    file_infos.push(FileWriteInfo {
        name: "sys/fst.bin".to_string(),
        offset: fst_offset,
        length: fst_size,
    });

    // Collect files
    let mut builder = GCPartitionBuilder::new(false, PartitionOverrides::default());
    for (idx, node, path) in fst.iter() {
        let offset = node.offset(false);
        let length = node.length() as u64;
        if node.is_dir() {
            if length as usize == idx + 1 {
                println!("Empty directory: {}", path);
            }
            continue;
        }

        if let Some(junk_id) = junk_id {
            // Some games have junk data in place of files that were removed from the disc layout.
            // This is a naive check to skip these files in our disc layout so that the junk data
            // alignment is correct. This misses some cases where the junk data starts in the middle
            // of a file, but handling those cases would require a more complex solution.
            if length > 4
                && check_junk_data(partition.as_mut(), offset, length, junk_id, header.disc_num)?
            {
                warn!("Skipping junk data file: {} (size {})", path, length);
                builder.add_junk_file(path);
                continue;
            }
        }

        builder.add_file(FileInfo {
            name: path,
            size: length,
            offset: Some(offset),
            alignment: None,
        })?;
    }

    // Write files
    info!("Writing disc image with {} files", file_infos.len());
    for file in &file_infos {
        builder.add_file(FileInfo {
            name: file.name.clone(),
            size: file.length,
            offset: Some(file.offset),
            alignment: None,
        })?;
    }
    let writer = builder.build(|out: &mut dyn Write, name: &str| match name {
        "sys/boot.bin" => out.write_all(meta.raw_boot.as_ref()),
        "sys/bi2.bin" => out.write_all(meta.raw_bi2.as_ref()),
        "sys/fst.bin" => out.write_all(meta.raw_fst.as_ref()),
        "sys/apploader.img" => out.write_all(meta.raw_apploader.as_ref()),
        "sys/main.dol" => out.write_all(meta.raw_dol.as_ref()),
        path => {
            let Some((_, node)) = fst.find(path) else {
                return Err(io::Error::new(
                    io::ErrorKind::NotFound,
                    format!("File not found: {}", path),
                ));
            };
            let mut file = partition.open_file(node)?;
            buf_copy(&mut file, out)?;
            Ok(())
        }
    })?;
    let disc_stream = writer.into_stream(PartitionFileReader { partition, meta })?;
    let disc_reader = DiscReader::new_stream(disc_stream, &DiscOptions::default())?;
    let disc_writer = DiscWriter::new(disc_reader, &FormatOptions::default())?;
    let process_options = ProcessOptions { digest_crc32: true, ..Default::default() };
    let finalization = if let Some(output) = output {
        let mut out = File::create(output)
            .with_context(|| format!("Failed to create {}", output.display()))?;
        let finalization =
            disc_writer.process(|data, _, _| out.write_all(data.as_ref()), &process_options)?;
        out.flush().context("Failed to flush output file")?;
        finalization
    } else {
        disc_writer.process(|_, _, _| Ok(()), &process_options)?
    };
    let crc = finalization.crc32.unwrap();
    info!("Generated disc image in {:?} (CRC32: {:08X})", start.elapsed(), crc);
    if crc != orig_crc32 {
        if let Some(test_output) = test_output {
            let open_options = DiscOptions {
                partition_encryption: PartitionEncryption::Original,
                preloader_threads: 4,
            };
            convert_and_verify(
                path,
                Some(test_output),
                false,
                &open_options,
                &FormatOptions::default(),
            )?;
        }
        return Err(nod::Error::Other(format!(
            "CRC32 mismatch: {:08X} != {:08X}",
            crc, orig_crc32
        )));
    }
    Ok(())
}

#[derive(Clone)]
struct PartitionFileReader {
    partition: Box<dyn PartitionReader>,
    meta: PartitionMeta,
}

impl FileCallback for PartitionFileReader {
    fn read_file(&mut self, out: &mut [u8], name: &str, offset: u64) -> io::Result<()> {
        let data: &[u8] = match name {
            "sys/boot.bin" => self.meta.raw_boot.as_ref(),
            "sys/bi2.bin" => self.meta.raw_bi2.as_ref(),
            "sys/fst.bin" => self.meta.raw_fst.as_ref(),
            "sys/apploader.img" => self.meta.raw_apploader.as_ref(),
            "sys/main.dol" => self.meta.raw_dol.as_ref(),
            path => {
                let fst = self.meta.fst().map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
                let Some((_, node)) = fst.find(path) else {
                    return Err(io::Error::new(
                        io::ErrorKind::NotFound,
                        format!("File not found: {}", path),
                    ));
                };
                let mut file = self.partition.open_file(node)?;
                file.seek(SeekFrom::Start(offset))?;
                file.read_exact(out)?;
                return Ok(());
            }
        };
        let offset = offset as usize;
        let len = out.len().min(data.len() - offset);
        out[..len].copy_from_slice(&data[offset..offset + len]);
        Ok(())
    }
}

/// Some disc files still exist in the FST, but were removed from the disc layout. These files had
/// junk data written in their place, since the disc creator did not know about them. To match the
/// original disc, we need to check for these files and remove them from our disc layout as well.
/// This ensures that the junk data alignment is correct.
fn check_junk_data(
    partition: &mut dyn PartitionReader,
    offset: u64,
    len: u64,
    junk_id: [u8; 4],
    disc_num: u8,
) -> nod::Result<bool> {
    if len == 0 {
        return Ok(false);
    }

    partition
        .seek(SeekFrom::Start(offset))
        .with_context(|| format!("Seeking to offset {}", offset))?;
    let mut lfg = LaggedFibonacci::default();
    let mut pos = offset;
    let mut remaining = len;
    while remaining > 0 {
        let file_buf = partition
            .fill_buf()
            .with_context(|| format!("Failed to read disc file at offset {}", offset))?;
        let read_len = (file_buf.len() as u64).min(remaining) as usize;
        if !lfg.check_sector_chunked(&file_buf[..read_len], junk_id, disc_num, pos) {
            return Ok(false);
        }

        pos += read_len as u64;
        remaining -= read_len as u64;
        partition.consume(read_len);
    }
    Ok(true)
}
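
// Illustrative sketch (not part of this commit): probing a suspected extent
// before adding it to the layout, mirroring the call in `in_memory_test`. The
// offset, length, and junk ID below are hypothetical.
#[allow(dead_code)]
fn example_junk_probe(partition: &mut dyn PartitionReader) -> nod::Result<()> {
    let is_junk = check_junk_data(partition, 0x40000, 0x2000, *b"GMTJ", 0)?;
    if is_junk {
        // A matching lagged-Fibonacci stream means the file was removed from
        // the layout and its extent regenerates as junk data.
        warn!("Extent 0x40000..0x42000 is junk data");
    }
    Ok(())
}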

pub struct HashStream<W> {
    inner: W,
    hasher: crc32fast::Hasher,
    position: u64,
}

impl<W> HashStream<W> {
    pub fn new(inner: W) -> Self { Self { inner, hasher: Default::default(), position: 0 } }

    pub fn finish(self) -> u32 { self.hasher.finalize() }
}

impl<W> HashStream<W>
where W: Write
{
    pub fn write_zeroes(&mut self, mut len: u64) -> io::Result<()> {
        while len > 0 {
            let write_len = len.min(SECTOR_SIZE as u64) as usize;
            self.write_all(&ZERO_SECTOR[..write_len])?;
            len -= write_len as u64;
        }
        Ok(())
    }
}

impl<W> Write for HashStream<W>
where W: Write
{
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.hasher.update(buf);
        self.position += buf.len() as u64;
        self.inner.write(buf)
    }

    fn flush(&mut self) -> io::Result<()> { self.inner.flush() }
}

const ZERO_SECTOR: [u8; SECTOR_SIZE] = [0; SECTOR_SIZE];

impl<W> Seek for HashStream<W>
where W: Write
{
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        let new_position = match pos {
            SeekFrom::Start(v) => v,
            SeekFrom::Current(v) => self.position.saturating_add_signed(v),
            SeekFrom::End(_) => {
                return Err(io::Error::new(
                    io::ErrorKind::Unsupported,
                    "HashStream: SeekFrom::End is not supported".to_string(),
                ));
            }
        };
        if new_position < self.position {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "HashStream: Cannot seek backwards".to_string(),
            ));
        }
        self.write_zeroes(new_position - self.position)?;
        Ok(new_position)
    }

    fn stream_position(&mut self) -> io::Result<u64> { Ok(self.position) }
}
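
// Illustrative sketch (not part of this commit): `HashStream` hashes every
// byte it passes through, and a forward seek is filled with zeroes, so sparse
// writes produce the same CRC32 as a fully materialized image. `io::sink()`
// stands in for an output that discards the data; offsets are hypothetical.
#[allow(dead_code)]
fn example_hash_stream() -> io::Result<u32> {
    let mut stream = HashStream::new(std::io::sink());
    stream.write_all(b"boot")?;
    // Skip ahead; the gap is hashed as zeroes rather than left undefined.
    stream.seek(SeekFrom::Start(0x440))?;
    stream.write_all(b"bi2")?;
    Ok(stream.finish())
}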

/// Copies from a buffered reader to a writer without extra allocations.
fn buf_copy<R, W>(reader: &mut R, writer: &mut W) -> io::Result<u64>
where
    R: BufRead + ?Sized,
    W: Write + ?Sized,
{
    let mut copied = 0;
    loop {
        let buf = reader.fill_buf()?;
        let len = buf.len();
        if len == 0 {
            break;
        }
        writer.write_all(buf)?;
        reader.consume(len);
        copied += len as u64;
    }
    Ok(copied)
}
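
// Illustrative sketch (not part of this commit): `buf_copy` drains any
// `BufRead` into any `Write`, reusing the reader's internal buffer instead of
// allocating a scratch buffer of its own.
#[allow(dead_code)]
fn example_buf_copy() -> io::Result<()> {
    // A byte slice implements `BufRead`, so it can stand in for a disc file.
    let mut reader: &[u8] = b"apploader image bytes";
    let mut sink = Vec::new();
    let copied = buf_copy(&mut reader, &mut sink)?;
    assert_eq!(copied, sink.len() as u64);
    Ok(())
}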

@@ -1,10 +1,14 @@
use std::path::{Path, PathBuf};

use argp::FromArgs;
use nod::{Disc, SECTOR_SIZE};
use nod::{
    disc::SECTOR_SIZE,
    read::{DiscOptions, DiscReader, PartitionOptions},
};
use size::Size;
use tracing::info;

use crate::util::{display, shared::print_header};
use crate::util::{path_display, shared::print_header};

#[derive(FromArgs, Debug)]
/// Displays information about disc images.

@@ -23,15 +27,15 @@ pub fn run(args: Args) -> nod::Result<()> {
}

fn info_file(path: &Path) -> nod::Result<()> {
    log::info!("Loading {}", display(path));
    let disc = Disc::new(path)?;
    info!("Loading {}", path_display(path));
    let disc = DiscReader::new(path, &DiscOptions::default())?;
    let header = disc.header();
    let meta = disc.meta();
    print_header(header, &meta);

    if header.is_wii() {
        for (idx, info) in disc.partitions().iter().enumerate() {
            let mut partition = disc.open_partition(idx)?;
            let mut partition = disc.open_partition(idx, &PartitionOptions::default())?;
            let meta = partition.meta()?;

            println!();

@@ -1,5 +1,6 @@
pub mod convert;
pub mod dat;
pub mod extract;
pub mod gen;
pub mod info;
pub mod verify;

@@ -1,7 +1,10 @@
use std::path::PathBuf;

use argp::FromArgs;
use nod::{OpenOptions, PartitionEncryptionMode};
use nod::{
    read::{DiscOptions, PartitionEncryption},
    write::FormatOptions,
};

use crate::util::{redump, shared::convert_and_verify};

@@ -18,6 +21,12 @@ pub struct Args {
    #[argp(option, short = 'd')]
    /// path to DAT file(s) for verification (optional)
    dat: Vec<PathBuf>,
    #[argp(switch)]
    /// decrypt Wii partition data
    decrypt: bool,
    #[argp(switch)]
    /// encrypt Wii partition data
    encrypt: bool,
}

pub fn run(args: Args) -> nod::Result<()> {

@@ -25,9 +34,21 @@ pub fn run(args: Args) -> nod::Result<()> {
        println!("Loading dat files...");
        redump::load_dats(args.dat.iter().map(PathBuf::as_ref))?;
    }
    let options = OpenOptions { partition_encryption: PartitionEncryptionMode::Original };
    let cpus = num_cpus::get();
    let options = DiscOptions {
        partition_encryption: match (args.decrypt, args.encrypt) {
            (true, false) => PartitionEncryption::ForceDecrypted,
            (false, true) => PartitionEncryption::ForceEncrypted,
            (false, false) => PartitionEncryption::Original,
            (true, true) => {
                return Err(nod::Error::Other("Both --decrypt and --encrypt specified".to_string()))
            }
        },
        preloader_threads: 4.min(cpus),
    };
    let format_options = FormatOptions::default();
    for file in &args.file {
        convert_and_verify(file, None, args.md5, &options)?;
        convert_and_verify(file, None, args.md5, &options, &format_options)?;
        println!();
    }
    Ok(())

@@ -9,19 +9,23 @@ pub use nod;
#[derive(FromArgs, Debug)]
#[argp(subcommand)]
pub enum SubCommand {
    Dat(cmd::dat::Args),
    Info(cmd::info::Args),
    Extract(cmd::extract::Args),
    Convert(cmd::convert::Args),
    Dat(cmd::dat::Args),
    Extract(cmd::extract::Args),
    // Gen(cmd::gen::Args),
    GenTest(cmd::gen::TestArgs),
    Info(cmd::info::Args),
    Verify(cmd::verify::Args),
}

pub fn run(command: SubCommand) -> nod::Result<()> {
    match command {
        SubCommand::Dat(c_args) => cmd::dat::run(c_args),
        SubCommand::Info(c_args) => cmd::info::run(c_args),
        SubCommand::Convert(c_args) => cmd::convert::run(c_args),
        SubCommand::Dat(c_args) => cmd::dat::run(c_args),
        SubCommand::Extract(c_args) => cmd::extract::run(c_args),
        // SubCommand::Gen(c_args) => cmd::gen::run(c_args),
        SubCommand::GenTest(c_args) => cmd::gen::run_test(c_args),
        SubCommand::Info(c_args) => cmd::info::run(c_args),
        SubCommand::Verify(c_args) => cmd::verify::run(c_args),
    }
}

@@ -12,8 +12,6 @@ use argp::{FromArgValue, FromArgs};
use enable_ansi_support::enable_ansi_support;
use nodtool::{run, SubCommand};
use supports_color::Stream;
use tracing::level_filters::LevelFilter;
use tracing_subscriber::EnvFilter;

#[derive(FromArgs, Debug)]
/// Tool for reading GameCube and Wii disc images.

@@ -99,27 +97,43 @@ fn main() {
        supports_color::on(Stream::Stdout).is_some_and(|c| c.has_basic)
    };

    let format =
        tracing_subscriber::fmt::format().with_ansi(use_colors).with_target(false).without_time();
    let builder = tracing_subscriber::fmt().event_format(format);
    if let Some(level) = args.log_level {
        builder
            .with_max_level(match level {
                LogLevel::Error => LevelFilter::ERROR,
                LogLevel::Warn => LevelFilter::WARN,
                LogLevel::Info => LevelFilter::INFO,
                LogLevel::Debug => LevelFilter::DEBUG,
                LogLevel::Trace => LevelFilter::TRACE,
            })
            .init();
    } else {
        builder
            .with_env_filter(
                EnvFilter::builder()
                    .with_default_directive(LevelFilter::INFO.into())
                    .from_env_lossy(),
            )
            .init();
    #[cfg(feature = "tracy")]
    {
        use tracing_subscriber::layer::SubscriberExt;
        tracing::subscriber::set_global_default(
            tracing_subscriber::registry().with(tracing_tracy::TracyLayer::default()),
        )
        .expect("setup tracy layer");
    }

    #[cfg(not(feature = "tracy"))]
    {
        use tracing::level_filters::LevelFilter;
        use tracing_subscriber::EnvFilter;
        let format = tracing_subscriber::fmt::format()
            .with_ansi(use_colors)
            .with_target(false)
            .without_time();
        let builder = tracing_subscriber::fmt().event_format(format);
        if let Some(level) = args.log_level {
            builder
                .with_max_level(match level {
                    LogLevel::Error => LevelFilter::ERROR,
                    LogLevel::Warn => LevelFilter::WARN,
                    LogLevel::Info => LevelFilter::INFO,
                    LogLevel::Debug => LevelFilter::DEBUG,
                    LogLevel::Trace => LevelFilter::TRACE,
                })
                .init();
        } else {
            builder
                .with_env_filter(
                    EnvFilter::builder()
                        .with_default_directive(LevelFilter::INFO.into())
                        .from_env_lossy(),
                )
                .init();
        }
    }

    let mut result = Ok(());

@@ -1,29 +1,4 @@
use std::{
    fmt,
    sync::{
        mpsc::{sync_channel, SyncSender},
        Arc,
    },
    thread,
    thread::JoinHandle,
};

use digest::{Digest, Output};

pub type DigestThread = (SyncSender<Arc<[u8]>>, JoinHandle<DigestResult>);

pub fn digest_thread<H>() -> DigestThread
where H: Hasher + Send + 'static {
    let (tx, rx) = sync_channel::<Arc<[u8]>>(1);
    let handle = thread::spawn(move || {
        let mut hasher = H::new();
        while let Ok(data) = rx.recv() {
            hasher.update(data.as_ref());
        }
        hasher.finalize()
    });
    (tx, handle)
}
use std::fmt;

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DigestResult {

@@ -48,49 +23,9 @@ impl fmt::Display for DigestResult {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            DigestResult::Crc32(crc) => write!(f, "{:08x}", crc),
            DigestResult::Md5(md5) => write!(f, "{:032x}", <Output<md5::Md5>>::from(*md5)),
            DigestResult::Sha1(sha1) => write!(f, "{:040x}", <Output<sha1::Sha1>>::from(*sha1)),
            DigestResult::Md5(md5) => write!(f, "{}", hex::encode(md5)),
            DigestResult::Sha1(sha1) => write!(f, "{}", hex::encode(sha1)),
            DigestResult::Xxh64(xxh64) => write!(f, "{:016x}", xxh64),
        }
    }
}

pub trait Hasher {
    fn new() -> Self;
    fn finalize(self) -> DigestResult;
    fn update(&mut self, data: &[u8]);
}

impl Hasher for md5::Md5 {
    fn new() -> Self { Digest::new() }

    fn finalize(self) -> DigestResult { DigestResult::Md5(Digest::finalize(self).into()) }

    fn update(&mut self, data: &[u8]) { Digest::update(self, data) }
}

impl Hasher for sha1::Sha1 {
    fn new() -> Self { Digest::new() }

    fn finalize(self) -> DigestResult { DigestResult::Sha1(Digest::finalize(self).into()) }

    fn update(&mut self, data: &[u8]) { Digest::update(self, data) }
}

impl Hasher for crc32fast::Hasher {
    fn new() -> Self { crc32fast::Hasher::new() }

    fn finalize(self) -> DigestResult { DigestResult::Crc32(crc32fast::Hasher::finalize(self)) }

    fn update(&mut self, data: &[u8]) { crc32fast::Hasher::update(self, data) }
}

impl Hasher for xxhash_rust::xxh64::Xxh64 {
    fn new() -> Self { xxhash_rust::xxh64::Xxh64::new(0) }

    fn finalize(self) -> DigestResult {
        DigestResult::Xxh64(xxhash_rust::xxh64::Xxh64::digest(&self))
    }

    fn update(&mut self, data: &[u8]) { xxhash_rust::xxh64::Xxh64::update(self, data) }
}
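
// Illustrative sketch (not part of this commit): the `Display` impl above
// fixes the width per algorithm, so a CRC32 always renders as 8 hex digits and
// an XXH64 as 16, while byte-array digests go through `hex::encode`.
#[allow(dead_code)]
fn example_digest_display() {
    assert_eq!(DigestResult::Crc32(0xDEADBEEF).to_string(), "deadbeef");
    assert_eq!(DigestResult::Xxh64(0x1234).to_string(), "0000000000001234");
}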

@@ -8,7 +8,7 @@ use std::{
    path::{Path, MAIN_SEPARATOR},
};

pub fn display(path: &Path) -> PathDisplay { PathDisplay { path } }
pub fn path_display(path: &Path) -> PathDisplay { PathDisplay { path } }

pub struct PathDisplay<'a> {
    path: &'a Path,

@@ -19,7 +19,7 @@ impl fmt::Display for PathDisplay<'_> {
        let mut first = true;
        for segment in self.path.iter() {
            let segment_str = segment.to_string_lossy();
            if segment_str == "." {
            if segment_str == "/" || segment_str == "." {
                continue;
            }
            if first {

@@ -39,3 +39,15 @@ pub fn has_extension(filename: &Path, extension: &str) -> bool {
        None => false,
    }
}

/// Creates a fixed-size array reference from a slice.
macro_rules! array_ref {
    ($slice:expr, $offset:expr, $size:expr) => {{
        #[inline(always)]
        fn to_array<T>(slice: &[T]) -> &[T; $size] {
            unsafe { &*(slice.as_ptr() as *const [_; $size]) }
        }
        to_array(&$slice[$offset..$offset + $size])
    }};
}
pub(crate) use array_ref;
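
// Illustrative sketch (not part of this commit): borrowing the first four
// bytes of a six-byte game ID as a fixed-size array without copying, the same
// pattern `get_junk_id` uses. The macro panics (via the slice index) if the
// requested range is out of bounds.
#[allow(dead_code)]
fn example_array_ref() {
    let game_id = *b"GALE01";
    let junk_id: &[u8; 4] = array_ref!(game_id, 0, 4);
    assert_eq!(junk_id, b"GALE");
}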

@@ -8,10 +8,12 @@ use std::{
};

use hex::deserialize as deserialize_hex;
use nod::{array_ref, Result};
use nod::Result;
use serde::Deserialize;
use zerocopy::{FromBytes, FromZeros, Immutable, IntoBytes, KnownLayout};

use crate::util::array_ref;

#[derive(Clone, Debug)]
pub struct GameResult<'a> {
    pub name: &'a str,

@@ -1,22 +1,21 @@
use std::{
    cmp::min,
    fmt,
    fs::File,
    io::{Read, Write},
    io::{Seek, SeekFrom, Write},
    path::Path,
    sync::{mpsc::sync_channel, Arc},
    thread,
};

use indicatif::{ProgressBar, ProgressState, ProgressStyle};
use nod::{Compression, Disc, DiscHeader, DiscMeta, OpenOptions, Result, ResultContext};
use size::Size;
use zerocopy::FromZeros;

use crate::util::{
    digest::{digest_thread, DigestResult},
    display, redump,
use nod::{
    common::Compression,
    disc::DiscHeader,
    read::{DiscMeta, DiscOptions, DiscReader, PartitionEncryption},
    write::{DiscWriter, DiscWriterWeight, FormatOptions, ProcessOptions},
    Result, ResultContext,
};
use size::Size;

use crate::util::{digest::DigestResult, path_display, redump};

pub fn print_header(header: &DiscHeader, meta: &DiscMeta) {
    println!("Format: {}", meta.format);

@@ -29,52 +28,71 @@ pub fn print_header(header: &DiscHeader, meta: &DiscMeta) {
    println!("Lossless: {}", meta.lossless);
    println!(
        "Verification data: {}",
        meta.crc32.is_some()
            || meta.md5.is_some()
            || meta.sha1.is_some()
            || meta.xxhash64.is_some()
        meta.crc32.is_some() || meta.md5.is_some() || meta.sha1.is_some() || meta.xxh64.is_some()
    );
    println!();
    println!("Title: {}", header.game_title_str());
    println!("Game ID: {}", header.game_id_str());
    println!("Disc {}, Revision {}", header.disc_num + 1, header.disc_version);
    if !header.has_partition_hashes() {
        println!("[!] Disc has no hashes");
    }
    if !header.has_partition_encryption() {
        println!("[!] Disc is not encrypted");
    }
    if !header.has_partition_hashes() {
        println!("[!] Disc has no hashes");
    }
}

pub fn convert_and_verify(
    in_file: &Path,
    out_file: Option<&Path>,
    md5: bool,
    options: &OpenOptions,
    options: &DiscOptions,
    format_options: &FormatOptions,
) -> Result<()> {
    println!("Loading {}", display(in_file));
    let mut disc = Disc::new_with_options(in_file, options)?;
    println!("Loading {}", path_display(in_file));
    let disc = DiscReader::new(in_file, options)?;
    let header = disc.header();
    let meta = disc.meta();
    print_header(header, &meta);

    let disc_size = disc.disc_size();

    let mut file = if let Some(out_file) = out_file {
        Some(
            File::create(out_file)
                .with_context(|| format!("Creating file {}", display(out_file)))?,
                .with_context(|| format!("Creating file {}", path_display(out_file)))?,
        )
    } else {
        None
    };

    if out_file.is_some() {
        println!("\nConverting...");
        match options.partition_encryption {
            PartitionEncryption::ForceEncrypted => {
                println!("\nConverting to {} (encrypted)...", format_options.format)
            }
            PartitionEncryption::ForceDecrypted => {
                println!("\nConverting to {} (decrypted)...", format_options.format)
            }
            _ => println!("\nConverting to {}...", format_options.format),
        }
        if format_options.compression != Compression::None {
            println!("Compression: {}", format_options.compression);
        }
        if format_options.block_size > 0 {
            println!("Block size: {}", Size::from_bytes(format_options.block_size));
        }
    } else {
        println!("\nVerifying...");
        match options.partition_encryption {
            PartitionEncryption::ForceEncrypted => {
                println!("\nVerifying (encrypted)...")
            }
            PartitionEncryption::ForceDecrypted => {
                println!("\nVerifying (decrypted)...")
            }
            _ => println!("\nVerifying..."),
        }
    }
    let pb = ProgressBar::new(disc_size);
    let disc_writer = DiscWriter::new(disc, format_options)?;
    let pb = ProgressBar::new(disc_writer.progress_bound());
    pb.set_style(ProgressStyle::with_template("{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {bytes}/{total_bytes} ({bytes_per_sec}, {eta})")
        .unwrap()
        .with_key("eta", |state: &ProgressState, w: &mut dyn fmt::Write| {

@@ -82,85 +100,71 @@ pub fn convert_and_verify(
        })
        .progress_chars("#>-"));

    const BUFFER_SIZE: usize = 1015808; // LCM(0x8000, 0x7C00)
    let digest_threads = if md5 {
        vec![
            digest_thread::<crc32fast::Hasher>(),
            digest_thread::<md5::Md5>(),
            digest_thread::<sha1::Sha1>(),
            digest_thread::<xxhash_rust::xxh64::Xxh64>(),
        ]
    } else {
        vec![
            digest_thread::<crc32fast::Hasher>(),
            digest_thread::<sha1::Sha1>(),
            digest_thread::<xxhash_rust::xxh64::Xxh64>(),
        ]
    let cpus = num_cpus::get();
    let processor_threads = match disc_writer.weight() {
        DiscWriterWeight::Light => 0,
        DiscWriterWeight::Medium => cpus / 2,
        DiscWriterWeight::Heavy => cpus,
    };

    let (w_tx, w_rx) = sync_channel::<Arc<[u8]>>(1);
    let w_thread = thread::spawn(move || {
        let mut total_written = 0u64;
        while let Ok(data) = w_rx.recv() {
    let mut total_written = 0u64;
    let finalization = disc_writer.process(
        |data, pos, _| {
            if let Some(file) = &mut file {
                file.write_all(data.as_ref())
                    .with_context(|| {
                        format!("Writing {} bytes at offset {}", data.len(), total_written)
                    })
                    .unwrap();
                file.write_all(data.as_ref())?;
            }
            total_written += data.len() as u64;
            pb.set_position(total_written);
        }
        if let Some(mut file) = file {
            file.flush().context("Flushing output file").unwrap();
        }
        pb.finish();
    });
            pb.set_position(pos);
            Ok(())
        },
        &ProcessOptions {
            processor_threads,
            digest_crc32: true,
            digest_md5: md5,
            digest_sha1: true,
            digest_xxh64: true,
        },
    )?;
    pb.finish();

    let mut total_read = 0u64;
    let mut buf = <[u8]>::new_box_zeroed_with_elems(BUFFER_SIZE)?;
    while total_read < disc_size {
        let read = min(BUFFER_SIZE as u64, disc_size - total_read) as usize;
        disc.read_exact(&mut buf[..read]).with_context(|| {
            format!("Reading {} bytes at disc offset {}", BUFFER_SIZE, total_read)
        })?;

        let arc = Arc::<[u8]>::from(&buf[..read]);
        for (tx, _) in &digest_threads {
            tx.send(arc.clone()).map_err(|_| "Sending data to hash thread")?;
    // Finalize disc writer
    if !finalization.header.is_empty() {
        if let Some(file) = &mut file {
            file.seek(SeekFrom::Start(0)).context("Seeking to start of output file")?;
            file.write_all(finalization.header.as_ref()).context("Writing header")?;
        } else {
            return Err(nod::Error::Other("No output file, but requires finalization".to_string()));
        }
        w_tx.send(arc).map_err(|_| "Sending data to write thread")?;
        total_read += read as u64;
    }
    drop(w_tx); // Close channel
    w_thread.join().unwrap();
    if let Some(mut file) = file {
        file.flush().context("Flushing output file")?;
    }

    println!();
    if let Some(path) = out_file {
        println!("Wrote {} to {}", Size::from_bytes(total_read), display(path));
        println!("Wrote {} to {}", Size::from_bytes(total_written), path_display(path));
    }

    println!();
    let mut crc32 = None;
    let mut md5 = None;
    let mut sha1 = None;
    let mut xxh64 = None;
    for (tx, handle) in digest_threads {
        drop(tx); // Close channel
        match handle.join().unwrap() {
            DigestResult::Crc32(v) => crc32 = Some(v),
            DigestResult::Md5(v) => md5 = Some(v),
            DigestResult::Sha1(v) => sha1 = Some(v),
            DigestResult::Xxh64(v) => xxh64 = Some(v),
        }
    }

    let redump_entry = crc32.and_then(redump::find_by_crc32);
    let expected_crc32 = meta.crc32.or(redump_entry.as_ref().map(|e| e.crc32));
    let expected_md5 = meta.md5.or(redump_entry.as_ref().map(|e| e.md5));
    let expected_sha1 = meta.sha1.or(redump_entry.as_ref().map(|e| e.sha1));
    let expected_xxh64 = meta.xxhash64;
    let mut redump_entry = None;
    let mut expected_crc32 = None;
    let mut expected_md5 = None;
    let mut expected_sha1 = None;
    let mut expected_xxh64 = None;
    if options.partition_encryption == PartitionEncryption::Original {
        // Use verification data in disc and check redump
        redump_entry = finalization.crc32.and_then(redump::find_by_crc32);
        expected_crc32 = meta.crc32.or(redump_entry.as_ref().map(|e| e.crc32));
        expected_md5 = meta.md5.or(redump_entry.as_ref().map(|e| e.md5));
        expected_sha1 = meta.sha1.or(redump_entry.as_ref().map(|e| e.sha1));
        expected_xxh64 = meta.xxh64;
    } else if options.partition_encryption == PartitionEncryption::ForceEncrypted {
        // Ignore verification data in disc, but still check redump
        redump_entry = finalization.crc32.and_then(redump::find_by_crc32);
        expected_crc32 = redump_entry.as_ref().map(|e| e.crc32);
        expected_md5 = redump_entry.as_ref().map(|e| e.md5);
        expected_sha1 = redump_entry.as_ref().map(|e| e.sha1);
    }

    fn print_digest(value: DigestResult, expected: Option<DigestResult>) {
        print!("{:<6}: ", value.name());

@@ -176,36 +180,36 @@ pub fn convert_and_verify(
        println!();
    }

    if let Some(entry) = &redump_entry {
        let mut full_match = true;
        if let Some(md5) = md5 {
            if entry.md5 != md5 {
                full_match = false;
    if let Some(crc32) = finalization.crc32 {
        if let Some(entry) = &redump_entry {
            let mut full_match = true;
            if let Some(md5) = finalization.md5 {
                if entry.md5 != md5 {
                    full_match = false;
                }
            }
        }
        if let Some(sha1) = sha1 {
            if entry.sha1 != sha1 {
                full_match = false;
            if let Some(sha1) = finalization.sha1 {
                if entry.sha1 != sha1 {
                    full_match = false;
                }
            }
            if full_match {
                println!("Redump: {} ✅", entry.name);
            } else {
                println!("Redump: {} ❓ (partial match)", entry.name);
            }
        }
        if full_match {
            println!("Redump: {} ✅", entry.name);
        } else {
            println!("Redump: {} ❓ (partial match)", entry.name);
            println!("Redump: Not found ❌");
        }
    } else {
        println!("Redump: Not found ❌");
    }
    if let Some(crc32) = crc32 {
        print_digest(DigestResult::Crc32(crc32), expected_crc32.map(DigestResult::Crc32));
    }
    if let Some(md5) = md5 {
    if let Some(md5) = finalization.md5 {
        print_digest(DigestResult::Md5(md5), expected_md5.map(DigestResult::Md5));
    }
    if let Some(sha1) = sha1 {
    if let Some(sha1) = finalization.sha1 {
        print_digest(DigestResult::Sha1(sha1), expected_sha1.map(DigestResult::Sha1));
    }
    if let Some(xxh64) = xxh64 {
    if let Some(xxh64) = finalization.xxh64 {
        print_digest(DigestResult::Xxh64(xxh64), expected_xxh64.map(DigestResult::Xxh64));
    }
    Ok(())

@@ -6,3 +6,4 @@ reorder_impl_items = true
use_field_init_shorthand = true
use_small_heuristics = "Max"
where_single_line = true
format_code_in_doc_comments = true