From ce9fbbf822cf5e269474941023e157283d5d0795 Mon Sep 17 00:00:00 2001 From: Luke Street Date: Fri, 16 Feb 2024 22:53:37 -0700 Subject: [PATCH] Finish WIA/RVZ, add WBFS, CISO & more Generally a complete overhaul. --- Cargo.toml | 33 +- README.md | 54 +- build.rs | 9 + src/argp_version.rs | 64 +++ src/bin.rs | 606 +++++++++++++++++---- src/disc/gcn.rs | 248 ++++----- src/disc/mod.rs | 589 ++++++++++---------- src/disc/wii.rs | 768 +++++++++++++------------- src/fst.rs | 297 +++++----- src/io/ciso.rs | 267 +++++++++ src/io/iso.rs | 50 +- src/io/mod.rs | 144 ++--- src/io/nfs.rs | 524 +++++++++--------- src/io/nkit.rs | 146 +++++ src/io/split.rs | 165 ++++++ src/io/wbfs.rs | 203 +++++++ src/io/wia.rs | 1229 ++++++++++++++---------------------------- src/lib.rs | 132 ++++- src/streams.rs | 31 +- src/util/compress.rs | 82 +++ src/util/lfg.rs | 89 ++- src/util/mod.rs | 1 + src/util/reader.rs | 283 ++-------- 23 files changed, 3388 insertions(+), 2626 deletions(-) create mode 100644 build.rs create mode 100644 src/argp_version.rs create mode 100644 src/io/ciso.rs create mode 100644 src/io/nkit.rs create mode 100644 src/io/split.rs create mode 100644 src/io/wbfs.rs create mode 100644 src/util/compress.rs diff --git a/Cargo.toml b/Cargo.toml index d2446df..c356dfd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,8 +11,9 @@ readme = "README.md" description = """ Rust library and CLI tool for reading GameCube and Wii disc images. """ -keywords = ["gamecube", "wii", "iso", "nfs", "gcm"] +keywords = ["gamecube", "wii", "iso", "nfs", "rvz"] categories = ["command-line-utilities", "parser-implementations"] +build = "build.rs" [[bin]] name = "nodtool" @@ -24,27 +25,37 @@ lto = "thin" strip = "debuginfo" [features] -default = ["compress-bzip2", "compress-zstd"] #, "compress-lzma" +default = ["compress-bzip2", "compress-lzma", "compress-zstd"] asm = ["md-5/asm", "sha1/asm"] compress-bzip2 = ["bzip2"] +compress-lzma = ["liblzma"] compress-zstd = ["zstd"] -#compress-lzma = ["xz2"] nightly = ["crc32fast/nightly"] [dependencies] -aes = "0.8.3" -argh = "0.1.12" +aes = "0.8.4" argh_derive = "0.1.12" +argp = "0.3.0" base16ct = "0.2.0" -binrw = "0.13.3" -bytemuck = "1.14.1" -bzip2 = { version = "0.4.4", optional = true } +bzip2 = { version = "0.4.4", features = ["static"], optional = true } cbc = "0.1.2" -crc32fast = "1.3.2" +crc32fast = "1.4.0" +digest = "0.10.7" +enable-ansi-support = "0.2.1" encoding_rs = "0.8.33" file-size = "1.0.3" +indicatif = "0.17.8" +itertools = "0.12.1" +liblzma = { git = "https://github.com/encounter/liblzma-rs.git", rev = "ce29b22", features = ["static"], optional = true } +log = "0.4.20" md-5 = "0.10.6" +rayon = "1.8.1" sha1 = "0.10.6" -thiserror = "1.0.56" -xz2 = { version = "0.1.7", optional = true } +supports-color = "3.0.0" +thiserror = "1.0.57" +tracing = "0.1.40" +tracing-attributes = "0.1.27" +tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } +xxhash-rust = { version = "0.8.8", features = ["xxh64"] } +zerocopy = { version = "0.7.32", features = ["alloc", "derive"] } zstd = { version = "0.13.0", optional = true } diff --git a/README.md b/README.md index 0465b64..230badf 100644 --- a/README.md +++ b/README.md @@ -10,52 +10,68 @@ Library for traversing & reading GameCube and Wii disc images. -Based on the C++ library [nod](https://github.com/AxioDL/nod), +Originally based on the C++ library [nod](https://github.com/AxioDL/nod), but does not currently support authoring. Currently supported file formats: - ISO (GCM) - WIA / RVZ - WBFS -- NFS (Wii U VC files, e.g. 
`hif_000000.nfs`) +- CISO +- NFS (Wii U VC) -### CLI tool +## CLI tool -This crate includes a CLI tool `nodtool`, which can be used to extract disc images to a specified directory: +This crate includes a command-line tool called `nodtool`. + +### info + +Displays information about a disc image. + +```shell +nodtool info /path/to/game.iso +``` + +### extract + +Extracts the contents of a disc image to a directory. ```shell nodtool extract /path/to/game.iso [outdir] ``` -For Wii U VC titles, use `content/hif_*.nfs`: +For Wii U VC titles, use `content/hif_000000.nfs`: ```shell nodtool extract /path/to/game/content/hif_000000.nfs [outdir] ``` -### Library example +### convert + +Converts any supported format to raw ISO. + +```shell +nodtool convert /path/to/game.wia /path/to/game.iso +``` + +## Library example Opening a disc image and reading a file: ```rust use std::io::Read; -use nod::{ - disc::{new_disc_base, PartHeader}, - fst::NodeType, - io::{new_disc_io, DiscIOOptions}, -}; +use nod::{Disc, PartitionKind}; fn main() -> nod::Result<()> { - let options = DiscIOOptions::default(); - let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?; - let disc_base = new_disc_base(disc_io.as_mut())?; - let mut partition = disc_base.get_data_partition(disc_io.as_mut(), false)?; - let header = partition.read_header()?; - if let Some(NodeType::File(node)) = header.find_node("/MP3/Worlds.txt") { + let disc = Disc::new("path/to/file.iso")?; + let mut partition = disc.open_partition_kind(PartitionKind::Data)?; + let meta = partition.meta()?; + let fst = meta.fst()?; + if let Some((_, node)) = fst.find("/MP3/Worlds.txt") { let mut s = String::new(); partition - .begin_file_stream(node) + .open_file(node) .expect("Failed to open file stream") .read_to_string(&mut s) .expect("Failed to read file"); @@ -65,7 +81,7 @@ fn main() -> nod::Result<()> { } ``` -### License +## License Licensed under either of diff --git a/build.rs b/build.rs new file mode 100644 index 0000000..a488c56 --- /dev/null +++ b/build.rs @@ -0,0 +1,9 @@ +fn main() { + let output = std::process::Command::new("git") + .args(["rev-parse", "HEAD"]) + .output() + .expect("Failed to execute git"); + let rev = String::from_utf8(output.stdout).expect("Failed to parse git output"); + println!("cargo:rustc-env=GIT_COMMIT_SHA={rev}"); + println!("cargo:rustc-rerun-if-changed=.git/HEAD"); +} diff --git a/src/argp_version.rs b/src/argp_version.rs new file mode 100644 index 0000000..eccce05 --- /dev/null +++ b/src/argp_version.rs @@ -0,0 +1,64 @@ +// Originally from https://gist.github.com/suluke/e0c672492126be0a4f3b4f0e1115d77c +//! Extend `argp` to be better integrated with the `cargo` ecosystem +//! +//! For now, this only adds a --version/-V option which causes early-exit. +use std::ffi::OsStr; + +use argp::{parser::ParseGlobalOptions, EarlyExit, FromArgs, TopLevelCommand}; + +struct ArgsOrVersion(T) +where T: FromArgs; + +impl TopLevelCommand for ArgsOrVersion where T: FromArgs {} + +impl FromArgs for ArgsOrVersion +where T: FromArgs +{ + fn _from_args( + command_name: &[&str], + args: &[&OsStr], + parent: Option<&mut dyn ParseGlobalOptions>, + ) -> Result { + /// Also use argp for catching `--version`-only invocations + #[derive(FromArgs)] + struct Version { + /// Print version information and exit. 
+ #[argp(switch, short = 'V')] + pub version: bool, + } + + match Version::from_args(command_name, args) { + Ok(v) => { + if v.version { + println!( + "{} {} {}", + command_name.first().unwrap_or(&""), + env!("CARGO_PKG_VERSION"), + env!("GIT_COMMIT_SHA"), + ); + std::process::exit(0); + } else { + // Pass through empty arguments + T::_from_args(command_name, args, parent).map(Self) + } + } + Err(exit) => match exit { + EarlyExit::Help(_help) => { + // TODO: Chain help info from Version + // For now, we just put the switch on T as well + T::from_args(command_name, &["--help"]).map(Self) + } + EarlyExit::Err(_) => T::_from_args(command_name, args, parent).map(Self), + }, + } + } +} + +/// Create a `FromArgs` type from the current process’s `env::args`. +/// +/// This function will exit early from the current process if argument parsing was unsuccessful or if information like `--help` was requested. +/// Error messages will be printed to stderr, and `--help` output to stdout. +pub fn from_env() -> T +where T: TopLevelCommand { + argp::parse_args_or_exit::>(argp::DEFAULT).0 +} diff --git a/src/bin.rs b/src/bin.rs index 9b48752..d710108 100644 --- a/src/bin.rs +++ b/src/bin.rs @@ -1,82 +1,215 @@ +mod argp_version; + use std::{ + borrow::Cow, + env, error::Error, + ffi::OsStr, fs, fs::File, io, - io::{BufWriter, Write}, + io::{BufWriter, Read, Write}, path::{Path, PathBuf}, + str::FromStr, + sync::{ + mpsc::{sync_channel, SyncSender}, + Arc, + }, + thread, + thread::JoinHandle, }; -use argh_derive::FromArgs; +use argp::{FromArgValue, FromArgs}; +use digest::{Digest, Output}; +use enable_ansi_support::enable_ansi_support; +use indicatif::{ProgressBar, ProgressState, ProgressStyle}; +use itertools::Itertools; use nod::{ - disc::{new_disc_base, PartHeader, PartReadStream, PartitionType}, - fst::NodeType, - io::{has_extension, new_disc_io, DiscIOOptions}, - Result, ResultContext, + Disc, DiscHeader, Fst, Node, OpenOptions, PartitionBase, PartitionKind, PartitionMeta, Result, + ResultContext, }; -use sha1::Digest; +use supports_color::Stream; +use tracing::level_filters::LevelFilter; +use tracing_subscriber::EnvFilter; +use zerocopy::FromZeroes; #[derive(FromArgs, Debug)] /// Tool for reading GameCube and Wii disc images. struct TopLevel { - #[argh(subcommand)] + #[argp(subcommand)] command: SubCommand, + #[argp(option, short = 'C')] + /// Change working directory. + chdir: Option, + #[argp(option, short = 'L')] + /// Minimum logging level. (Default: info) + /// Possible values: error, warn, info, debug, trace + log_level: Option, + #[allow(unused)] + #[argp(switch, short = 'V')] + /// Print version information and exit. + version: bool, + #[argp(switch)] + /// Disable color output. (env: NO_COLOR) + no_color: bool, } #[derive(FromArgs, Debug)] -#[argh(subcommand)] +#[argp(subcommand)] enum SubCommand { + Info(InfoArgs), Extract(ExtractArgs), Convert(ConvertArgs), Verify(VerifyArgs), } #[derive(FromArgs, Debug)] -/// Extract a disc image. -#[argh(subcommand, name = "extract")] -struct ExtractArgs { - #[argh(positional)] - /// path to disc image (ISO or NFS) +/// Displays information about a disc image. +#[argp(subcommand, name = "info")] +struct InfoArgs { + #[argp(positional)] + /// path to disc image file: PathBuf, - #[argh(positional)] +} + +#[derive(FromArgs, Debug)] +/// Extract a disc image. 
+#[argp(subcommand, name = "extract")] +struct ExtractArgs { + #[argp(positional)] + /// path to disc image + file: PathBuf, + #[argp(positional)] /// output directory (optional) out: Option, - #[argh(switch, short = 'q')] + #[argp(switch, short = 'q')] /// quiet output quiet: bool, - #[argh(switch, short = 'h')] + #[argp(switch, short = 'h')] /// validate disc hashes (Wii only) validate: bool, } #[derive(FromArgs, Debug)] -/// Extract a disc image. -#[argh(subcommand, name = "convert")] +/// Converts a disc image to ISO. +#[argp(subcommand, name = "convert")] struct ConvertArgs { - #[argh(positional)] + #[argp(positional)] /// path to disc image file: PathBuf, - #[argh(positional)] + #[argp(positional)] /// output ISO file out: PathBuf, } #[derive(FromArgs, Debug)] /// Verifies a disc image. -#[argh(subcommand, name = "verify")] +#[argp(subcommand, name = "verify")] struct VerifyArgs { - #[argh(positional)] + #[argp(positional)] /// path to disc image file: PathBuf, } +#[derive(Debug, Eq, PartialEq, Copy, Clone)] +enum LogLevel { + Error, + Warn, + Info, + Debug, + Trace, +} + +impl FromStr for LogLevel { + type Err = (); + + fn from_str(s: &str) -> std::result::Result { + Ok(match s { + "error" => Self::Error, + "warn" => Self::Warn, + "info" => Self::Info, + "debug" => Self::Debug, + "trace" => Self::Trace, + _ => return Err(()), + }) + } +} + +impl ToString for LogLevel { + fn to_string(&self) -> String { + match self { + LogLevel::Error => "error", + LogLevel::Warn => "warn", + LogLevel::Info => "info", + LogLevel::Debug => "debug", + LogLevel::Trace => "trace", + } + .to_string() + } +} + +impl FromArgValue for LogLevel { + fn from_arg_value(value: &OsStr) -> std::result::Result { + String::from_arg_value(value) + .and_then(|s| Self::from_str(&s).map_err(|_| "Invalid log level".to_string())) + } +} + +// Duplicated from supports-color so we can check early. +fn env_no_color() -> bool { + match env::var("NO_COLOR").as_deref() { + Ok("") | Ok("0") | Err(_) => false, + Ok(_) => true, + } +} + fn main() { - let args: TopLevel = argh::from_env(); - let result = match args.command { + let args: TopLevel = argp_version::from_env(); + let use_colors = if args.no_color || env_no_color() { + false + } else { + // Try to enable ANSI support on Windows. + let _ = enable_ansi_support(); + // Disable isatty check for supports-color. (e.g. 
when used with ninja) + env::set_var("IGNORE_IS_TERMINAL", "1"); + supports_color::on(Stream::Stdout).is_some_and(|c| c.has_basic) + }; + + let format = + tracing_subscriber::fmt::format().with_ansi(use_colors).with_target(false).without_time(); + let builder = tracing_subscriber::fmt().event_format(format); + if let Some(level) = args.log_level { + builder + .with_max_level(match level { + LogLevel::Error => LevelFilter::ERROR, + LogLevel::Warn => LevelFilter::WARN, + LogLevel::Info => LevelFilter::INFO, + LogLevel::Debug => LevelFilter::DEBUG, + LogLevel::Trace => LevelFilter::TRACE, + }) + .init(); + } else { + builder + .with_env_filter( + EnvFilter::builder() + .with_default_directive(LevelFilter::INFO.into()) + .from_env_lossy(), + ) + .init(); + } + + let mut result = Ok(()); + if let Some(dir) = &args.chdir { + result = env::set_current_dir(dir).map_err(|e| { + nod::Error::Io(format!("Failed to change working directory to '{}'", dir.display()), e) + }); + } + result = result.and_then(|_| match args.command { + SubCommand::Info(c_args) => info(c_args), SubCommand::Convert(c_args) => convert(c_args), SubCommand::Extract(c_args) => extract(c_args), SubCommand::Verify(c_args) => verify(c_args), - }; + }); if let Err(e) = result { eprintln!("Failed: {}", e); if let Some(source) = e.source() { @@ -86,32 +219,108 @@ fn main() { } } +fn print_header(header: &DiscHeader) { + println!("Name: {}", header.game_title_str()); + println!("Game ID: {}", header.game_id_str()); + println!("Disc {}, Revision {}", header.disc_num + 1, header.disc_version); + if header.no_partition_hashes != 0 { + println!("[!] Disc has no hashes"); + } + if header.no_partition_encryption != 0 { + println!("[!] Disc is not encrypted"); + } +} + +fn info(args: InfoArgs) -> Result<()> { + let disc = Disc::new_with_options(args.file, &OpenOptions { + rebuild_hashes: false, + validate_hashes: false, + rebuild_encryption: false, + })?; + let header = disc.header(); + print_header(header); + + if header.is_wii() { + for (idx, info) in disc.partitions().iter().enumerate() { + println!(); + println!("Partition {}:{}", info.group_index, info.part_index); + println!("\tType: {}", info.kind); + println!("\tPartition offset: {:#X}", info.part_offset); + println!( + "\tData offset / size: {:#X} / {:#X} ({})", + info.part_offset + info.data_offset, + info.data_size, + file_size::fit_4(info.data_size) + ); + if let Some(header) = &info.header { + println!( + "\tTMD offset / size: {:#X} / {:#X}", + info.part_offset + header.tmd_off(), + header.tmd_size() + ); + println!( + "\tCert offset / size: {:#X} / {:#X}", + info.part_offset + header.cert_chain_off(), + header.cert_chain_size() + ); + println!( + "\tH3 offset / size: {:#X} / {:#X}", + info.part_offset + header.h3_table_off(), + header.h3_table_size() + ); + } + + let mut partition = disc.open_partition(idx)?; + let meta = partition.meta()?; + let header = meta.header(); + let tmd = meta.tmd_header(); + let title_id_str = if let Some(tmd) = tmd { + format!( + "{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}", + tmd.title_id[0], + tmd.title_id[1], + tmd.title_id[2], + tmd.title_id[3], + tmd.title_id[4], + tmd.title_id[5], + tmd.title_id[6], + tmd.title_id[7] + ) + } else { + "N/A".to_string() + }; + println!("\tName: {}", header.game_title_str()); + println!("\tGame ID: {} ({})", header.game_id_str(), title_id_str); + println!("\tDisc {}, Revision {}", header.disc_num + 1, header.disc_version); + } + } else if header.is_gamecube() { + // TODO + } else { + println!( + "Invalid 
GC/Wii magic: {:#010X}/{:#010X}", + header.gcn_magic.get(), + header.wii_magic.get() + ); + } + Ok(()) +} + fn convert(args: ConvertArgs) -> Result<()> { convert_and_verify(&args.file, Some(&args.out)) } fn verify(args: VerifyArgs) -> Result<()> { convert_and_verify(&args.file, None) } fn convert_and_verify(in_file: &Path, out_file: Option<&Path>) -> Result<()> { println!("Loading {}", in_file.display()); - let mut disc_io = new_disc_io(in_file, &DiscIOOptions { rebuild_hashes: true })?; - let disc_base = new_disc_base(disc_io.as_mut())?; - let header = disc_base.get_header(); - println!( - "\nGame ID: {}{}{}{}{}{}", - header.game_id[0] as char, - header.game_id[1] as char, - header.game_id[2] as char, - header.game_id[3] as char, - header.game_id[4] as char, - header.game_id[5] as char - ); - println!("Game title: {}", header.game_title); - println!("Disc num: {}", header.disc_num); - println!("Disc version: {}", header.disc_version); + let disc = Disc::new_with_options(in_file, &OpenOptions { + rebuild_hashes: true, + validate_hashes: false, + rebuild_encryption: true, + })?; + let header = disc.header(); + print_header(header); - let mut stream = disc_io.begin_read_stream(0).context("Creating disc read stream")?; - let mut crc = crc32fast::Hasher::new(); - let mut md5 = md5::Md5::new(); - let mut sha1 = sha1::Sha1::new(); + let meta = disc.meta()?; + let mut stream = disc.open()?.take(disc.disc_size()); let mut file = if let Some(out_file) = out_file { Some( @@ -122,39 +331,130 @@ fn convert_and_verify(in_file: &Path, out_file: Option<&Path>) -> Result<()> { None }; + println!("\nHashing..."); + let pb = ProgressBar::new(stream.limit()); + pb.set_style(ProgressStyle::with_template("{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {bytes}/{total_bytes} ({bytes_per_sec}, {eta})") + .unwrap() + .with_key("eta", |state: &ProgressState, w: &mut dyn std::fmt::Write| { + write!(w, "{:.1}s", state.eta().as_secs_f64()).unwrap() + }) + .progress_chars("#>-")); + const BUFFER_SIZE: usize = 1015808; // LCM(0x8000, 0x7C00) - let mut buf = vec![0u8; BUFFER_SIZE]; + let digest_threads = [ + digest_thread::(), + digest_thread::(), + digest_thread::(), + digest_thread::(), + ]; + + let (w_tx, w_rx) = sync_channel::>(1); + let w_thread = thread::spawn(move || { + let mut total_written = 0u64; + while let Ok(data) = w_rx.recv() { + if let Some(file) = &mut file { + file.write_all(data.as_ref()) + .with_context(|| { + format!("Writing {} bytes at offset {}", data.len(), total_written) + }) + .unwrap(); + } + total_written += data.len() as u64; + pb.set_position(total_written); + } + if let Some(mut file) = file { + file.flush().context("Flushing output file").unwrap(); + } + pb.finish(); + }); + let mut total_read = 0u64; + let mut buf = ::new_box_slice_zeroed(BUFFER_SIZE); loop { - let read = stream.read(&mut buf).with_context(|| { + let read = stream.read(buf.as_mut()).with_context(|| { format!("Reading {} bytes at disc offset {}", BUFFER_SIZE, total_read) })?; if read == 0 { break; } - let slice = &buf[..read]; - crc.update(slice); - md5.update(slice); - sha1.update(slice); - if let Some(file) = &mut file { - file.write_all(slice).with_context(|| { - format!("Writing {} bytes at offset {}", slice.len(), total_read) - })?; + + let arc = Arc::<[u8]>::from(&buf[..read]); + for (tx, _) in &digest_threads { + tx.send(arc.clone()).map_err(|_| "Sending data to hash thread")?; } + w_tx.send(arc).map_err(|_| "Sending data to write thread")?; total_read += read as u64; } + drop(w_tx); // Close channel 
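+ // `w_tx` is the only sender for the writer channel; dropping it disconnects
+ // the channel, so the writer thread's recv() loop terminates and the join()
+ // below returns once the output file has been flushed.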
+ w_thread.join().unwrap(); + + println!(); + if let Some(path) = out_file { + println!("Wrote {} to {}", file_size::fit_4(total_read), path.display()); + } println!(); - println!("CRC32: {:08x}", crc.finalize()); - println!("MD5: {:032x}", md5.finalize()); - println!("SHA-1: {:040x}", sha1.finalize()); - if let (Some(path), Some(file)) = (out_file, &mut file) { - file.flush().context("Flushing output file")?; - println!("Wrote {} to {}", file_size::fit_4(total_read), path.display()); + for (tx, handle) in digest_threads.into_iter() { + drop(tx); // Close channel + match handle.join().unwrap() { + DigestResult::Crc32(crc) => { + print!("CRC32: {:08x}", crc); + if let Some(expected_crc) = meta.crc32 { + if expected_crc != crc { + print!(" ❌ (expected: {:08x})", expected_crc); + } else { + print!(" ✅"); + } + } + println!(); + } + DigestResult::Md5(md5) => { + print!("MD5: {:032x}", md5); + if let Some(expected_md5) = meta.md5 { + let expected_md5 = >::from(expected_md5); + if expected_md5 != md5 { + print!(" ❌ (expected: {:032x})", expected_md5); + } else { + print!(" ✅"); + } + } + println!(); + } + DigestResult::Sha1(sha1) => { + print!("SHA-1: {:040x}", sha1); + if let Some(expected_sha1) = meta.sha1 { + let expected_sha1 = >::from(expected_sha1); + if expected_sha1 != sha1 { + print!(" ❌ (expected: {:040x})", expected_sha1); + } else { + print!(" ✅"); + } + } + println!(); + } + DigestResult::Xxh64(xxh64) => { + print!("XXH64: {:016x}", xxh64); + if let Some(expected_xxh64) = meta.xxhash64 { + if expected_xxh64 != xxh64 { + print!(" ❌ (expected: {:016x})", expected_xxh64); + } else { + print!(" ✅"); + } + } + println!(); + } + } } Ok(()) } +pub fn has_extension(filename: &Path, extension: &str) -> bool { + match filename.extension() { + Some(ext) => ext.eq_ignore_ascii_case(extension), + None => false, + } +} + fn extract(args: ExtractArgs) -> Result<()> { let output_dir: PathBuf; if let Some(dir) = args.out { @@ -169,24 +469,54 @@ fn extract(args: ExtractArgs) -> Result<()> { } else { output_dir = args.file.with_extension(""); } - let mut disc_io = new_disc_io(&args.file, &DiscIOOptions { rebuild_hashes: args.validate })?; - let disc_base = new_disc_base(disc_io.as_mut())?; - let mut partition = - disc_base.get_partition(disc_io.as_mut(), PartitionType::Data, args.validate)?; - let header = partition.read_header()?; - extract_sys_files(header.as_ref(), &output_dir.join("sys"), args.quiet)?; - extract_node(header.root_node(), partition.as_mut(), &output_dir.join("files"), args.quiet)?; + let disc = Disc::new_with_options(&args.file, &OpenOptions { + rebuild_hashes: args.validate, + validate_hashes: args.validate, + rebuild_encryption: false, + })?; + let is_wii = disc.header().is_wii(); + let mut partition = disc.open_partition_kind(PartitionKind::Data)?; + let meta = partition.meta()?; + extract_sys_files(meta.as_ref(), &output_dir.join("sys"), args.quiet)?; + + // Extract FST + let files_dir = output_dir.join("files"); + let fst = Fst::new(&meta.raw_fst)?; + let mut path_segments = Vec::<(Cow, usize)>::new(); + for (idx, node, name) in fst.iter() { + // Remove ended path segments + let mut new_size = 0; + for (_, end) in path_segments.iter() { + if *end == idx { + break; + } + new_size += 1; + } + path_segments.truncate(new_size); + + // Add the new path segment + let end = if node.is_dir() { node.length(false) as usize } else { idx + 1 }; + path_segments.push((name?, end)); + + let path = path_segments.iter().map(|(name, _)| name.as_ref()).join("/"); + if node.is_dir() { + 
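+ // Directory nodes precede their children in the FST, so creating the
+ // directory here guarantees parents exist before their files are
+ // extracted, and also preserves empty directories.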
fs::create_dir_all(files_dir.join(&path)) + .with_context(|| format!("Creating directory {}", path))?; + } else { + extract_node(node, partition.as_mut(), &files_dir, &path, is_wii, args.quiet)?; + } + } Ok(()) } -fn extract_sys_files(header: &dyn PartHeader, out_dir: &Path, quiet: bool) -> Result<()> { +fn extract_sys_files(data: &PartitionMeta, out_dir: &Path, quiet: bool) -> Result<()> { fs::create_dir_all(out_dir) .with_context(|| format!("Creating output directory {}", out_dir.display()))?; - extract_file(header.boot_bytes(), &out_dir.join("boot.bin"), quiet)?; - extract_file(header.bi2_bytes(), &out_dir.join("bi2.bin"), quiet)?; - extract_file(header.apploader_bytes(), &out_dir.join("apploader.img"), quiet)?; - extract_file(header.fst_bytes(), &out_dir.join("fst.bin"), quiet)?; - extract_file(header.dol_bytes(), &out_dir.join("main.dol"), quiet)?; + extract_file(&data.raw_boot, &out_dir.join("boot.bin"), quiet)?; + extract_file(&data.raw_bi2, &out_dir.join("bi2.bin"), quiet)?; + extract_file(&data.raw_apploader, &out_dir.join("apploader.img"), quiet)?; + extract_file(&data.raw_fst, &out_dir.join("fst.bin"), quiet)?; + extract_file(&data.raw_dol, &out_dir.join("main.dol"), quiet)?; Ok(()) } @@ -203,53 +533,93 @@ fn extract_file(bytes: &[u8], out_path: &Path, quiet: bool) -> Result<()> { } fn extract_node( - node: &NodeType, - partition: &mut dyn PartReadStream, + node: &Node, + partition: &mut dyn PartitionBase, base_path: &Path, + name: &str, + is_wii: bool, quiet: bool, ) -> Result<()> { - match node { - NodeType::File(v) => { - let mut file_path = base_path.to_path_buf(); - file_path.push(v.name.as_str()); - if !quiet { - println!( - "Extracting {} (size: {})", - file_path.display(), - file_size::fit_4(v.length as u64) - ); - } - let file = File::create(&file_path) - .with_context(|| format!("Creating file {}", file_path.display()))?; - let mut buf_writer = BufWriter::with_capacity(partition.ideal_buffer_size(), file); - let mut stream = partition.begin_file_stream(v).with_context(|| { - format!( - "Opening file {} on disc for reading (offset {}, size {})", - v.name, v.offset, v.length - ) - })?; - io::copy(&mut stream, &mut buf_writer) - .with_context(|| format!("Extracting file {}", file_path.display()))?; - buf_writer.flush().with_context(|| format!("Flushing file {}", file_path.display()))?; - } - NodeType::Directory(v, c) => { - if v.name.is_empty() { - fs::create_dir_all(base_path).with_context(|| { - format!("Creating output directory {}", base_path.display()) - })?; - for x in c { - extract_node(x, partition, base_path, quiet)?; - } - } else { - let mut new_base = base_path.to_path_buf(); - new_base.push(v.name.as_str()); - fs::create_dir_all(&new_base) - .with_context(|| format!("Creating output directory {}", new_base.display()))?; - for x in c { - extract_node(x, partition, new_base.as_path(), quiet)?; - } - } - } + let file_path = base_path.join(name); + if !quiet { + println!( + "Extracting {} (size: {})", + file_path.display(), + file_size::fit_4(node.length(is_wii)) + ); } + let file = File::create(&file_path) + .with_context(|| format!("Creating file {}", file_path.display()))?; + let mut w = BufWriter::with_capacity(partition.ideal_buffer_size(), file); + let mut r = partition.open_file(node).with_context(|| { + format!( + "Opening file {} on disc for reading (offset {}, size {})", + name, + node.offset(is_wii), + node.length(is_wii) + ) + })?; + io::copy(&mut r, &mut w).with_context(|| format!("Extracting file {}", file_path.display()))?; + 
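+ // Flush explicitly so any deferred write error surfaces here with
+ // context, instead of being silently discarded by BufWriter's Drop impl.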
w.flush().with_context(|| format!("Flushing file {}", file_path.display()))?; Ok(()) } + +fn digest_thread() -> (SyncSender>, JoinHandle) +where H: Hasher + Send + 'static { + let (tx, rx) = sync_channel::>(1); + let handle = thread::spawn(move || { + let mut hasher = H::new(); + while let Ok(data) = rx.recv() { + hasher.update(data.as_ref()); + } + hasher.finalize() + }); + (tx, handle) +} + +enum DigestResult { + Crc32(u32), + Md5(Output), + Sha1(Output), + Xxh64(u64), +} + +trait Hasher { + fn new() -> Self; + fn finalize(self) -> DigestResult; + fn update(&mut self, data: &[u8]); +} + +impl Hasher for md5::Md5 { + fn new() -> Self { Digest::new() } + + fn finalize(self) -> DigestResult { DigestResult::Md5(Digest::finalize(self)) } + + fn update(&mut self, data: &[u8]) { Digest::update(self, data) } +} + +impl Hasher for sha1::Sha1 { + fn new() -> Self { Digest::new() } + + fn finalize(self) -> DigestResult { DigestResult::Sha1(Digest::finalize(self)) } + + fn update(&mut self, data: &[u8]) { Digest::update(self, data) } +} + +impl Hasher for crc32fast::Hasher { + fn new() -> Self { crc32fast::Hasher::new() } + + fn finalize(self) -> DigestResult { DigestResult::Crc32(crc32fast::Hasher::finalize(self)) } + + fn update(&mut self, data: &[u8]) { crc32fast::Hasher::update(self, data) } +} + +impl Hasher for xxhash_rust::xxh64::Xxh64 { + fn new() -> Self { xxhash_rust::xxh64::Xxh64::new(0) } + + fn finalize(self) -> DigestResult { + DigestResult::Xxh64(xxhash_rust::xxh64::Xxh64::digest(&self)) + } + + fn update(&mut self, data: &[u8]) { xxhash_rust::xxh64::Xxh64::update(self, data) } +} diff --git a/src/disc/gcn.rs b/src/disc/gcn.rs index 997879f..819d46e 100644 --- a/src/disc/gcn.rs +++ b/src/disc/gcn.rs @@ -1,77 +1,109 @@ use std::{ io, - io::{Cursor, Read, Seek, SeekFrom}, + io::{Read, Seek, SeekFrom}, + mem::size_of, }; +use zerocopy::FromBytes; + use crate::{ + array_ref, disc::{ - AppLoaderHeader, DiscBase, DiscIO, DolHeader, Header, PartHeader, PartReadStream, - PartitionHeader, PartitionType, SECTOR_SIZE, + AppLoaderHeader, DiscBase, DiscHeader, DiscIO, DolHeader, PartitionBase, PartitionHeader, + PartitionInfo, PartitionKind, PartitionMeta, BI2_SIZE, BOOT_SIZE, MINI_DVD_SIZE, + SECTOR_SIZE, }, - fst::{find_node, read_fst, Node, NodeKind, NodeType}, + fst::{Node, NodeKind}, streams::{ReadStream, SharedWindowedReadStream}, util::{ div_rem, - reader::{read_bytes, FromReader}, + reader::{read_from, read_vec}, }, - Error, Result, ResultContext, + Error, OpenOptions, Result, ResultContext, }; pub(crate) struct DiscGCN { - pub(crate) header: Header, + pub(crate) header: DiscHeader, + pub(crate) disc_size: u64, + // pub(crate) junk_start: u64, } impl DiscGCN { - pub(crate) fn new(header: Header) -> Result { Ok(DiscGCN { header }) } + pub(crate) fn new( + _stream: &mut dyn ReadStream, + header: DiscHeader, + disc_size: Option, + ) -> Result { + // stream.seek(SeekFrom::Start(size_of::() as u64)).context("Seeking to partition header")?; + // let partition_header: PartitionHeader = read_from(stream).context("Reading partition header")?; + // let junk_start = partition_header.fst_off(false) + partition_header.fst_sz(false); + Ok(DiscGCN { header, disc_size: disc_size.unwrap_or(MINI_DVD_SIZE) /*, junk_start*/ }) + } +} + +fn open_partition<'a>(disc_io: &'a dyn DiscIO) -> Result> { + let stream = disc_io.open()?; + Ok(Box::new(PartitionGC { stream, offset: 0, cur_block: u32::MAX, buf: [0; SECTOR_SIZE] })) } impl DiscBase for DiscGCN { - fn get_header(&self) -> &Header { &self.header } + fn 
header(&self) -> &DiscHeader { &self.header } - fn get_data_partition<'a>( - &self, - disc_io: &'a mut dyn DiscIO, - _validate_hashes: bool, - ) -> Result> { - let stream = disc_io.begin_read_stream(0).context("Opening data partition stream")?; - Ok(Box::from(GCPartReadStream { - stream, - offset: 0, - cur_block: u32::MAX, - buf: [0; SECTOR_SIZE], - })) + fn partitions(&self) -> Vec { + vec![PartitionInfo { + group_index: 0, + part_index: 0, + part_offset: 0, + kind: PartitionKind::Data, + data_offset: 0, + data_size: self.disc_size, + header: None, + lfg_seed: *array_ref!(self.header.game_id, 0, 4), + // junk_start: self.junk_start, + }] } - fn get_partition<'a>( + fn open_partition<'a>( &self, - disc_io: &'a mut dyn DiscIO, - part_type: PartitionType, - _validate_hashes: bool, - ) -> Result> { - if part_type == PartitionType::Data { - Ok(Box::from(GCPartReadStream { - stream: disc_io.begin_read_stream(0).context("Opening partition read stream")?, - offset: 0, - cur_block: u32::MAX, - buf: [0; SECTOR_SIZE], - })) - } else { - Err(Error::DiscFormat(format!( + disc_io: &'a dyn DiscIO, + index: usize, + _options: &OpenOptions, + ) -> Result> { + if index != 0 { + return Err(Error::DiscFormat(format!( + "Invalid partition index {} for GameCube disc", + index + ))); + } + open_partition(disc_io) + } + + fn open_partition_kind<'a>( + &self, + disc_io: &'a dyn DiscIO, + part_type: PartitionKind, + _options: &OpenOptions, + ) -> Result> { + if part_type != PartitionKind::Data { + return Err(Error::DiscFormat(format!( "Invalid partition type {:?} for GameCube disc", part_type - ))) + ))); } + open_partition(disc_io) } + + fn disc_size(&self) -> u64 { self.disc_size } } -struct GCPartReadStream<'a> { +struct PartitionGC<'a> { stream: Box, offset: u64, cur_block: u32, buf: [u8; SECTOR_SIZE], } -impl<'a> Read for GCPartReadStream<'a> { +impl<'a> Read for PartitionGC<'a> { fn read(&mut self, buf: &mut [u8]) -> io::Result { let (block, block_offset) = div_rem(self.offset, SECTOR_SIZE as u64); let mut block = block as u32; @@ -104,12 +136,12 @@ impl<'a> Read for GCPartReadStream<'a> { } } -impl<'a> Seek for GCPartReadStream<'a> { +impl<'a> Seek for PartitionGC<'a> { fn seek(&mut self, pos: SeekFrom) -> io::Result { self.offset = match pos { SeekFrom::Start(v) => v, - SeekFrom::End(v) => (self.stable_stream_len()? 
as i64 + v) as u64, - SeekFrom::Current(v) => (self.offset as i64 + v) as u64, + SeekFrom::End(v) => self.stable_stream_len()?.saturating_add_signed(v), + SeekFrom::Current(v) => self.offset.saturating_add_signed(v), }; let block = self.offset / SECTOR_SIZE as u64; if block as u32 != self.cur_block { @@ -122,138 +154,94 @@ impl<'a> Seek for GCPartReadStream<'a> { fn stream_position(&mut self) -> io::Result { Ok(self.offset) } } -impl<'a> ReadStream for GCPartReadStream<'a> { +impl<'a> ReadStream for PartitionGC<'a> { fn stable_stream_len(&mut self) -> io::Result { self.stream.stable_stream_len() } fn as_dyn(&mut self) -> &mut dyn ReadStream { self } } -impl<'a> PartReadStream for GCPartReadStream<'a> { - fn begin_file_stream(&mut self, node: &Node) -> io::Result { - assert_eq!(node.kind, NodeKind::File); - self.new_window(node.offset as u64, node.length as u64) +impl<'a> PartitionBase for PartitionGC<'a> { + fn meta(&mut self) -> Result> { + self.seek(SeekFrom::Start(0)).context("Seeking to partition header")?; + read_part_header(self, false) } - fn read_header(&mut self) -> Result> { - self.seek(SeekFrom::Start(0)).context("Seeking to partition header")?; - Ok(Box::from(read_part_header(self)?)) + fn open_file(&mut self, node: &Node) -> io::Result { + assert_eq!(node.kind(), NodeKind::File); + self.new_window(node.offset(false), node.length(false)) } fn ideal_buffer_size(&self) -> usize { SECTOR_SIZE } } -const BOOT_SIZE: usize = Header::STATIC_SIZE + PartitionHeader::STATIC_SIZE; -const BI2_SIZE: usize = 0x2000; - -#[derive(Clone, Debug)] -pub(crate) struct GCPartition { - raw_boot: [u8; BOOT_SIZE], - raw_bi2: [u8; BI2_SIZE], - raw_apploader: Vec, - raw_fst: Vec, - raw_dol: Vec, - // Parsed - header: Header, - partition_header: PartitionHeader, - apploader_header: AppLoaderHeader, - root_node: NodeType, - dol_header: DolHeader, -} - -fn read_part_header(reader: &mut R) -> Result +pub(crate) fn read_part_header(reader: &mut R, is_wii: bool) -> Result> where R: Read + Seek + ?Sized { // boot.bin - let raw_boot = <[u8; BOOT_SIZE]>::from_reader(reader).context("Reading boot.bin")?; - let mut boot_bytes = raw_boot.as_slice(); - let header = Header::from_reader(&mut boot_bytes).context("Parsing disc header")?; - let partition_header = - PartitionHeader::from_reader(&mut boot_bytes).context("Parsing partition header")?; - debug_assert_eq!(boot_bytes.len(), 0, "failed to consume boot.bin"); + let raw_boot: [u8; BOOT_SIZE] = read_from(reader).context("Reading boot.bin")?; + let partition_header = PartitionHeader::ref_from(&raw_boot[size_of::()..]).unwrap(); // bi2.bin - let raw_bi2 = <[u8; BI2_SIZE]>::from_reader(reader).context("Reading bi2.bin")?; + let raw_bi2: [u8; BI2_SIZE] = read_from(reader).context("Reading bi2.bin")?; // apploader.bin - let mut raw_apploader = - read_bytes(reader, AppLoaderHeader::STATIC_SIZE).context("Reading apploader header")?; - let apploader_header = AppLoaderHeader::from_reader(&mut raw_apploader.as_slice()) - .context("Parsing apploader header")?; + let mut raw_apploader: Vec = + read_vec(reader, size_of::()).context("Reading apploader header")?; + let apploader_header = AppLoaderHeader::ref_from(raw_apploader.as_slice()).unwrap(); raw_apploader.resize( - AppLoaderHeader::STATIC_SIZE - + apploader_header.size as usize - + apploader_header.trailer_size as usize, + size_of::() + + apploader_header.size.get() as usize + + apploader_header.trailer_size.get() as usize, 0, ); reader - .read_exact(&mut raw_apploader[AppLoaderHeader::STATIC_SIZE..]) + .read_exact(&mut 
raw_apploader[size_of::()..]) .context("Reading apploader")?; // fst.bin reader - .seek(SeekFrom::Start(partition_header.fst_off as u64)) + .seek(SeekFrom::Start(partition_header.fst_off(is_wii))) .context("Seeking to FST offset")?; - let raw_fst = read_bytes(reader, partition_header.fst_sz as usize).with_context(|| { - format!( - "Reading partition FST (offset {}, size {})", - partition_header.fst_off, partition_header.fst_sz - ) - })?; - let root_node = read_fst(&mut Cursor::new(&*raw_fst))?; + let raw_fst: Vec = read_vec(reader, partition_header.fst_sz(is_wii) as usize) + .with_context(|| { + format!( + "Reading partition FST (offset {}, size {})", + partition_header.fst_off, partition_header.fst_sz + ) + })?; // main.dol reader - .seek(SeekFrom::Start(partition_header.dol_off as u64)) + .seek(SeekFrom::Start(partition_header.dol_off(is_wii))) .context("Seeking to DOL offset")?; - let mut raw_dol = read_bytes(reader, DolHeader::STATIC_SIZE).context("Reading DOL header")?; - let dol_header = - DolHeader::from_reader(&mut raw_dol.as_slice()).context("Parsing DOL header")?; + let mut raw_dol: Vec = + read_vec(reader, size_of::()).context("Reading DOL header")?; + let dol_header = DolHeader::ref_from(raw_dol.as_slice()).unwrap(); let dol_size = dol_header .text_offs .iter() .zip(&dol_header.text_sizes) - .map(|(offs, size)| offs + size) + .map(|(offs, size)| offs.get() + size.get()) .chain( - dol_header.data_offs.iter().zip(&dol_header.data_sizes).map(|(offs, size)| offs + size), + dol_header + .data_offs + .iter() + .zip(&dol_header.data_sizes) + .map(|(offs, size)| offs.get() + size.get()), ) .max() - .unwrap_or(DolHeader::STATIC_SIZE as u32); + .unwrap_or(size_of::() as u32); raw_dol.resize(dol_size as usize, 0); - reader.read_exact(&mut raw_dol[DolHeader::STATIC_SIZE..]).context("Reading DOL")?; + reader.read_exact(&mut raw_dol[size_of::()..]).context("Reading DOL")?; - Ok(GCPartition { + Ok(Box::new(PartitionMeta { raw_boot, raw_bi2, raw_apploader, raw_fst, raw_dol, - header, - partition_header, - apploader_header, - root_node, - dol_header, - }) -} - -impl PartHeader for GCPartition { - fn root_node(&self) -> &NodeType { &self.root_node } - - fn find_node(&self, path: &str) -> Option<&NodeType> { find_node(&self.root_node, path) } - - fn boot_bytes(&self) -> &[u8] { &self.raw_boot } - - fn bi2_bytes(&self) -> &[u8] { &self.raw_bi2 } - - fn apploader_bytes(&self) -> &[u8] { &self.raw_apploader } - - fn fst_bytes(&self) -> &[u8] { &self.raw_fst } - - fn dol_bytes(&self) -> &[u8] { &self.raw_dol } - - fn disc_header(&self) -> &Header { &self.header } - - fn partition_header(&self) -> &PartitionHeader { &self.partition_header } - - fn apploader_header(&self) -> &AppLoaderHeader { &self.apploader_header } - - fn dol_header(&self) -> &DolHeader { &self.dol_header } + raw_ticket: None, + raw_tmd: None, + raw_cert_chain: None, + raw_h3_table: None, + })) } diff --git a/src/disc/mod.rs b/src/disc/mod.rs index b31ac41..0cd3182 100644 --- a/src/disc/mod.rs +++ b/src/disc/mod.rs @@ -1,311 +1,332 @@ //! 
Disc type related logic (GameCube, Wii) -use std::{ffi::CStr, fmt::Debug, io, io::Read}; +use std::{ + borrow::Cow, + ffi::CStr, + fmt::{Debug, Display, Formatter}, + io, + mem::size_of, + str::from_utf8, +}; + +use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes}; use crate::{ - disc::{gcn::DiscGCN, wii::DiscWii}, - fst::{Node, NodeType}, + disc::{ + gcn::DiscGCN, + wii::{DiscWii, Ticket, TmdHeader, WiiPartitionHeader}, + }, + fst::Node, io::DiscIO, + static_assert, streams::{ReadStream, SharedWindowedReadStream}, - util::reader::{skip_bytes, struct_size, FromReader}, - Error, Result, ResultContext, + util::reader::read_from, + Error, Fst, OpenOptions, Result, ResultContext, }; pub(crate) mod gcn; pub(crate) mod wii; +pub(crate) const SECTOR_SIZE: usize = 0x8000; + /// Shared GameCube & Wii disc header -#[derive(Clone, Debug, PartialEq)] -pub struct Header { +#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)] +#[repr(C, align(4))] +pub struct DiscHeader { /// Game ID (e.g. GM8E01 for Metroid Prime) pub game_id: [u8; 6], /// Used in multi-disc games pub disc_num: u8, /// Disc version pub disc_version: u8, - /// Audio streaming enabled (bool) + /// Audio streaming enabled pub audio_streaming: u8, /// Audio streaming buffer size pub audio_stream_buf_size: u8, + /// Padding + _pad1: [u8; 14], /// If this is a Wii disc, this will be 0x5D1C9EA3 - pub wii_magic: u32, + pub wii_magic: U32, /// If this is a GameCube disc, this will be 0xC2339F3D - pub gcn_magic: u32, + pub gcn_magic: U32, /// Game title - pub game_title: String, - /// Disable hash verification - pub disable_hash_verification: u8, - /// Disable disc encryption and H3 hash table loading and verification - pub disable_disc_enc: u8, + pub game_title: [u8; 64], + /// If 1, disc omits partition hashes + pub no_partition_hashes: u8, + /// If 1, disc omits partition encryption + pub no_partition_encryption: u8, + /// Padding + _pad2: [u8; 926], } -fn from_c_str(bytes: &[u8]) -> io::Result { - CStr::from_bytes_until_nul(bytes) - .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))? - .to_str() - .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) - .map(|s| s.to_string()) -} +static_assert!(size_of::() == 0x400); -impl FromReader for Header { - type Args<'a> = (); +impl DiscHeader { + /// Game ID as a string. + pub fn game_id_str(&self) -> &str { from_utf8(&self.game_id).unwrap_or("[invalid]") } - const STATIC_SIZE: usize = 0x400; - - fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result - where R: Read + ?Sized { - let game_id = <[u8; 6]>::from_reader(reader)?; - let disc_num = u8::from_reader(reader)?; - let disc_version = u8::from_reader(reader)?; - let audio_streaming = u8::from_reader(reader)?; - let audio_stream_buf_size = u8::from_reader(reader)?; - skip_bytes::<14, _>(reader)?; // padding - let wii_magic = u32::from_reader(reader)?; - let gcn_magic = u32::from_reader(reader)?; - let game_title = from_c_str(&<[u8; 64]>::from_reader(reader)?)?; - let disable_hash_verification = u8::from_reader(reader)?; - let disable_disc_enc = u8::from_reader(reader)?; - skip_bytes::<926, _>(reader)?; // padding - Ok(Self { - game_id, - disc_num, - disc_version, - audio_streaming, - audio_stream_buf_size, - wii_magic, - gcn_magic, - game_title, - disable_hash_verification, - disable_disc_enc, - }) + /// Game title as a string. 
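+ /// Returns `"[invalid]"` if the title is not NUL-terminated or not valid UTF-8.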
+ pub fn game_title_str(&self) -> &str { + CStr::from_bytes_until_nul(&self.game_title) + .ok() + .and_then(|c| c.to_str().ok()) + .unwrap_or("[invalid]") } + + /// Whether this is a GameCube disc. + pub fn is_gamecube(&self) -> bool { self.gcn_magic.get() == 0xC2339F3D } + + /// Whether this is a Wii disc. + pub fn is_wii(&self) -> bool { self.wii_magic.get() == 0x5D1C9EA3 } } /// Partition header -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)] +#[repr(C, align(4))] pub struct PartitionHeader { /// Debug monitor offset - pub debug_mon_off: u32, + pub debug_mon_off: U32, /// Debug monitor load address - pub debug_load_addr: u32, + pub debug_load_addr: U32, + /// Padding + _pad1: [u8; 0x18], /// Offset to main DOL (Wii: >> 2) - pub dol_off: u32, + pub dol_off: U32, /// Offset to file system table (Wii: >> 2) - pub fst_off: u32, - /// File system size - pub fst_sz: u32, - /// File system max size - pub fst_max_sz: u32, + pub fst_off: U32, + /// File system size (Wii: >> 2) + pub fst_sz: U32, + /// File system max size (Wii: >> 2) + pub fst_max_sz: U32, /// File system table load address - pub fst_memory_address: u32, + pub fst_memory_address: U32, /// User position - pub user_position: u32, + pub user_position: U32, /// User size - pub user_sz: u32, + pub user_sz: U32, + /// Padding + _pad2: [u8; 4], } -impl FromReader for PartitionHeader { - type Args<'a> = (); +static_assert!(size_of::() == 0x40); - const STATIC_SIZE: usize = struct_size([ - u32::STATIC_SIZE, // debug_mon_off - u32::STATIC_SIZE, // debug_load_addr - 0x18, // padding - u32::STATIC_SIZE, // dol_off - u32::STATIC_SIZE, // fst_off - u32::STATIC_SIZE, // fst_sz - u32::STATIC_SIZE, // fst_max_sz - u32::STATIC_SIZE, // fst_memory_address - u32::STATIC_SIZE, // user_position - u32::STATIC_SIZE, // user_sz - 4, // padding - ]); +impl PartitionHeader { + pub fn dol_off(&self, is_wii: bool) -> u64 { + if is_wii { + self.dol_off.get() as u64 * 4 + } else { + self.dol_off.get() as u64 + } + } - fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result - where R: Read + ?Sized { - let debug_mon_off = u32::from_reader(reader)?; - let debug_load_addr = u32::from_reader(reader)?; - skip_bytes::<0x18, _>(reader)?; // padding - let dol_off = u32::from_reader(reader)?; - let fst_off = u32::from_reader(reader)?; - let fst_sz = u32::from_reader(reader)?; - let fst_max_sz = u32::from_reader(reader)?; - let fst_memory_address = u32::from_reader(reader)?; - let user_position = u32::from_reader(reader)?; - let user_sz = u32::from_reader(reader)?; - skip_bytes::<4, _>(reader)?; // padding - Ok(Self { - debug_mon_off, - debug_load_addr, - dol_off, - fst_off, - fst_sz, - fst_max_sz, - fst_memory_address, - user_position, - user_sz, - }) + pub fn fst_off(&self, is_wii: bool) -> u64 { + if is_wii { + self.fst_off.get() as u64 * 4 + } else { + self.fst_off.get() as u64 + } + } + + pub fn fst_sz(&self, is_wii: bool) -> u64 { + if is_wii { + self.fst_sz.get() as u64 * 4 + } else { + self.fst_sz.get() as u64 + } + } + + pub fn fst_max_sz(&self, is_wii: bool) -> u64 { + if is_wii { + self.fst_max_sz.get() as u64 * 4 + } else { + self.fst_max_sz.get() as u64 + } } } -#[derive(Debug, PartialEq, Clone)] +/// Apploader header +#[derive(Debug, PartialEq, Clone, FromBytes, FromZeroes, AsBytes)] +#[repr(C, align(4))] pub struct AppLoaderHeader { - pub date: String, - pub entry_point: u32, - pub size: u32, - pub trailer_size: u32, + /// Apploader build date + pub date: [u8; 16], + /// Entry point 
+ pub entry_point: U32, + /// Apploader size + pub size: U32, + /// Apploader trailer size + pub trailer_size: U32, + /// Padding + _pad: [u8; 4], } -impl FromReader for AppLoaderHeader { - type Args<'a> = (); - - const STATIC_SIZE: usize = struct_size([ - 16, // date - u32::STATIC_SIZE, // entry_point - u32::STATIC_SIZE, // size - u32::STATIC_SIZE, // trailer_size - 4, // padding - ]); - - fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result - where R: Read + ?Sized { - let date = from_c_str(&<[u8; 16]>::from_reader(reader)?)?; - let entry_point = u32::from_reader(reader)?; - let size = u32::from_reader(reader)?; - let trailer_size = u32::from_reader(reader)?; - skip_bytes::<4, _>(reader)?; // padding - Ok(Self { date, entry_point, size, trailer_size }) +impl AppLoaderHeader { + /// Apploader build date as a string + pub fn date_str(&self) -> Option<&str> { + CStr::from_bytes_until_nul(&self.date).ok().and_then(|c| c.to_str().ok()) } } +/// Maximum number of text sections in a DOL pub const DOL_MAX_TEXT_SECTIONS: usize = 7; +/// Maximum number of data sections in a DOL pub const DOL_MAX_DATA_SECTIONS: usize = 11; -#[derive(Debug, Clone)] +/// DOL header +#[derive(Debug, Clone, FromBytes, FromZeroes)] pub struct DolHeader { - pub text_offs: [u32; DOL_MAX_TEXT_SECTIONS], - pub data_offs: [u32; DOL_MAX_DATA_SECTIONS], - pub text_addrs: [u32; DOL_MAX_TEXT_SECTIONS], - pub data_addrs: [u32; DOL_MAX_DATA_SECTIONS], - pub text_sizes: [u32; DOL_MAX_TEXT_SECTIONS], - pub data_sizes: [u32; DOL_MAX_DATA_SECTIONS], - pub bss_addr: u32, - pub bss_size: u32, - pub entry_point: u32, + /// Text section offsets + pub text_offs: [U32; DOL_MAX_TEXT_SECTIONS], + /// Data section offsets + pub data_offs: [U32; DOL_MAX_DATA_SECTIONS], + /// Text section addresses + pub text_addrs: [U32; DOL_MAX_TEXT_SECTIONS], + /// Data section addresses + pub data_addrs: [U32; DOL_MAX_DATA_SECTIONS], + /// Text section sizes + pub text_sizes: [U32; DOL_MAX_TEXT_SECTIONS], + /// Data section sizes + pub data_sizes: [U32; DOL_MAX_DATA_SECTIONS], + /// BSS address + pub bss_addr: U32, + /// BSS size + pub bss_size: U32, + /// Entry point + pub entry_point: U32, + /// Padding + _pad: [u8; 0x1C], } -impl FromReader for DolHeader { - type Args<'a> = (); - - const STATIC_SIZE: usize = 0x100; - - fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result - where R: Read + ?Sized { - let result = Self { - text_offs: <_>::from_reader(reader)?, - data_offs: <_>::from_reader(reader)?, - text_addrs: <_>::from_reader(reader)?, - data_addrs: <_>::from_reader(reader)?, - text_sizes: <_>::from_reader(reader)?, - data_sizes: <_>::from_reader(reader)?, - bss_addr: <_>::from_reader(reader)?, - bss_size: <_>::from_reader(reader)?, - entry_point: <_>::from_reader(reader)?, - }; - skip_bytes::<0x1C, _>(reader)?; // padding - Ok(result) - } -} +static_assert!(size_of::() == 0x100); +/// Partition type #[derive(Debug, PartialEq, Eq, Copy, Clone)] -pub enum PartitionType { +pub enum PartitionKind { Data, Update, Channel, + Other(u32), } -pub(crate) const SECTOR_SIZE: usize = 0x8000; +impl Display for PartitionKind { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + Self::Data => write!(f, "Data"), + Self::Update => write!(f, "Update"), + Self::Channel => write!(f, "Channel"), + Self::Other(v) => { + let bytes = v.to_be_bytes(); + write!(f, "Other ({:08X}, {})", v, String::from_utf8_lossy(&bytes)) + } + } + } +} + +impl PartitionKind { + /// Returns the directory name for the partition 
kind. + pub fn dir_name(&self) -> Cow { + match self { + Self::Data => Cow::Borrowed("DATA"), + Self::Update => Cow::Borrowed("UPDATE"), + Self::Channel => Cow::Borrowed("CHANNEL"), + Self::Other(v) => { + let bytes = v.to_be_bytes(); + Cow::Owned(format!("P-{}", String::from_utf8_lossy(&bytes))) + } + } + } +} + +impl From for PartitionKind { + fn from(v: u32) -> Self { + match v { + 0 => Self::Data, + 1 => Self::Update, + 2 => Self::Channel, + v => Self::Other(v), + } + } +} + +/// Information about a GameCube or Wii disc partition. +#[derive(Debug, Clone)] +pub struct PartitionInfo { + /// Partition group index + pub group_index: u32, + /// Partition index within the group + pub part_index: u32, + /// Partition offset within disc + pub part_offset: u64, + /// Partition kind + pub kind: PartitionKind, + /// Data offset within partition + pub data_offset: u64, + /// Data size + pub data_size: u64, + /// Raw Wii partition header + pub header: Option, + /// Lagged Fibonacci generator seed (for junk data) + pub lfg_seed: [u8; 4], + // /// Junk data start offset + // pub junk_start: u64, +} /// Contains a disc's header & partition information. pub trait DiscBase: Send + Sync { /// Retrieves the disc's header. - fn get_header(&self) -> &Header; + fn header(&self) -> &DiscHeader; - /// Opens a new partition read stream for the first data partition. + /// A list of partitions on the disc. + fn partitions(&self) -> Vec; + + /// Opens a new, decrypted partition read stream for the specified partition index. /// /// `validate_hashes`: Validate Wii disc hashes while reading (slow!) - /// - /// # Examples - /// - /// Basic usage: - /// ```no_run - /// use nod::{ - /// disc::new_disc_base, - /// io::{new_disc_io, DiscIOOptions}, - /// }; - /// - /// # fn main() -> nod::Result<()> { - /// let options = DiscIOOptions::default(); - /// let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?; - /// let disc_base = new_disc_base(disc_io.as_mut())?; - /// let mut partition = disc_base.get_data_partition(disc_io.as_mut(), false)?; - /// # Ok(()) - /// # } - /// ``` - fn get_data_partition<'a>( + fn open_partition<'a>( &self, - disc_io: &'a mut dyn DiscIO, - validate_hashes: bool, - ) -> Result>; + disc_io: &'a dyn DiscIO, + index: usize, + options: &OpenOptions, + ) -> Result>; /// Opens a new partition read stream for the first partition matching /// the specified type. /// /// `validate_hashes`: Validate Wii disc hashes while reading (slow!) - fn get_partition<'a>( + fn open_partition_kind<'a>( &self, - disc_io: &'a mut dyn DiscIO, - part_type: PartitionType, - validate_hashes: bool, - ) -> Result>; + disc_io: &'a dyn DiscIO, + part_type: PartitionKind, + options: &OpenOptions, + ) -> Result>; + + /// The disc's size in bytes, or an estimate if not stored by the format. + fn disc_size(&self) -> u64; } /// Creates a new [`DiscBase`] instance. 
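+/// The disc type (GameCube or Wii) is detected from the header's magic values.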
-/// -/// # Examples -/// -/// Basic usage: -/// ```no_run -/// use nod::{ -/// disc::new_disc_base, -/// io::{new_disc_io, DiscIOOptions}, -/// }; -/// -/// # fn main() -> nod::Result<()> { -/// let options = DiscIOOptions::default(); -/// let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?; -/// let disc_base = new_disc_base(disc_io.as_mut())?; -/// disc_base.get_header(); -/// # Ok(()) -/// # } -/// ``` -pub fn new_disc_base(disc_io: &mut dyn DiscIO) -> Result> { - let mut stream = disc_io.begin_read_stream(0).context("Opening disc stream")?; - let header_bytes = - <[u8; Header::STATIC_SIZE]>::from_reader(&mut stream).context("Reading disc header")?; - let header = - Header::from_reader(&mut header_bytes.as_slice()).context("Parsing disc header")?; - if header.wii_magic == 0x5D1C9EA3 { - Ok(Box::from(DiscWii::new(stream.as_mut(), header)?)) - } else if header.gcn_magic == 0xC2339F3D { - Ok(Box::from(DiscGCN::new(header)?)) +pub fn new(disc_io: &mut dyn DiscIO) -> Result> { + let disc_size = disc_io.disc_size(); + let mut stream = disc_io.open()?; + let header: DiscHeader = read_from(stream.as_mut()).context("Reading disc header")?; + if header.is_wii() { + Ok(Box::new(DiscWii::new(stream.as_mut(), header, disc_size)?)) + } else if header.is_gamecube() { + Ok(Box::new(DiscGCN::new(stream.as_mut(), header, disc_size)?)) } else { - Err(Error::DiscFormat(format!("Invalid GC/Wii magic: {:#010X}", header.wii_magic))) + Err(Error::DiscFormat(format!( + "Invalid GC/Wii magic: {:#010X}/{:#010X}", + header.gcn_magic.get(), + header.wii_magic.get() + ))) } } /// An open read stream for a disc partition. -pub trait PartReadStream: ReadStream { +pub trait PartitionBase: ReadStream { + /// Reads the partition header and file system table. + fn meta(&mut self) -> Result>; + /// Seeks the read stream to the specified file system node /// and returns a windowed stream. /// @@ -315,22 +336,17 @@ pub trait PartReadStream: ReadStream { /// ```no_run /// use std::io::Read; /// - /// use nod::{ - /// disc::{new_disc_base, PartHeader}, - /// fst::NodeType, - /// io::{new_disc_io, DiscIOOptions}, - /// }; + /// use nod::{Disc, PartitionKind}; /// /// fn main() -> nod::Result<()> { - /// let options = DiscIOOptions::default(); - /// let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?; - /// let disc_base = new_disc_base(disc_io.as_mut())?; - /// let mut partition = disc_base.get_data_partition(disc_io.as_mut(), false)?; - /// let header = partition.read_header()?; - /// if let Some(NodeType::File(node)) = header.find_node("/MP3/Worlds.txt") { + /// let disc = Disc::new("path/to/file.iso")?; + /// let mut partition = disc.open_partition_kind(PartitionKind::Data)?; + /// let meta = partition.meta()?; + /// let fst = meta.fst()?; + /// if let Some((_, node)) = fst.find("/MP3/Worlds.txt") { /// let mut s = String::new(); /// partition - /// .begin_file_stream(node) + /// .open_file(node) /// .expect("Failed to open file stream") /// .read_to_string(&mut s) /// .expect("Failed to read file"); @@ -339,10 +355,7 @@ pub trait PartReadStream: ReadStream { /// Ok(()) /// } /// ``` - fn begin_file_stream(&mut self, node: &Node) -> io::Result; - - /// Reads the partition header and file system table. - fn read_header(&mut self) -> Result>; + fn open_file(&mut self, node: &Node) -> io::Result; /// The ideal size for buffered reads from this partition. 
/// GameCube discs have a data block size of 0x8000, @@ -350,64 +363,60 @@ pub trait PartReadStream: ReadStream { fn ideal_buffer_size(&self) -> usize; } -/// Disc partition header with file system table. -pub trait PartHeader: Debug + Send + Sync { - /// The root node for the filesystem. - fn root_node(&self) -> &NodeType; - - /// Finds a particular file or directory by path. - /// - /// # Examples - /// - /// Basic usage: - /// ```no_run - /// use nod::{ - /// disc::{new_disc_base, PartHeader}, - /// fst::NodeType, - /// io::{new_disc_io, DiscIOOptions}, - /// }; - /// - /// fn main() -> nod::Result<()> { - /// let options = DiscIOOptions::default(); - /// let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?; - /// let disc_base = new_disc_base(disc_io.as_mut())?; - /// let mut partition = disc_base.get_data_partition(disc_io.as_mut(), false)?; - /// let header = partition.read_header()?; - /// if let Some(NodeType::File(node)) = header.find_node("/MP1/Metroid1.pak") { - /// println!("{}", node.name); - /// } - /// if let Some(NodeType::Directory(node, children)) = header.find_node("/MP1") { - /// println!("Number of files: {}", children.len()); - /// } - /// Ok(()) - /// } - /// ``` - fn find_node(&self, path: &str) -> Option<&NodeType>; +/// Size of the disc header and partition header (boot.bin) +pub const BOOT_SIZE: usize = size_of::() + size_of::(); +/// Size of the debug and region information (bi2.bin) +pub const BI2_SIZE: usize = 0x2000; +/// Disc partition metadata +#[derive(Clone, Debug)] +pub struct PartitionMeta { /// Disc and partition header (boot.bin) - fn boot_bytes(&self) -> &[u8]; - + pub raw_boot: [u8; BOOT_SIZE], /// Debug and region information (bi2.bin) - fn bi2_bytes(&self) -> &[u8]; - + pub raw_bi2: [u8; BI2_SIZE], /// Apploader (apploader.bin) - fn apploader_bytes(&self) -> &[u8]; - + pub raw_apploader: Vec, /// File system table (fst.bin) - fn fst_bytes(&self) -> &[u8]; - + pub raw_fst: Vec, /// Main binary (main.dol) - fn dol_bytes(&self) -> &[u8]; - - /// Disc header - fn disc_header(&self) -> &Header; - - /// Partition header - fn partition_header(&self) -> &PartitionHeader; - - /// Apploader header - fn apploader_header(&self) -> &AppLoaderHeader; - - /// DOL header - fn dol_header(&self) -> &DolHeader; + pub raw_dol: Vec, + /// Ticket (ticket.bin, Wii only) + pub raw_ticket: Option>, + /// TMD (tmd.bin, Wii only) + pub raw_tmd: Option>, + /// Certificate chain (cert.bin, Wii only) + pub raw_cert_chain: Option>, + /// H3 hash table (h3.bin, Wii only) + pub raw_h3_table: Option>, } + +impl PartitionMeta { + pub fn header(&self) -> &DiscHeader { + DiscHeader::ref_from(&self.raw_boot[..size_of::()]).unwrap() + } + + pub fn partition_header(&self) -> &PartitionHeader { + PartitionHeader::ref_from(&self.raw_boot[size_of::()..]).unwrap() + } + + pub fn apploader_header(&self) -> &AppLoaderHeader { + AppLoaderHeader::ref_from_prefix(&self.raw_apploader).unwrap() + } + + pub fn fst(&self) -> Result { Fst::new(&self.raw_fst) } + + pub fn dol_header(&self) -> &DolHeader { DolHeader::ref_from_prefix(&self.raw_dol).unwrap() } + + pub fn ticket(&self) -> Option<&Ticket> { + self.raw_ticket.as_ref().and_then(|v| Ticket::ref_from(v)) + } + + pub fn tmd_header(&self) -> Option<&TmdHeader> { + self.raw_tmd.as_ref().and_then(|v| TmdHeader::ref_from_prefix(v)) + } +} + +pub const MINI_DVD_SIZE: u64 = 1_459_978_240; +pub const SL_DVD_SIZE: u64 = 4_699_979_776; +pub const DL_DVD_SIZE: u64 = 8_511_160_320; diff --git a/src/disc/wii.rs b/src/disc/wii.rs index 
7ed0542..2e490df 100644 --- a/src/disc/wii.rs +++ b/src/disc/wii.rs @@ -1,443 +1,442 @@ use std::{ io, io::{Read, Seek, SeekFrom}, + mem::size_of, }; -use aes::{ - cipher::{block_padding::NoPadding, BlockDecryptMut, KeyIvInit}, - Aes128, Block, -}; use sha1::{digest, Digest, Sha1}; +use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes}; use crate::{ array_ref, disc::{ - AppLoaderHeader, DiscBase, DiscIO, DolHeader, Header, PartHeader, PartReadStream, - PartitionHeader, PartitionType, SECTOR_SIZE, + gcn::read_part_header, DiscBase, DiscHeader, DiscIO, PartitionBase, PartitionInfo, + PartitionKind, PartitionMeta, DL_DVD_SIZE, MINI_DVD_SIZE, SECTOR_SIZE, SL_DVD_SIZE, }, - fst::{find_node, Node, NodeKind, NodeType}, - streams::{wrap_windowed, OwningWindowedReadStream, ReadStream, SharedWindowedReadStream}, + fst::{Node, NodeKind}, + io::{aes_decrypt, KeyBytes}, + static_assert, + streams::{wrap_windowed, ReadStream, SharedWindowedReadStream}, util::{ div_rem, - reader::{skip_bytes, struct_size, FromReader}, + reader::{read_from, read_vec}, }, - Error, Result, ResultContext, + Error, OpenOptions, PartitionHeader, Result, ResultContext, }; pub(crate) const HASHES_SIZE: usize = 0x400; pub(crate) const BLOCK_SIZE: usize = SECTOR_SIZE - HASHES_SIZE; // 0x7C00 -/// AES-128-CBC decryptor -type Aes128Cbc = cbc::Decryptor; - #[rustfmt::skip] -const COMMON_KEYS: [[u8; 16]; 2] = [ +const COMMON_KEYS: [KeyBytes; 2] = [ /* Normal */ [0xeb, 0xe4, 0x2a, 0x22, 0x5e, 0x85, 0x93, 0xe4, 0x48, 0xd9, 0xc5, 0x45, 0x73, 0x81, 0xaa, 0xf7], /* Korean */ [0x63, 0xb8, 0x2b, 0xb4, 0xf4, 0x61, 0x4e, 0x2e, 0x13, 0xf2, 0xfe, 0xfb, 0xba, 0x4c, 0x9b, 0x7e], ]; -#[derive(Debug, PartialEq, Eq, Copy, Clone)] -enum SigType { - Rsa4096, - Rsa2048, - EllipticalCurve, +#[derive(Debug, PartialEq, FromBytes, FromZeroes, AsBytes)] +#[repr(C, align(4))] +struct WiiPartEntry { + offset: U32, + kind: U32, } -impl FromReader for SigType { - type Args<'a> = (); +static_assert!(size_of::() == 8); - const STATIC_SIZE: usize = u32::STATIC_SIZE; - - fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result - where R: Read + ?Sized { - match u32::from_reader(reader)? { - 0x00010000 => Ok(SigType::Rsa4096), - 0x00010001 => Ok(SigType::Rsa2048), - 0x00010002 => Ok(SigType::EllipticalCurve), - _ => Err(io::Error::new(io::ErrorKind::InvalidData, "invalid signature type")), - } - } -} - -impl SigType { - fn size(self) -> usize { - match self { - SigType::Rsa4096 => 512, - SigType::Rsa2048 => 256, - SigType::EllipticalCurve => 64, - } - } -} - -#[derive(Debug, PartialEq, Eq, Copy, Clone)] -enum KeyType { - Rsa4096, - Rsa2048, -} - -impl FromReader for KeyType { - type Args<'a> = (); - - const STATIC_SIZE: usize = u32::STATIC_SIZE; - - fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result - where R: Read + ?Sized { - match u32::from_reader(reader)? 
{ - 0x00000000 => Ok(KeyType::Rsa4096), - 0x00000001 => Ok(KeyType::Rsa2048), - _ => Err(io::Error::new(io::ErrorKind::InvalidData, "invalid key type")), - } - } -} - -impl KeyType { - fn size(self) -> usize { - match self { - KeyType::Rsa4096 => 512, - KeyType::Rsa2048 => 256, - } - } +impl WiiPartEntry { + fn offset(&self) -> u64 { (self.offset.get() as u64) << 2 } } #[derive(Debug, PartialEq)] -struct WiiPart { - // #[br(map = |x: u32| (x as u64) << 2)] - part_data_off: u64, - part_type: PartitionType, - // #[br(restore_position, args(part_data_off))] - part_header: WiiPartitionHeader, +pub(crate) struct WiiPartInfo { + pub(crate) group_idx: u32, + pub(crate) part_idx: u32, + pub(crate) offset: u64, + pub(crate) kind: PartitionKind, + pub(crate) header: WiiPartitionHeader, + pub(crate) junk_id: [u8; 4], + pub(crate) junk_start: u64, } -#[derive(Debug, PartialEq)] -struct WiiPartInfo { - // #[br(seek_before = SeekFrom::Start(0x40000))] - part_count: u32, - // #[br(map = |x: u32| (x as u64) << 2)] - part_info_off: u64, - // #[br(seek_before = SeekFrom::Start(part_info_off), count = part_count)] - parts: Vec, +const WII_PART_GROUP_OFF: u64 = 0x40000; + +#[derive(Debug, PartialEq, FromBytes, FromZeroes, AsBytes)] +#[repr(C, align(4))] +struct WiiPartGroup { + part_count: U32, + part_entry_off: U32, } -#[derive(Debug, PartialEq, Default)] -struct TicketTimeLimit { - enable_time_limit: u32, - time_limit: u32, +static_assert!(size_of::() == 8); + +impl WiiPartGroup { + fn part_entry_off(&self) -> u64 { (self.part_entry_off.get() as u64) << 2 } } -impl FromReader for TicketTimeLimit { - type Args<'a> = (); - - const STATIC_SIZE: usize = struct_size([ - u32::STATIC_SIZE, // enable_time_limit - u32::STATIC_SIZE, // time_limit - ]); - - fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result - where R: Read + ?Sized { - let enable_time_limit = u32::from_reader(reader)?; - let time_limit = u32::from_reader(reader)?; - Ok(TicketTimeLimit { enable_time_limit, time_limit }) - } +#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)] +#[repr(C, align(4))] +pub struct SignedHeader { + /// Signature type, always 0x00010001 (RSA-2048) + pub sig_type: U32, + /// RSA-2048 signature + pub sig: [u8; 256], + _pad: [u8; 60], } -#[derive(Debug, PartialEq)] -struct Ticket { - sig_type: SigType, - sig: [u8; 256], - sig_issuer: [u8; 64], - ecdh: [u8; 60], - enc_key: [u8; 16], - ticket_id: [u8; 8], - console_id: [u8; 4], - title_id: [u8; 8], - ticket_version: u16, - permitted_titles_mask: u32, - permit_mask: u32, - title_export_allowed: u8, - common_key_idx: u8, - content_access_permissions: [u8; 64], - time_limits: [TicketTimeLimit; 8], +static_assert!(size_of::() == 0x140); + +#[derive(Debug, Clone, PartialEq, Default, FromBytes, FromZeroes, AsBytes)] +#[repr(C, align(4))] +pub struct TicketTimeLimit { + pub enable_time_limit: U32, + pub time_limit: U32, } -impl FromReader for Ticket { - type Args<'a> = (); +static_assert!(size_of::() == 8); - const STATIC_SIZE: usize = 0x2A4; - - fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result - where R: Read + ?Sized { - let sig_type = SigType::from_reader(reader)?; - let sig = <[u8; 256]>::from_reader(reader)?; - skip_bytes::<0x3C, _>(reader)?; - let sig_issuer = <[u8; 64]>::from_reader(reader)?; - let ecdh = <[u8; 60]>::from_reader(reader)?; - skip_bytes::<3, _>(reader)?; - let enc_key = <[u8; 16]>::from_reader(reader)?; - skip_bytes::<1, _>(reader)?; - let ticket_id = <[u8; 8]>::from_reader(reader)?; - let console_id = 
<[u8; 4]>::from_reader(reader)?; - let title_id = <[u8; 8]>::from_reader(reader)?; - skip_bytes::<2, _>(reader)?; - let ticket_version = u16::from_reader(reader)?; - let permitted_titles_mask = u32::from_reader(reader)?; - let permit_mask = u32::from_reader(reader)?; - let title_export_allowed = u8::from_reader(reader)?; - let common_key_idx = u8::from_reader(reader)?; - skip_bytes::<48, _>(reader)?; - let content_access_permissions = <[u8; 64]>::from_reader(reader)?; - let time_limits = [ - TicketTimeLimit::from_reader(reader)?, - TicketTimeLimit::from_reader(reader)?, - TicketTimeLimit::from_reader(reader)?, - TicketTimeLimit::from_reader(reader)?, - TicketTimeLimit::from_reader(reader)?, - TicketTimeLimit::from_reader(reader)?, - TicketTimeLimit::from_reader(reader)?, - TicketTimeLimit::from_reader(reader)?, - ]; - Ok(Ticket { - sig_type, - sig, - sig_issuer, - ecdh, - enc_key, - ticket_id, - console_id, - title_id, - ticket_version, - permitted_titles_mask, - permit_mask, - title_export_allowed, - common_key_idx, - content_access_permissions, - time_limits, - }) - } +#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)] +#[repr(C, align(4))] +pub struct Ticket { + pub header: SignedHeader, + pub sig_issuer: [u8; 64], + pub ecdh: [u8; 60], + pub version: u8, + _pad1: U16, + pub title_key: KeyBytes, + _pad2: u8, + pub ticket_id: [u8; 8], + pub console_id: [u8; 4], + pub title_id: [u8; 8], + _pad3: U16, + pub ticket_title_version: U16, + pub permitted_titles_mask: U32, + pub permit_mask: U32, + pub title_export_allowed: u8, + pub common_key_idx: u8, + _pad4: [u8; 48], + pub content_access_permissions: [u8; 64], + _pad5: [u8; 2], + pub time_limits: [TicketTimeLimit; 8], } -#[derive(Debug, PartialEq)] -struct TmdContent { - id: u32, - index: u16, - content_type: u16, - size: u64, - hash: [u8; 20], +static_assert!(size_of::() == 0x2A4); + +#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)] +#[repr(C, align(4))] +pub struct TmdHeader { + pub header: SignedHeader, + pub sig_issuer: [u8; 64], + pub version: u8, + pub ca_crl_version: u8, + pub signer_crl_version: u8, + pub is_vwii: u8, + pub ios_id: [u8; 8], + pub title_id: [u8; 8], + pub title_type: u32, + pub group_id: U16, + _pad1: [u8; 2], + pub region: U16, + pub ratings: KeyBytes, + _pad2: [u8; 12], + pub ipc_mask: [u8; 12], + _pad3: [u8; 18], + pub access_flags: U32, + pub title_version: U16, + pub num_contents: U16, + pub boot_idx: U16, + pub minor_version: U16, } -#[derive(Debug, PartialEq)] -struct Tmd { - sig_type: SigType, - // #[br(count = 256)] - sig: Vec, - // #[br(pad_before = 60, count = 64)] - sig_issuer: Vec, - version: u8, - ca_crl_version: u8, - signer_crl_version: u8, - // #[br(pad_before = 1)] - ios_id_major: u32, - ios_id_minor: u32, - title_id_major: u32, - title_id_minor: [u8; 4], - title_type: u32, - group_id: u16, - // #[br(pad_before = 62)] - access_flags: u32, - title_version: u16, - num_contents: u16, - // #[br(pad_after = 2)] - boot_idx: u16, - // #[br(count = num_contents)] - contents: Vec, +static_assert!(size_of::() == 0x1E4); + +pub const H3_TABLE_SIZE: usize = 0x18000; + +#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)] +#[repr(C, align(4))] +pub struct WiiPartitionHeader { + pub ticket: Ticket, + tmd_size: U32, + tmd_off: U32, + cert_chain_size: U32, + cert_chain_off: U32, + h3_table_off: U32, + data_off: U32, + data_size: U32, } -#[derive(Debug, PartialEq)] -struct Certificate { - sig_type: SigType, - // #[br(count = sig_size(sig_type))] - sig: Vec, - // 
#[br(pad_before = 60, count = 64)] - issuer: Vec, - key_type: KeyType, - // #[br(count = 64)] - subject: Vec, - // #[br(count = key_size(key_type))] - key: Vec, - modulus: u32, - // #[br(pad_after = 52)] - pub_exp: u32, -} +static_assert!(size_of::() == 0x2C0); -#[derive(Debug, PartialEq)] -// #[br(import(partition_off: u64))] -struct WiiPartitionHeader { - // #[br(seek_before = SeekFrom::Start(partition_off))] - ticket: Ticket, - tmd_size: u32, - // #[br(map = |x: u32| ((x as u64) << 2) + partition_off)] - tmd_off: u64, - cert_chain_size: u32, - // #[br(map = |x: u32| ((x as u64) << 2) + partition_off)] - cert_chain_off: u64, - // #[br(map = |x: u32| ((x as u64) << 2) + partition_off)] - global_hash_table_off: u64, - // #[br(map = |x: u32| ((x as u64) << 2) + partition_off)] - data_off: u64, - // #[br(map = |x: u32| (x as u64) << 2)] - data_size: u64, +impl WiiPartitionHeader { + pub fn tmd_size(&self) -> u64 { self.tmd_size.get() as u64 } - // #[br(seek_before = SeekFrom::Start(tmd_off))] - tmd: Tmd, - // #[br(seek_before = SeekFrom::Start(cert_chain_off))] - ca_cert: Certificate, - tmd_cert: Certificate, - ticket_cert: Certificate, - // #[br(seek_before = SeekFrom::Start(global_hash_table_off), count = 0x18000)] - h3_data: Vec, -} + pub fn tmd_off(&self) -> u64 { (self.tmd_off.get() as u64) << 2 } -impl FromReader for WiiPartitionHeader { - type Args<'a> = u64; + pub fn cert_chain_size(&self) -> u64 { self.cert_chain_size.get() as u64 } - const STATIC_SIZE: usize = Ticket::STATIC_SIZE; + pub fn cert_chain_off(&self) -> u64 { (self.cert_chain_off.get() as u64) << 2 } - fn from_reader_args(reader: &mut R, args: Self::Args<'_>) -> io::Result - where R: Read + ?Sized { - todo!() - } + pub fn h3_table_off(&self) -> u64 { (self.h3_table_off.get() as u64) << 2 } + + pub fn h3_table_size(&self) -> u64 { H3_TABLE_SIZE as u64 } + + pub fn data_off(&self) -> u64 { (self.data_off.get() as u64) << 2 } + + pub fn data_size(&self) -> u64 { (self.data_size.get() as u64) << 2 } } pub(crate) struct DiscWii { - header: Header, - part_info: WiiPartInfo, + header: DiscHeader, + part_info: Vec, + disc_size: u64, } impl DiscWii { - pub(crate) fn new(mut stream: &mut dyn ReadStream, header: Header) -> Result { - let mut disc = DiscWii { header, part_info: todo!() }; // stream.read_be()? 
- disc.decrypt_partition_keys()?; - Ok(disc) + pub(crate) fn new( + stream: &mut dyn ReadStream, + header: DiscHeader, + disc_size: Option, + ) -> Result { + let part_info = read_partition_info(stream)?; + // Guess disc size if not provided + let disc_size = disc_size.unwrap_or_else(|| guess_disc_size(&part_info)); + Ok(Self { header, part_info, disc_size }) } } -impl DiscWii { - pub(crate) fn decrypt_partition_keys(&mut self) -> Result<()> { - for part in self.part_info.parts.as_mut_slice() { - let ticket = &mut part.part_header.ticket; - let mut iv: [u8; 16] = [0; 16]; - iv[..8].copy_from_slice(&ticket.title_id); - Aes128Cbc::new(&COMMON_KEYS[ticket.common_key_idx as usize].into(), &iv.into()) - .decrypt_padded_mut::(&mut ticket.enc_key)?; +pub(crate) fn read_partition_info(stream: &mut dyn ReadStream) -> Result> { + stream.seek(SeekFrom::Start(WII_PART_GROUP_OFF)).context("Seeking to partition groups")?; + let part_groups: [WiiPartGroup; 4] = read_from(stream).context("Reading partition groups")?; + let mut part_info = Vec::new(); + for (group_idx, group) in part_groups.iter().enumerate() { + let part_count = group.part_count.get(); + if part_count == 0 { + continue; + } + stream + .seek(SeekFrom::Start(group.part_entry_off())) + .with_context(|| format!("Seeking to partition group {group_idx}"))?; + let entries: Vec = read_vec(stream, part_count as usize) + .with_context(|| format!("Reading partition group {group_idx}"))?; + for (part_idx, entry) in entries.iter().enumerate() { + let offset = entry.offset(); + stream + .seek(SeekFrom::Start(offset)) + .with_context(|| format!("Seeking to partition data {group_idx}:{part_idx}"))?; + let mut header: WiiPartitionHeader = read_from(stream) + .with_context(|| format!("Reading partition header {group_idx}:{part_idx}"))?; + + // Decrypt title key + let mut iv: KeyBytes = [0; 16]; + iv[..8].copy_from_slice(&header.ticket.title_id); + let common_key = + COMMON_KEYS.get(header.ticket.common_key_idx as usize).ok_or(Error::DiscFormat( + format!("unknown common key index {}", header.ticket.common_key_idx), + ))?; + aes_decrypt(common_key, iv, &mut header.ticket.title_key); + + // Open partition stream and read junk data seed + let inner = stream + .new_window(offset + header.data_off(), header.data_size()) + .context("Wrapping partition stream")?; + let mut stream = PartitionWii { + header: header.clone(), + tmd: vec![], + cert_chain: vec![], + h3_table: vec![], + stream: Box::new(inner), + key: Some(header.ticket.title_key), + offset: 0, + cur_block: 0, + buf: [0; SECTOR_SIZE], + validate_hashes: false, + }; + let junk_id: [u8; 4] = read_from(&mut stream).context("Reading junk seed bytes")?; + stream + .seek(SeekFrom::Start(size_of::() as u64)) + .context("Seeking to partition header")?; + let part_header: PartitionHeader = + read_from(&mut stream).context("Reading partition header")?; + let junk_start = part_header.fst_off(true) + part_header.fst_sz(true); + + // log::debug!( + // "Partition: {:?} - {:?}: {:?}", + // offset + header.data_off(), + // header.data_size(), + // header.ticket.title_key + // ); + + part_info.push(WiiPartInfo { + group_idx: group_idx as u32, + part_idx: part_idx as u32, + offset, + kind: entry.kind.get().into(), + header, + junk_id, + junk_start, + }); } - Ok(()) } + Ok(part_info) +} + +pub(crate) fn guess_disc_size(part_info: &[WiiPartInfo]) -> u64 { + let max_offset = part_info + .iter() + .flat_map(|v| { + [ + v.offset + v.header.tmd_off() + v.header.tmd_size(), + v.offset + v.header.cert_chain_off() + 
v.header.cert_chain_size(), + v.offset + v.header.h3_table_off() + v.header.h3_table_size(), + v.offset + v.header.data_off() + v.header.data_size(), + ] + }) + .max() + .unwrap_or(0x50000); + if max_offset <= MINI_DVD_SIZE && !part_info.iter().any(|v| v.kind == PartitionKind::Data) { + // Datel disc + MINI_DVD_SIZE + } else if max_offset < SL_DVD_SIZE { + SL_DVD_SIZE + } else { + DL_DVD_SIZE + } +} + +fn open_partition<'a>( + part: &WiiPartInfo, + disc_io: &'a dyn DiscIO, + options: &OpenOptions, + header: &DiscHeader, +) -> Result> { + let data_off = part.offset + part.header.data_off(); + let has_crypto = header.no_partition_encryption == 0; + let mut base = disc_io.open()?; + + base.seek(SeekFrom::Start(part.offset + part.header.tmd_off())) + .context("Seeking to TMD offset")?; + let tmd: Vec = + read_vec(&mut base, part.header.tmd_size() as usize).context("Reading TMD")?; + + base.seek(SeekFrom::Start(part.offset + part.header.cert_chain_off())) + .context("Seeking to cert chain offset")?; + let cert_chain: Vec = read_vec(&mut base, part.header.cert_chain_size() as usize) + .context("Reading cert chain")?; + + base.seek(SeekFrom::Start(part.offset + part.header.h3_table_off())) + .context("Seeking to H3 table offset")?; + let h3_table: Vec = read_vec(&mut base, H3_TABLE_SIZE).context("Reading H3 table")?; + + let stream = wrap_windowed(base, data_off, part.header.data_size()).with_context(|| { + format!("Wrapping {}:{} partition stream", part.group_idx, part.part_idx) + })?; + Ok(Box::new(PartitionWii { + header: part.header.clone(), + tmd, + cert_chain, + h3_table, + stream: Box::new(stream), + key: has_crypto.then_some(part.header.ticket.title_key), + offset: 0, + cur_block: u32::MAX, + buf: [0; SECTOR_SIZE], + validate_hashes: options.validate_hashes && header.no_partition_hashes == 0, + })) } impl DiscBase for DiscWii { - fn get_header(&self) -> &Header { &self.header } + fn header(&self) -> &DiscHeader { &self.header } - fn get_data_partition<'a>( - &self, - disc_io: &'a mut dyn DiscIO, - validate_hashes: bool, - ) -> Result> { - let part = self - .part_info - .parts + fn partitions(&self) -> Vec { + self.part_info .iter() - .find(|v| v.part_type == PartitionType::Data) - .ok_or_else(|| Error::DiscFormat("Failed to locate data partition".to_string()))?; - let data_off = part.part_header.data_off; - let has_crypto = disc_io.has_wii_crypto(); - let base = disc_io - .begin_read_stream(data_off) - .map_err(|e| Error::Io("Opening data partition stream".to_string(), e))?; - let stream = wrap_windowed(base, data_off, part.part_header.data_size) - .context("Wrapping data partition stream")?; - let result = Box::new(WiiPartReadStream { - stream, - crypto: if has_crypto { Some(part.part_header.ticket.enc_key) } else { None }, - offset: 0, - cur_block: u32::MAX, - buf: [0; 0x8000], - validate_hashes, - }); - Ok(result) + .map(|v| PartitionInfo { + group_index: v.group_idx, + part_index: v.part_idx, + part_offset: v.offset, + kind: v.kind, + data_offset: v.header.data_off(), + data_size: v.header.data_size(), + header: Some(v.header.clone()), + lfg_seed: v.junk_id, + // junk_start: v.junk_start, + }) + .collect() } - fn get_partition<'a>( + fn open_partition<'a>( &self, - disc_io: &'a mut dyn DiscIO, - part_type: PartitionType, - validate_hashes: bool, - ) -> Result> { - let part = - self.part_info.parts.iter().find(|v| v.part_type == part_type).ok_or_else(|| { - Error::DiscFormat(format!("Failed to locate {:?} partition", part_type)) - })?; - let data_off = part.part_header.data_off; - let 
has_crypto = disc_io.has_wii_crypto(); - let base = disc_io - .begin_read_stream(data_off) - .with_context(|| format!("Opening {:?} partition stream", part_type))?; - let stream = wrap_windowed(base, data_off, part.part_header.data_size) - .with_context(|| format!("Wrapping {:?} partition stream", part_type))?; - let result = Box::new(WiiPartReadStream { - stream, - crypto: if has_crypto { Some(part.part_header.ticket.enc_key) } else { None }, - offset: 0, - cur_block: u32::MAX, - buf: [0; 0x8000], - validate_hashes, - }); - Ok(result) + disc_io: &'a dyn DiscIO, + index: usize, + options: &OpenOptions, + ) -> Result> { + let part = self.part_info.get(index).ok_or_else(|| { + Error::DiscFormat(format!("Failed to locate partition index {}", index)) + })?; + open_partition(part, disc_io, options, &self.header) } + + fn open_partition_kind<'a>( + &self, + disc_io: &'a dyn DiscIO, + part_type: PartitionKind, + options: &OpenOptions, + ) -> Result> { + let part = self.part_info.iter().find(|&v| v.kind == part_type).ok_or_else(|| { + Error::DiscFormat(format!("Failed to locate {:?} partition", part_type)) + })?; + open_partition(part, disc_io, options, &self.header) + } + + fn disc_size(&self) -> u64 { self.disc_size } } -struct WiiPartReadStream<'a> { - stream: OwningWindowedReadStream<'a>, - crypto: Option<[u8; 16]>, +struct PartitionWii<'a> { + header: WiiPartitionHeader, + tmd: Vec, + cert_chain: Vec, + h3_table: Vec, + + stream: Box, + key: Option, offset: u64, cur_block: u32, buf: [u8; SECTOR_SIZE], validate_hashes: bool, } -impl<'a> PartReadStream for WiiPartReadStream<'a> { - fn begin_file_stream(&mut self, node: &Node) -> io::Result { - assert_eq!(node.kind, NodeKind::File); - self.new_window((node.offset as u64) << 2, node.length as u64) +impl<'a> PartitionBase for PartitionWii<'a> { + fn meta(&mut self) -> Result> { + self.seek(SeekFrom::Start(0)).context("Seeking to partition header")?; + let mut meta = read_part_header(self, true)?; + meta.raw_ticket = Some(self.header.ticket.as_bytes().to_vec()); + meta.raw_tmd = Some(self.tmd.clone()); + meta.raw_cert_chain = Some(self.cert_chain.clone()); + meta.raw_h3_table = Some(self.h3_table.clone()); + Ok(meta) } - fn read_header(&mut self) -> Result> { - self.seek(SeekFrom::Start(0)).context("Seeking to partition header")?; - todo!() - // Ok(Box::from(self.read_be::()?)) + fn open_file(&mut self, node: &Node) -> io::Result { + assert_eq!(node.kind(), NodeKind::File); + self.new_window(node.offset(true), node.length(true)) } fn ideal_buffer_size(&self) -> usize { BLOCK_SIZE } } #[inline(always)] -fn as_digest(slice: &[u8; 20]) -> digest::Output { (*slice).into() } +pub(crate) fn as_digest(slice: &[u8; 20]) -> digest::Output { (*slice).into() } -fn decrypt_block(part: &mut WiiPartReadStream, cluster: u32) -> io::Result<()> { +fn decrypt_block(part: &mut PartitionWii, cluster: u32) -> io::Result<()> { part.stream.read_exact(&mut part.buf)?; - if let Some(key) = &part.crypto { + if let Some(key) = &part.key { // Fetch IV before decrypting header - let iv_bytes = array_ref![part.buf, 0x3d0, 16]; - let iv = Block::from(*iv_bytes); + let iv = *array_ref![part.buf, 0x3d0, 16]; // Don't need to decrypt header if we're not validating hashes if part.validate_hashes { - Aes128Cbc::new(key.into(), &Block::from([0; 16])) - .decrypt_padded_mut::(&mut part.buf[..HASHES_SIZE]) - .expect("Failed to decrypt header"); + aes_decrypt(key, [0; 16], &mut part.buf[..HASHES_SIZE]); } - Aes128Cbc::new(key.into(), &iv) - .decrypt_padded_mut::(&mut 
part.buf[HASHES_SIZE..]) - .expect("Failed to decrypt block"); + aes_decrypt(key, iv, &mut part.buf[HASHES_SIZE..]); } if part.validate_hashes { let (mut group, sub_group) = div_rem(cluster as usize, 8); @@ -449,7 +448,13 @@ fn decrypt_block(part: &mut WiiPartReadStream, cluster: u32) -> io::Result<()> { let expected = as_digest(array_ref![part.buf, i * 20, 20]); let output = hash.finalize(); if output != expected { - panic!("Invalid H0 hash! (block {:?}) {:x}\n\texpected {:x}", i, output, expected); + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!( + "Invalid H0 hash! (block {:?}) {:x}\n\texpected {:x}", + i, output, expected + ), + )); } } // H1 hash @@ -459,10 +464,13 @@ fn decrypt_block(part: &mut WiiPartReadStream, cluster: u32) -> io::Result<()> { let expected = as_digest(array_ref![part.buf, 0x280 + sub_group * 20, 20]); let output = hash.finalize(); if output != expected { - panic!( - "Invalid H1 hash! (subgroup {:?}) {:x}\n\texpected {:x}", - sub_group, output, expected - ); + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!( + "Invalid H1 hash! (subgroup {:?}) {:x}\n\texpected {:x}", + sub_group, output, expected + ), + )); } } // H2 hash @@ -472,17 +480,20 @@ fn decrypt_block(part: &mut WiiPartReadStream, cluster: u32) -> io::Result<()> { let expected = as_digest(array_ref![part.buf, 0x340 + group * 20, 20]); let output = hash.finalize(); if output != expected { - panic!( - "Invalid H2 hash! (group {:?}) {:x}\n\texpected {:x}", - group, output, expected - ); + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!( + "Invalid H2 hash! (group {:?}) {:x}\n\texpected {:x}", + group, output, expected + ), + )); } } } Ok(()) } -impl<'a> Read for WiiPartReadStream<'a> { +impl<'a> Read for PartitionWii<'a> { fn read(&mut self, buf: &mut [u8]) -> io::Result { let (block, block_offset) = div_rem(self.offset, BLOCK_SIZE as u64); let mut block = block as u32; @@ -521,16 +532,16 @@ fn to_block_size(v: u64) -> u64 { (v / SECTOR_SIZE as u64) * BLOCK_SIZE as u64 + (v % SECTOR_SIZE as u64) } -impl<'a> Seek for WiiPartReadStream<'a> { +impl<'a> Seek for PartitionWii<'a> { fn seek(&mut self, pos: SeekFrom) -> io::Result { self.offset = match pos { SeekFrom::Start(v) => v, - SeekFrom::End(v) => (self.stable_stream_len()? 
as i64 + v) as u64,
-            SeekFrom::Current(v) => (self.offset as i64 + v) as u64,
+            SeekFrom::End(v) => self.stable_stream_len()?.saturating_add_signed(v),
+            SeekFrom::Current(v) => self.offset.saturating_add_signed(v),
         };
-        let block = (self.offset / BLOCK_SIZE as u64) as u32;
-        if block != self.cur_block {
-            self.stream.seek(SeekFrom::Start(block as u64 * SECTOR_SIZE as u64))?;
+        let block = self.offset / BLOCK_SIZE as u64;
+        if block as u32 != self.cur_block {
+            self.stream.seek(SeekFrom::Start(block * SECTOR_SIZE as u64))?;
             self.cur_block = u32::MAX;
         }
         Ok(self.offset)
@@ -539,45 +550,10 @@ impl<'a> Seek for WiiPartReadStream<'a> {
     fn stream_position(&mut self) -> io::Result<u64> { Ok(self.offset) }
 }
 
-impl<'a> ReadStream for WiiPartReadStream<'a> {
+impl<'a> ReadStream for PartitionWii<'a> {
     fn stable_stream_len(&mut self) -> io::Result<u64> {
         Ok(to_block_size(self.stream.stable_stream_len()?))
     }
 
     fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
 }
-
-#[derive(Clone, Debug, PartialEq)]
-pub(crate) struct WiiPartition {
-    header: Header,
-    // #[br(seek_before = SeekFrom::Start(0x400))]
-    part_header: PartitionHeader,
-    // bi2_header: BI2Header,
-    // #[br(seek_before = SeekFrom::Start((part_header.fst_off as u64) << 2))]
-    // #[br(parse_with = node_parser)]
-    root_node: NodeType,
-}
-
-impl PartHeader for WiiPartition {
-    fn root_node(&self) -> &NodeType { &self.root_node }
-
-    fn find_node(&self, path: &str) -> Option<&NodeType> { find_node(&self.root_node, path) }
-
-    fn boot_bytes(&self) -> &[u8] { todo!() }
-
-    fn bi2_bytes(&self) -> &[u8] { todo!() }
-
-    fn apploader_bytes(&self) -> &[u8] { todo!() }
-
-    fn fst_bytes(&self) -> &[u8] { todo!() }
-
-    fn dol_bytes(&self) -> &[u8] { todo!() }
-
-    fn disc_header(&self) -> &Header { todo!() }
-
-    fn partition_header(&self) -> &PartitionHeader { todo!() }
-
-    fn apploader_header(&self) -> &AppLoaderHeader { todo!() }
-
-    fn dol_header(&self) -> &DolHeader { todo!() }
-}
diff --git a/src/fst.rs b/src/fst.rs
index b4c9654..8f2a474 100644
--- a/src/fst.rs
+++ b/src/fst.rs
@@ -1,17 +1,11 @@
 //! Disc file system types
 
-use std::{
-    ffi::CString,
-    io,
-    io::{Read, Seek, SeekFrom},
-};
+use std::{borrow::Cow, ffi::CStr, mem::size_of};
 
 use encoding_rs::SHIFT_JIS;
+use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes};
 
-use crate::{
-    util::reader::{struct_size, FromReader, DYNAMIC_SIZE, U24},
-    Result, ResultContext,
-};
+use crate::{static_assert, Result};
 
 /// File system node kind.
 #[derive(Clone, Debug, PartialEq)]
@@ -20,180 +14,161 @@ pub enum NodeKind {
     File,
     /// Node is a directory.
     Directory,
-}
-
-impl FromReader for NodeKind {
-    type Args<'a> = ();
-
-    const STATIC_SIZE: usize = 1;
-
-    fn from_reader_args<R>(_reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
-    where R: Read + ?Sized {
-        match u8::from_reader(_reader)? {
-            0 => Ok(NodeKind::File),
-            1 => Ok(NodeKind::Directory),
-            _ => Err(io::Error::new(io::ErrorKind::InvalidData, "invalid node kind")),
-        }
-    }
+    /// Invalid node kind. (Should not normally occur)
+    Invalid,
 }
 
 /// An individual file system node.
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
+#[repr(C, align(4))]
 pub struct Node {
+    kind: u8,
+    // u24 big-endian
+    name_offset: [u8; 3],
+    offset: U32,
+    length: U32,
+}
+
+static_assert!(size_of::<Node>() == 12);
+
+impl Node {
     /// File system node type.
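The `name_offset` field above stores a 24-bit big-endian value in `[u8; 3]`; `Node::name_offset()` below widens it by prepending a zero byte. A standalone sketch of that decoding:

```rust
/// Decode a 24-bit big-endian value, as stored in the FST node's
/// `name_offset` field.
fn decode_u24_be(bytes: [u8; 3]) -> u32 {
    u32::from_be_bytes([0, bytes[0], bytes[1], bytes[2]])
}

fn main() {
    // Bytes 0x01 0x02 0x03 decode to 0x010203.
    assert_eq!(decode_u24_be([0x01, 0x02, 0x03]), 0x010203);
}
```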
- pub kind: NodeKind, + pub fn kind(&self) -> NodeKind { + match self.kind { + 0 => NodeKind::File, + 1 => NodeKind::Directory, + _ => NodeKind::Invalid, + } + } + + /// Whether the node is a file. + pub fn is_file(&self) -> bool { self.kind == 0 } + + /// Whether the node is a directory. + pub fn is_dir(&self) -> bool { self.kind == 1 } /// Offset in the string table to the filename. - pub name_offset: u32, + pub fn name_offset(&self) -> u32 { + u32::from_be_bytes([0, self.name_offset[0], self.name_offset[1], self.name_offset[2]]) + } /// For files, this is the partition offset of the file data. (Wii: >> 2) /// - /// For directories, this is the children start offset in the FST. - pub offset: u32, + /// For directories, this is the parent node index in the FST. + pub fn offset(&self, is_wii: bool) -> u64 { + if is_wii && self.kind == 0 { + self.offset.get() as u64 * 4 + } else { + self.offset.get() as u64 + } + } - /// For files, this is the byte size of the file. + /// For files, this is the byte size of the file. (Wii: >> 2) /// - /// For directories, this is the children end offset in the FST. + /// For directories, this is the child end index in the FST. /// /// Number of child files and directories recursively is `length - offset`. - pub length: u32, - - /// The node name. - pub name: String, -} - -impl FromReader for Node { - type Args<'a> = (); - - const STATIC_SIZE: usize = struct_size([ - NodeKind::STATIC_SIZE, // type - U24::STATIC_SIZE, // name_offset - u32::STATIC_SIZE, // offset - u32::STATIC_SIZE, // length - ]); - - fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result - where R: Read + ?Sized { - let kind = NodeKind::from_reader(reader)?; - let name_offset = U24::from_reader(reader)?.0; - let offset = u32::from_reader(reader)?; - let length = u32::from_reader(reader)?; - Ok(Node { kind, offset, length, name_offset, name: Default::default() }) - } -} - -/// Contains a file system node, and if a directory, its children. -#[derive(Clone, Debug, PartialEq)] -pub enum NodeType { - /// A single file node. - File(Node), - /// A directory node with children. 
- Directory(Node, Vec), -} - -impl FromReader for NodeType { - type Args<'a> = &'a mut u32; - - const STATIC_SIZE: usize = DYNAMIC_SIZE; - - fn from_reader_args(reader: &mut R, idx: &mut u32) -> io::Result - where R: Read + ?Sized { - let node = Node::from_reader(reader)?; - *idx += 1; - Ok(if node.kind == NodeKind::Directory { - let mut children = Vec::with_capacity((node.length - *idx) as usize); - while *idx < node.length { - children.push(NodeType::from_reader_args(reader, idx)?); - } - NodeType::Directory(node, children) + pub fn length(&self, is_wii: bool) -> u64 { + if is_wii && self.kind == 0 { + self.length.get() as u64 * 4 } else { - NodeType::File(node) - }) - } -} - -fn read_node_name( - reader: &mut R, - string_base: u64, - node: &mut NodeType, - root: bool, -) -> io::Result<()> -where - R: Read + Seek + ?Sized, -{ - let mut decode_name = |v: &mut Node| -> io::Result<()> { - if !root { - let offset = string_base + v.name_offset as u64; - reader.seek(SeekFrom::Start(offset))?; - - let c_string = CString::from_reader(reader)?; - let (decoded, _, errors) = SHIFT_JIS.decode(c_string.as_bytes()); - if errors { - return Err(io::Error::new(io::ErrorKind::InvalidData, "invalid shift-jis")); - } - v.name = decoded.into_owned(); - } - Ok(()) - }; - match node { - NodeType::File(inner) => { - decode_name(inner)?; - } - NodeType::Directory(inner, children) => { - decode_name(inner)?; - for child in children { - read_node_name(reader, string_base, child, false)?; - } - } - } - Ok(()) -} - -pub(crate) fn read_fst(reader: &mut R) -> Result -where R: Read + Seek + ?Sized { - let mut node = NodeType::from_reader_args(reader, &mut 0).context("Parsing FST nodes")?; - let string_base = reader.stream_position().context("Reading FST end position")?; - read_node_name(reader, string_base, &mut node, true).context("Reading FST node names")?; - Ok(node) -} - -fn matches_name(node: &NodeType, name: &str) -> bool { - match node { - NodeType::File(v) => v.name.as_str().eq_ignore_ascii_case(name), - NodeType::Directory(v, _) => { - v.name.is_empty() /* root */ || v.name.as_str().eq_ignore_ascii_case(name) + self.length.get() as u64 } } } -pub(crate) fn find_node<'a>(mut node: &'a NodeType, path: &str) -> Option<&'a NodeType> { - let mut split = path.split('/'); - let mut current = split.next(); - while current.is_some() { - if matches_name(node, current.unwrap()) { - match node { - NodeType::File(_) => { - return if split.next().is_none() { Some(node) } else { None }; +/// A view into the file system tree (FST). +pub struct Fst<'a> { + pub nodes: &'a [Node], + pub string_table: &'a [u8], +} + +impl<'a> Fst<'a> { + /// Create a new FST view from a buffer. + pub fn new(buf: &'a [u8]) -> Result { + let Some(root_node) = Node::ref_from_prefix(buf) else { + return Err("FST root node not found"); + }; + // String table starts after the last node + let string_base = root_node.length(false) * size_of::() as u64; + if string_base >= buf.len() as u64 { + return Err("FST string table out of bounds"); + } + let (node_buf, string_table) = buf.split_at(string_base as usize); + let nodes = Node::slice_from(node_buf).unwrap(); + Ok(Self { nodes, string_table }) + } + + /// Iterate over the nodes in the FST. + pub fn iter(&self) -> FstIter { FstIter { fst: self, idx: 1 } } + + /// Get the name of a node. 
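`Fst::new` above locates the string table purely from the root node: its `length` field is the total node count, and each node is 12 bytes per the earlier `static_assert`. A minimal sketch of that arithmetic:

```rust
/// The string table begins immediately after the node array; the root
/// node's `length` field holds the total node count (root included).
const NODE_SIZE: u64 = 12; // enforced by static_assert!(size_of::<Node>() == 12)

fn string_table_offset(root_length: u64) -> u64 {
    root_length * NODE_SIZE
}

fn main() {
    // An FST with 100 nodes places its string table at byte offset 1200.
    assert_eq!(string_table_offset(100), 1200);
}
```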
+    pub fn get_name(&self, node: &Node) -> Result<Cow<'a, str>, String> {
+        let name_buf = self.string_table.get(node.name_offset() as usize..).ok_or_else(|| {
+            format!(
+                "FST: name offset {} out of bounds (string table size: {})",
+                node.name_offset(),
+                self.string_table.len()
+            )
+        })?;
+        let c_string = CStr::from_bytes_until_nul(name_buf).map_err(|_| {
+            format!("FST: name at offset {} not null-terminated", node.name_offset())
+        })?;
+        let (decoded, _, errors) = SHIFT_JIS.decode(c_string.to_bytes());
+        if errors {
+            return Err(format!("FST: Failed to decode name at offset {}", node.name_offset()));
+        }
+        Ok(decoded)
+    }
+
+    /// Finds a particular file or directory by path.
+    pub fn find(&self, path: &str) -> Option<(usize, &Node)> {
+        let mut split = path.trim_matches('/').split('/');
+        let mut current = split.next()?;
+        let mut idx = 1;
+        let mut stop_at = None;
+        while let Some(node) = self.nodes.get(idx) {
+            if self.get_name(node).as_ref().map_or(false, |name| name.eq_ignore_ascii_case(current))
+            {
+                if let Some(next) = split.next() {
+                    current = next;
+                } else {
+                    return Some((idx, node));
                 }
-            NodeType::Directory(v, c) => {
-                // Find child
-                if !v.name.is_empty() || current.unwrap().is_empty() {
-                    current = split.next();
-                }
-                if current.is_none() || current.unwrap().is_empty() {
-                    return if split.next().is_none() { Some(node) } else { None };
-                }
-                for x in c {
-                    if matches_name(x, current.unwrap()) {
-                        node = x;
-                        break;
-                    }
-                }
+                // Descend into directory
+                idx += 1;
+                stop_at = Some(node.length(false) as usize + idx);
+            } else if node.is_dir() {
+                // Skip directory
+                idx = node.length(false) as usize;
+            } else {
+                // Skip file
+                idx += 1;
+            }
+            if let Some(stop) = stop_at {
+                if idx >= stop {
+                    break;
                 }
             }
-        } else {
-            break;
         }
+        None
+    }
+}
+
+/// Iterator over the nodes in an FST.
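`find` resolves a single path, while `iter` walks the flat node array in order. A sketch listing every file in a GameCube image; for Wii partitions, pass `true` to `offset`/`length` so the stored values are shifted:

```rust
use nod::{Disc, PartitionKind};

fn main() -> nod::Result<()> {
    let disc = Disc::new("path/to/file.iso")?;
    let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
    let meta = partition.meta()?;
    let fst = meta.fst()?;
    // Node 0 is the root, so iteration starts at index 1.
    for (_, node, name) in fst.iter() {
        if node.is_file() {
            // `false`: GameCube images store offsets/lengths unshifted.
            println!("{} ({} bytes)", name.expect("Invalid node name"), node.length(false));
        }
    }
    Ok(())
}
```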
+pub struct FstIter<'a> { + fst: &'a Fst<'a>, + idx: usize, +} + +impl<'a> Iterator for FstIter<'a> { + type Item = (usize, &'a Node, Result, String>); + + fn next(&mut self) -> Option { + let idx = self.idx; + let node = self.fst.nodes.get(idx)?; + let name = self.fst.get_name(node); + self.idx += 1; + Some((idx, node, name)) } - None } diff --git a/src/io/ciso.rs b/src/io/ciso.rs new file mode 100644 index 0000000..4eeae39 --- /dev/null +++ b/src/io/ciso.rs @@ -0,0 +1,267 @@ +use std::{ + cmp::min, + io, + io::{BufReader, Read, Seek, SeekFrom}, + mem::size_of, + path::Path, +}; + +use zerocopy::{little_endian::*, AsBytes, FromBytes, FromZeroes}; + +use crate::{ + disc::{gcn::DiscGCN, wii::DiscWii, DiscBase, DL_DVD_SIZE, SECTOR_SIZE}, + io::{nkit::NKitHeader, split::SplitFileReader, DiscIO, MagicBytes}, + static_assert, + util::{ + lfg::LaggedFibonacci, + reader::{read_box_slice, read_from}, + }, + DiscHeader, DiscMeta, Error, PartitionInfo, ReadStream, Result, ResultContext, +}; + +pub const CISO_MAGIC: MagicBytes = *b"CISO"; +pub const CISO_MAP_SIZE: usize = SECTOR_SIZE - 8; + +#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)] +#[repr(C, align(4))] +struct CISOHeader { + magic: MagicBytes, + // little endian + block_size: U32, + block_present: [u8; CISO_MAP_SIZE], +} + +static_assert!(size_of::() == SECTOR_SIZE); + +pub struct DiscIOCISO { + inner: SplitFileReader, + header: CISOHeader, + block_map: [u16; CISO_MAP_SIZE], + nkit_header: Option, + junk_blocks: Option>, + partitions: Vec, + disc_num: u8, +} + +impl DiscIOCISO { + pub fn new(filename: &Path) -> Result { + let mut inner = BufReader::new(SplitFileReader::new(filename)?); + + // Read header + let header: CISOHeader = read_from(&mut inner).context("Reading CISO header")?; + if header.magic != CISO_MAGIC { + return Err(Error::DiscFormat("Invalid CISO magic".to_string())); + } + + // Build block map + let mut block_map = [0u16; CISO_MAP_SIZE]; + let mut block = 0u16; + for (presence, out) in header.block_present.iter().zip(block_map.iter_mut()) { + if *presence == 1 { + *out = block; + block += 1; + } else { + *out = u16::MAX; + } + } + let file_size = SECTOR_SIZE as u64 + block as u64 * header.block_size.get() as u64; + if file_size > inner.get_ref().len() { + return Err(Error::DiscFormat(format!( + "CISO file size mismatch: expected at least {} bytes, got {}", + file_size, + inner.get_ref().len() + ))); + } + + // Read NKit header if present (after CISO data) + let nkit_header = if inner.get_ref().len() > file_size + 4 { + inner.seek(SeekFrom::Start(file_size)).context("Seeking to NKit header")?; + NKitHeader::try_read_from(&mut inner) + } else { + None + }; + + // Read junk data bitstream if present (after NKit header) + let junk_blocks = if nkit_header.is_some() { + let n = 1 + DL_DVD_SIZE / header.block_size.get() as u64 / 8; + Some(read_box_slice(&mut inner, n as usize).context("Reading NKit bitstream")?) + } else { + None + }; + + let (partitions, disc_num) = if junk_blocks.is_some() { + let mut stream: Box = Box::new(CISOReadStream { + inner: BufReader::new(inner.get_ref().clone()), + block_size: header.block_size.get(), + block_map, + cur_block: u16::MAX, + pos: 0, + junk_blocks: None, + partitions: vec![], + disc_num: 0, + }); + let header: DiscHeader = read_from(stream.as_mut()).context("Reading disc header")?; + let disc_num = header.disc_num; + let disc_base: Box = if header.is_wii() { + Box::new(DiscWii::new(stream.as_mut(), header, None)?) 
+ } else if header.is_gamecube() { + Box::new(DiscGCN::new(stream.as_mut(), header, None)?) + } else { + return Err(Error::DiscFormat(format!( + "Invalid GC/Wii magic: {:#010X}/{:#010X}", + header.gcn_magic.get(), + header.wii_magic.get() + ))); + }; + (disc_base.partitions(), disc_num) + } else { + (vec![], 0) + }; + + // Reset reader + let mut inner = inner.into_inner(); + inner.reset(); + Ok(Self { inner, header, block_map, nkit_header, junk_blocks, partitions, disc_num }) + } +} + +impl DiscIO for DiscIOCISO { + fn open(&self) -> Result> { + Ok(Box::new(CISOReadStream { + inner: BufReader::new(self.inner.clone()), + block_size: self.header.block_size.get(), + block_map: self.block_map, + cur_block: u16::MAX, + pos: 0, + junk_blocks: self.junk_blocks.clone(), + partitions: self.partitions.clone(), + disc_num: self.disc_num, + })) + } + + fn meta(&self) -> Result { + Ok(self.nkit_header.as_ref().map(DiscMeta::from).unwrap_or_default()) + } + + fn disc_size(&self) -> Option { self.nkit_header.as_ref().and_then(|h| h.size) } +} + +struct CISOReadStream { + inner: BufReader, + block_size: u32, + block_map: [u16; CISO_MAP_SIZE], + cur_block: u16, + pos: u64, + + // Data for recreating junk data + junk_blocks: Option>, + partitions: Vec, + disc_num: u8, +} + +impl CISOReadStream { + fn read_junk_data(&mut self, buf: &mut [u8]) -> io::Result { + let Some(junk_blocks) = self.junk_blocks.as_deref() else { + return Ok(0); + }; + let block_size = self.block_size as u64; + let block = (self.pos / block_size) as u16; + if junk_blocks[(block / 8) as usize] & (1 << (7 - (block & 7))) == 0 { + return Ok(0); + } + let Some(partition) = self.partitions.iter().find(|p| { + let start = p.part_offset + p.data_offset; + start <= self.pos && self.pos < start + p.data_size + }) else { + log::warn!("No partition found for junk data at offset {:#x}", self.pos); + return Ok(0); + }; + let offset = self.pos - (partition.part_offset + partition.data_offset); + let to_read = min( + buf.len(), + // The LFG is only valid for a single sector + SECTOR_SIZE - (offset % SECTOR_SIZE as u64) as usize, + ); + let mut lfg = LaggedFibonacci::default(); + lfg.init_with_seed(partition.lfg_seed, self.disc_num, offset); + lfg.fill(&mut buf[..to_read]); + self.pos += to_read as u64; + Ok(to_read) + } +} + +impl Read for CISOReadStream { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + let block_size = self.block_size as u64; + let block = (self.pos / block_size) as u16; + let block_offset = self.pos & (block_size - 1); + if block != self.cur_block { + if block >= CISO_MAP_SIZE as u16 { + return Ok(0); + } + + // Find the block in the map + let phys_block = self.block_map[block as usize]; + if phys_block == u16::MAX { + // Try to recreate junk data + let read = self.read_junk_data(buf)?; + if read > 0 { + return Ok(read); + } + + // Otherwise, read zeroes + let to_read = min(buf.len(), (block_size - block_offset) as usize); + buf[..to_read].fill(0); + self.pos += to_read as u64; + return Ok(to_read); + } + + // Seek to the new block + let file_offset = + size_of::() as u64 + phys_block as u64 * block_size + block_offset; + self.inner.seek(SeekFrom::Start(file_offset))?; + self.cur_block = block; + } + + let to_read = min(buf.len(), (block_size - block_offset) as usize); + let read = self.inner.read(&mut buf[..to_read])?; + self.pos += read as u64; + Ok(read) + } +} + +impl Seek for CISOReadStream { + fn seek(&mut self, pos: SeekFrom) -> io::Result { + let new_pos = match pos { + SeekFrom::Start(v) => v, + SeekFrom::End(_) => { 
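+                // The CISO header records which blocks are present, but not
+                // the original disc's logical size, so an end-relative seek
+                // target cannot be resolved reliably here.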
+ return Err(io::Error::new( + io::ErrorKind::Unsupported, + "CISOReadStream: SeekFrom::End is not supported", + )); + } + SeekFrom::Current(v) => self.pos.saturating_add_signed(v), + }; + + let block_size = self.block_size as u64; + let new_block = (self.pos / block_size) as u16; + if new_block == self.cur_block { + // Seek within the same block + self.inner.seek(SeekFrom::Current(new_pos as i64 - self.pos as i64))?; + } else { + // Seek to a different block, handled by next read + self.cur_block = u16::MAX; + } + + self.pos = new_pos; + Ok(new_pos) + } +} + +impl ReadStream for CISOReadStream { + fn stable_stream_len(&mut self) -> io::Result { + Ok(self.block_size as u64 * CISO_MAP_SIZE as u64) + } + + fn as_dyn(&mut self) -> &mut dyn ReadStream { self } +} diff --git a/src/io/iso.rs b/src/io/iso.rs index 116fbf8..6b971fb 100644 --- a/src/io/iso.rs +++ b/src/io/iso.rs @@ -1,49 +1,25 @@ -use std::{ - fs::File, - io, - io::{Seek, SeekFrom}, - path::{Path, PathBuf}, +use std::{io::BufReader, path::Path}; + +use crate::{ + io::{split::SplitFileReader, DiscIO}, + streams::ReadStream, + Result, }; -use crate::{io::DiscIO, streams::ReadStream, Result}; - -pub(crate) struct DiscIOISO { - pub(crate) filename: PathBuf, +pub struct DiscIOISO { + pub inner: SplitFileReader, } impl DiscIOISO { - pub(crate) fn new(filename: &Path) -> Result { - Ok(DiscIOISO { filename: filename.to_owned() }) + pub fn new(filename: &Path) -> Result { + Ok(Self { inner: SplitFileReader::new(filename)? }) } } impl DiscIO for DiscIOISO { - fn begin_read_stream(&mut self, offset: u64) -> io::Result> { - let mut file = File::open(&*self.filename)?; - file.seek(SeekFrom::Start(offset))?; - Ok(Box::from(file)) + fn open(&self) -> Result> { + Ok(Box::new(BufReader::new(self.inner.clone()))) } -} -pub(crate) struct DiscIOISOStream -where T: ReadStream + Sized -{ - pub(crate) stream: T, -} - -impl DiscIOISOStream -where T: ReadStream + Sized -{ - pub(crate) fn new(stream: T) -> Result> { Ok(DiscIOISOStream { stream }) } -} - -impl DiscIO for DiscIOISOStream -where T: ReadStream + Sized + Send + Sync -{ - fn begin_read_stream<'a>(&'a mut self, offset: u64) -> io::Result> { - let size = self.stream.stable_stream_len()?; - let mut stream = self.stream.new_window(0, size)?; - stream.seek(SeekFrom::Start(offset))?; - Ok(Box::from(stream)) - } + fn disc_size(&self) -> Option { Some(self.inner.len()) } } diff --git a/src/io/mod.rs b/src/io/mod.rs index 542af09..5b80e64 100644 --- a/src/io/mod.rs +++ b/src/io/mod.rs @@ -1,52 +1,52 @@ -//! Disc file format related logic (ISO, NFS, etc) +//! Disc file format related logic (CISO, NFS, WBFS, WIA, etc.) -use std::{fs, io, path::Path}; +use std::{fs, fs::File, path::Path}; use crate::{ - io::{ - iso::{DiscIOISO, DiscIOISOStream}, - nfs::DiscIONFS, - wia::DiscIOWIA, - }, - streams::{ByteReadStream, ReadStream}, - Error, Result, + streams::ReadStream, util::reader::read_from, Error, OpenOptions, Result, ResultContext, }; +pub(crate) mod ciso; pub(crate) mod iso; pub(crate) mod nfs; +pub(crate) mod nkit; +pub(crate) mod split; +pub(crate) mod wbfs; pub(crate) mod wia; -#[derive(Default, Debug, Clone)] -pub struct DiscIOOptions { - /// Rebuild hashes for the disc image. - pub rebuild_hashes: bool, -} +/// SHA-1 hash bytes +pub(crate) type HashBytes = [u8; 20]; -/// Abstraction over supported disc file types. +/// AES key bytes +pub(crate) type KeyBytes = [u8; 16]; + +/// Magic bytes +pub(crate) type MagicBytes = [u8; 4]; + +/// Abstraction over supported disc file formats. 
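For reference, the block map built in `DiscIOCISO::new` above assigns present blocks consecutive physical indices and marks absent blocks with `u16::MAX`; absent blocks read back as zeroes or regenerated junk data. A standalone sketch of that mapping:

```rust
/// Mirrors the CISO block map construction: each present block takes the
/// next physical index; absent blocks map to u16::MAX.
fn build_block_map(block_present: &[u8]) -> Vec<u16> {
    let mut next = 0u16;
    block_present
        .iter()
        .map(|&present| {
            if present == 1 {
                let idx = next;
                next += 1;
                idx
            } else {
                u16::MAX
            }
        })
        .collect()
}

fn main() {
    assert_eq!(build_block_map(&[1, 0, 1, 1]), vec![0, u16::MAX, 1, 2]);
}
```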
pub trait DiscIO: Send + Sync { /// Opens a new read stream for the disc file(s). /// Generally does _not_ need to be used directly. - fn begin_read_stream(&mut self, offset: u64) -> io::Result>; + fn open(&self) -> Result>; - /// If false, the file format does not use standard Wii partition encryption. (e.g. NFS) - fn has_wii_crypto(&self) -> bool { true } + /// Returns extra metadata included in the disc file format, if any. + fn meta(&self) -> Result { Ok(DiscMeta::default()) } + + /// If None, the file format does not store the original disc size. (e.g. WBFS, NFS) + fn disc_size(&self) -> Option; +} + +/// Extra metadata included in some disc file formats. +#[derive(Debug, Clone, Default)] +pub struct DiscMeta { + pub crc32: Option, + pub md5: Option<[u8; 16]>, + pub sha1: Option<[u8; 20]>, + pub xxhash64: Option, } /// Creates a new [`DiscIO`] instance. -/// -/// # Examples -/// -/// Basic usage: -/// ```no_run -/// use nod::io::{new_disc_io, DiscIOOptions}; -/// -/// # fn main() -> nod::Result<()> { -/// let options = DiscIOOptions::default(); -/// let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?; -/// # Ok(()) -/// # } -/// ``` -pub fn new_disc_io(filename: &Path, options: &DiscIOOptions) -> Result> { +pub fn open(filename: &Path, options: &OpenOptions) -> Result> { let path_result = fs::canonicalize(filename); if let Err(err) = path_result { return Err(Error::Io(format!("Failed to open {}", filename.display()), err)); @@ -59,66 +59,38 @@ pub fn new_disc_io(filename: &Path, options: &DiscIOOptions) -> Result Ok(Box::new(ciso::DiscIOCISO::new(path)?)), + nfs::NFS_MAGIC => match path.parent() { Some(parent) if parent.is_dir() => { - Ok(Box::from(DiscIONFS::new(path.parent().unwrap())?)) + Ok(Box::new(nfs::DiscIONFS::new(path.parent().unwrap(), options)?)) } _ => Err(Error::DiscFormat("Failed to locate NFS parent directory".to_string())), - } - } else if has_extension(path, "wia") || has_extension(path, "rvz") { - Ok(Box::from(DiscIOWIA::new(path, options)?)) - } else { - Err(Error::DiscFormat("Unknown file type".to_string())) + }, + wbfs::WBFS_MAGIC => Ok(Box::new(wbfs::DiscIOWBFS::new(path)?)), + wia::WIA_MAGIC | wia::RVZ_MAGIC => Ok(Box::new(wia::DiscIOWIA::new(path, options)?)), + _ => Ok(Box::new(iso::DiscIOISO::new(path)?)), } } -/// Creates a new [`DiscIO`] instance from a byte slice. -/// -/// # Examples -/// -/// Basic usage: -/// ```no_run -/// use nod::io::new_disc_io_from_buf; -/// -/// # fn main() -> nod::Result<()> { -/// # #[allow(non_upper_case_globals)] const buf: &[u8] = &[0u8; 0]; -/// let mut disc_io = new_disc_io_from_buf(buf)?; -/// # Ok(()) -/// # } -/// ``` -pub fn new_disc_io_from_buf(buf: &[u8]) -> Result> { - new_disc_io_from_stream(ByteReadStream { bytes: buf, position: 0 }) +/// Encrypts data in-place using AES-128-CBC with the given key and IV. +pub(crate) fn aes_encrypt(key: &KeyBytes, iv: KeyBytes, data: &mut [u8]) { + use aes::cipher::{block_padding::NoPadding, BlockEncryptMut, KeyIvInit}; + >::new(key.into(), &aes::Block::from(iv)) + .encrypt_padded_mut::(data, data.len()) + .unwrap(); // Safe: using NoPadding } -/// Creates a new [`DiscIO`] instance from an existing [`ReadStream`]. 
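The `open` dispatch above selects a backend by magic bytes rather than file extension. A minimal standalone sketch of the same idea; the `CISO`/`EGGS`/`WBFS` magics appear in this patch, while the `WIA`/`RVZ` byte values shown here are assumptions about those formats, not taken from it:

```rust
use std::{fs::File, io::Read, path::Path};

// Best-effort format detection from the first four bytes.
fn detect_format(path: &Path) -> std::io::Result<&'static str> {
    let mut magic = [0u8; 4];
    File::open(path)?.read_exact(&mut magic)?;
    Ok(match &magic {
        b"CISO" => "CISO",
        b"EGGS" => "NFS",
        b"WBFS" => "WBFS",
        b"WIA\x01" => "WIA", // assumed magic
        b"RVZ\x01" => "RVZ", // assumed magic
        _ => "raw ISO (fallback)",
    })
}
```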
-/// -/// # Examples -/// -/// Basic usage: -/// ```no_run -/// use nod::{io::new_disc_io_from_stream, streams::ByteReadStream}; -/// -/// # fn main() -> nod::Result<()> { -/// # #[allow(non_upper_case_globals)] const buf: &[u8] = &[0u8; 0]; -/// let stream = ByteReadStream { bytes: buf, position: 0 }; -/// let mut disc_io = new_disc_io_from_stream(stream)?; -/// # Ok(()) -/// # } -/// ``` -pub fn new_disc_io_from_stream<'a, T: 'a + ReadStream + Sized + Send + Sync>( - stream: T, -) -> Result> { - Ok(Box::from(DiscIOISOStream::new(stream)?)) -} - -/// Helper function for checking a file extension. -#[inline(always)] -pub fn has_extension(filename: &Path, extension: &str) -> bool { - match filename.extension() { - Some(ext) => ext.eq_ignore_ascii_case(extension), - None => false, - } +/// Decrypts data in-place using AES-128-CBC with the given key and IV. +pub(crate) fn aes_decrypt(key: &KeyBytes, iv: KeyBytes, data: &mut [u8]) { + use aes::cipher::{block_padding::NoPadding, BlockDecryptMut, KeyIvInit}; + >::new(key.into(), &aes::Block::from(iv)) + .decrypt_padded_mut::(data) + .unwrap(); // Safe: using NoPadding } diff --git a/src/io/nfs.rs b/src/io/nfs.rs index 7700f53..e3edea6 100644 --- a/src/io/nfs.rs +++ b/src/io/nfs.rs @@ -2,342 +2,306 @@ use std::{ fs::File, io, io::{BufReader, Read, Seek, SeekFrom}, + mem::size_of, path::{Component, Path, PathBuf}, }; -use aes::{ - cipher::{block_padding::NoPadding, BlockDecryptMut, KeyIvInit}, - Aes128, -}; +use zerocopy::{big_endian::U32, AsBytes, FromBytes, FromZeroes}; use crate::{ - disc::SECTOR_SIZE, - io::DiscIO, + array_ref, + disc::{ + wii::{read_partition_info, HASHES_SIZE}, + SECTOR_SIZE, + }, + io::{aes_decrypt, aes_encrypt, split::SplitFileReader, DiscIO, KeyBytes, MagicBytes}, + static_assert, streams::ReadStream, - util::reader::{read_vec, struct_size, FromReader}, - Error, Result, ResultContext, + util::reader::read_from, + DiscHeader, Error, OpenOptions, Result, ResultContext, }; -type Aes128Cbc = cbc::Decryptor; +pub const NFS_MAGIC: MagicBytes = *b"EGGS"; +pub const NFS_END_MAGIC: MagicBytes = *b"SGGE"; -#[derive(Clone, Debug, PartialEq)] -pub(crate) struct LBARange { - pub(crate) start_block: u32, - pub(crate) num_blocks: u32, +#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)] +#[repr(C, align(4))] +pub struct LBARange { + pub start_sector: U32, + pub num_sectors: U32, } -impl FromReader for LBARange { - type Args<'a> = (); - - const STATIC_SIZE: usize = struct_size([ - u32::STATIC_SIZE, // start_block - u32::STATIC_SIZE, // num_blocks - ]); - - fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result - where R: Read + ?Sized { - Ok(LBARange { - start_block: u32::from_reader(reader)?, - num_blocks: u32::from_reader(reader)?, - }) - } +#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)] +#[repr(C, align(4))] +pub struct NFSHeader { + pub magic: MagicBytes, + pub version: U32, + pub unk1: U32, + pub unk2: U32, + pub num_lba_ranges: U32, + pub lba_ranges: [LBARange; 61], + pub end_magic: MagicBytes, } -type MagicBytes = [u8; 4]; - -#[derive(Clone, Debug, PartialEq)] -pub(crate) struct NFSHeader { - pub(crate) version: u32, - pub(crate) unk1: u32, - pub(crate) unk2: u32, - pub(crate) lba_ranges: Vec, -} - -impl FromReader for NFSHeader { - type Args<'a> = (); - - const STATIC_SIZE: usize = struct_size([ - MagicBytes::STATIC_SIZE, // magic - u32::STATIC_SIZE, // version - u32::STATIC_SIZE, // unk1 - u32::STATIC_SIZE, // unk2 - u32::STATIC_SIZE, // lba_range_count - LBARange::STATIC_SIZE 
* 61, // lba_ranges - MagicBytes::STATIC_SIZE, // end_magic - ]); - - fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result - where R: Read + ?Sized { - if MagicBytes::from_reader(reader)? != *b"EGGS" { - return Err(io::Error::new(io::ErrorKind::InvalidData, "Invalid NFS magic")); - } - let version = u32::from_reader(reader)?; - let unk1 = u32::from_reader(reader)?; - let unk2 = u32::from_reader(reader)?; - let lba_range_count = u32::from_reader(reader)?; - let mut lba_ranges = read_vec(reader, 61)?; - lba_ranges.truncate(lba_range_count as usize); - if MagicBytes::from_reader(reader)? != *b"SGGE" { - return Err(io::Error::new(io::ErrorKind::InvalidData, "Invalid NFS end magic")); - } - Ok(NFSHeader { version, unk1, unk2, lba_ranges }) - } -} - -#[derive(Clone, Copy, Debug, PartialEq)] -pub(crate) struct Fbo { - pub(crate) file: u32, - pub(crate) block: u32, - pub(crate) l_block: u32, - pub(crate) offset: u32, -} - -impl Default for Fbo { - fn default() -> Self { - Fbo { file: u32::MAX, block: u32::MAX, l_block: u32::MAX, offset: u32::MAX } - } -} +static_assert!(size_of::() == 0x200); impl NFSHeader { - pub(crate) fn calculate_num_files(&self) -> u32 { - let total_block_count = - self.lba_ranges.iter().fold(0u32, |acc, range| acc + range.num_blocks); - (((total_block_count as u64) * 0x8000u64 + (0x200u64 + 0xF9FFFFFu64)) / 0xFA00000u64) as u32 + pub fn validate(&self) -> Result<()> { + if self.magic != NFS_MAGIC { + return Err(Error::DiscFormat("Invalid NFS magic".to_string())); + } + if self.num_lba_ranges.get() > 61 { + return Err(Error::DiscFormat("Invalid NFS LBA range count".to_string())); + } + if self.end_magic != NFS_END_MAGIC { + return Err(Error::DiscFormat("Invalid NFS end magic".to_string())); + } + Ok(()) } - pub(crate) fn logical_to_fbo(&self, offset: u64) -> Fbo { - let block_div = (offset / 0x8000) as u32; - let block_off = (offset % 0x8000) as u32; - let mut block = u32::MAX; - let mut physical_block = 0u32; - for range in self.lba_ranges.iter() { - if block_div >= range.start_block && block_div - range.start_block < range.num_blocks { - block = physical_block + (block_div - range.start_block); - break; + pub fn lba_ranges(&self) -> &[LBARange] { + &self.lba_ranges[..self.num_lba_ranges.get() as usize] + } + + pub fn calculate_num_files(&self) -> u32 { + let sector_count = + self.lba_ranges().iter().fold(0u32, |acc, range| acc + range.num_sectors.get()); + (((sector_count as u64) * (SECTOR_SIZE as u64) + + (size_of::() as u64 + 0xF9FFFFFu64)) + / 0xFA00000u64) as u32 + } + + pub fn phys_sector(&self, sector: u32) -> u32 { + let mut cur_sector = 0u32; + for range in self.lba_ranges().iter() { + if sector >= range.start_sector.get() + && sector - range.start_sector.get() < range.num_sectors.get() + { + return cur_sector + (sector - range.start_sector.get()); } - physical_block += range.num_blocks; - } - if block == u32::MAX { - Fbo::default() - } else { - Fbo { file: block / 8000, block: block % 8000, l_block: block_div, offset: block_off } + cur_sector += range.num_sectors.get(); } + u32::MAX } } -pub(crate) struct DiscIONFS { - pub(crate) directory: PathBuf, - pub(crate) key: [u8; 16], - pub(crate) header: Option, +pub struct DiscIONFS { + pub inner: SplitFileReader, + pub header: NFSHeader, + pub raw_size: u64, + pub disc_size: u64, + pub key: KeyBytes, + pub encrypt: bool, } impl DiscIONFS { - pub(crate) fn new(directory: &Path) -> Result { - let mut disc_io = DiscIONFS { directory: directory.to_owned(), key: [0; 16], header: None }; - 
disc_io.validate_files()?; + pub fn new(directory: &Path, options: &OpenOptions) -> Result { + let mut disc_io = DiscIONFS { + inner: SplitFileReader::empty(), + header: NFSHeader::new_zeroed(), + raw_size: 0, + disc_size: 0, + key: [0; 16], + encrypt: options.rebuild_encryption, + }; + disc_io.load_files(directory)?; Ok(disc_io) } } -pub(crate) struct NFSReadStream<'a> { - disc_io: &'a DiscIONFS, - file: Option, - crypto: [u8; 16], - // Physical address - all UINT32_MAX indicates logical zero block - phys_addr: Fbo, - // Logical address - offset: u64, - // Active file stream and its offset as set in the system. - // Block is typically one ahead of the presently decrypted block. - cur_file: u32, - cur_block: u32, +pub struct NFSReadStream { + /// Underlying file reader + inner: SplitFileReader, + /// NFS file header + header: NFSHeader, + /// Inner disc header + disc_header: Option, + /// Estimated disc size + disc_size: u64, + /// Current offset + pos: u64, + /// Current sector + sector: u32, + /// Current decrypted sector buf: [u8; SECTOR_SIZE], + /// AES key + key: KeyBytes, + /// Wii partition info + part_info: Vec, } -impl<'a> NFSReadStream<'a> { - fn set_cur_file(&mut self, cur_file: u32) -> Result<()> { - if cur_file >= self.disc_io.header.as_ref().unwrap().calculate_num_files() { - return Err(Error::DiscFormat(format!("Out of bounds NFS file access: {}", cur_file))); - } - self.cur_file = cur_file; - self.cur_block = u32::MAX; - let path = self.disc_io.get_nfs(cur_file)?; - self.file = Option::from( - File::open(&path).with_context(|| format!("Opening file {}", path.display()))?, - ); - Ok(()) - } +struct PartitionInfo { + start_sector: u32, + end_sector: u32, + key: KeyBytes, +} - fn set_cur_block(&mut self, cur_block: u32) -> io::Result<()> { - self.cur_block = cur_block; - self.file - .as_ref() - .unwrap() - .seek(SeekFrom::Start(self.cur_block as u64 * SECTOR_SIZE as u64 + 0x200u64))?; - Ok(()) - } - - fn set_phys_addr(&mut self, phys_addr: Fbo) -> Result<()> { - // If we're just changing the offset, nothing else needs to be done - if self.phys_addr.file == phys_addr.file && self.phys_addr.block == phys_addr.block { - self.phys_addr.offset = phys_addr.offset; - return Ok(()); - } - self.phys_addr = phys_addr; - - // Set logical zero block - if phys_addr.file == u32::MAX { +impl NFSReadStream { + fn read_sector(&mut self, sector: u32) -> io::Result<()> { + // Calculate physical sector + let phys_sector = self.header.phys_sector(sector); + if phys_sector == u32::MAX { + // Logical zero sector self.buf.fill(0u8); return Ok(()); } - // Make necessary file and block current with system - if phys_addr.file != self.cur_file { - self.set_cur_file(phys_addr.file)?; - } - if phys_addr.block != self.cur_block { - self.set_cur_block(phys_addr.block) - .with_context(|| format!("Seeking to NFS block {}", phys_addr.block))?; - } - - // Read block, handling 0x200 overlap case - if phys_addr.block == 7999 { - self.file - .as_ref() - .unwrap() - .read_exact(&mut self.buf[..SECTOR_SIZE - 0x200]) - .context("Reading NFS block 7999 part 1")?; - self.set_cur_file(self.cur_file + 1)?; - self.file - .as_ref() - .unwrap() - .read_exact(&mut self.buf[SECTOR_SIZE - 0x200..]) - .context("Reading NFS block 7999 part 2")?; - self.cur_block = 0; - } else { - self.file - .as_ref() - .unwrap() - .read_exact(&mut self.buf) - .with_context(|| format!("Reading NFS block {}", phys_addr.block))?; - self.cur_block += 1; - } + // Read sector + let offset = size_of::() as u64 + phys_sector as u64 * SECTOR_SIZE as u64; 
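+        // Sector data is stored contiguously after the 0x200-byte EGGS
+        // header (size_of::<NFSHeader>()); SplitFileReader presents the
+        // hif_*.nfs file set as a single stream to index into.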
-    fn set_cur_block(&mut self, cur_block: u32) -> io::Result<()> {
-        self.cur_block = cur_block;
-        self.file
-            .as_ref()
-            .unwrap()
-            .seek(SeekFrom::Start(self.cur_block as u64 * SECTOR_SIZE as u64 + 0x200u64))?;
-        Ok(())
-    }
-
-    fn set_phys_addr(&mut self, phys_addr: Fbo) -> Result<()> {
-        // If we're just changing the offset, nothing else needs to be done
-        if self.phys_addr.file == phys_addr.file && self.phys_addr.block == phys_addr.block {
-            self.phys_addr.offset = phys_addr.offset;
-            return Ok(());
-        }
-        self.phys_addr = phys_addr;
-
-        // Set logical zero block
-        if phys_addr.file == u32::MAX {
+impl NFSReadStream {
+    fn read_sector(&mut self, sector: u32) -> io::Result<()> {
+        // Calculate physical sector
+        let phys_sector = self.header.phys_sector(sector);
+        if phys_sector == u32::MAX {
+            // Logical zero sector
             self.buf.fill(0u8);
             return Ok(());
         }
-        // Make necessary file and block current with system
-        if phys_addr.file != self.cur_file {
-            self.set_cur_file(phys_addr.file)?;
-        }
-        if phys_addr.block != self.cur_block {
-            self.set_cur_block(phys_addr.block)
-                .with_context(|| format!("Seeking to NFS block {}", phys_addr.block))?;
-        }
-
-        // Read block, handling 0x200 overlap case
-        if phys_addr.block == 7999 {
-            self.file
-                .as_ref()
-                .unwrap()
-                .read_exact(&mut self.buf[..SECTOR_SIZE - 0x200])
-                .context("Reading NFS block 7999 part 1")?;
-            self.set_cur_file(self.cur_file + 1)?;
-            self.file
-                .as_ref()
-                .unwrap()
-                .read_exact(&mut self.buf[SECTOR_SIZE - 0x200..])
-                .context("Reading NFS block 7999 part 2")?;
-            self.cur_block = 0;
-        } else {
-            self.file
-                .as_ref()
-                .unwrap()
-                .read_exact(&mut self.buf)
-                .with_context(|| format!("Reading NFS block {}", phys_addr.block))?;
-            self.cur_block += 1;
-        }
+        // Read sector
+        let offset = size_of::<NFSHeader>() as u64 + phys_sector as u64 * SECTOR_SIZE as u64;
+        self.inner.seek(SeekFrom::Start(offset))?;
+        self.inner.read_exact(&mut self.buf)?;

         // Decrypt
+        let iv_bytes = sector.to_be_bytes();
         #[rustfmt::skip]
-        let iv: [u8; 16] = [
+        let iv: KeyBytes = [
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            (phys_addr.l_block & 0xFF) as u8,
-            ((phys_addr.l_block >> 8) & 0xFF) as u8,
-            ((phys_addr.l_block >> 16) & 0xFF) as u8,
-            ((phys_addr.l_block >> 24) & 0xFF) as u8,
+            iv_bytes[0], iv_bytes[1], iv_bytes[2], iv_bytes[3],
         ];
-        Aes128Cbc::new(self.crypto.as_ref().into(), &iv.into())
-            .decrypt_padded_mut::<NoPadding>(&mut self.buf)?;
+        aes_decrypt(&self.key, iv, &mut self.buf);
+
+        if sector == 0 {
+            if let Some(header) = &self.disc_header {
+                // Replace disc header in buffer
+                let header_bytes = header.as_bytes();
+                self.buf[..header_bytes.len()].copy_from_slice(header_bytes);
+            }
+        }
+
+        // Re-encrypt if needed
+        if let Some(part) = self
+            .part_info
+            .iter()
+            .find(|part| sector >= part.start_sector && sector < part.end_sector)
+        {
+            // Encrypt hashes
+            aes_encrypt(&part.key, [0u8; 16], &mut self.buf[..HASHES_SIZE]);
+            // Encrypt data using IV from H2
+            aes_encrypt(&part.key, *array_ref![self.buf, 0x3d0, 16], &mut self.buf[HASHES_SIZE..]);
+        }

         Ok(())
     }
-
-    fn set_logical_addr(&mut self, addr: u64) -> Result<()> {
-        self.set_phys_addr(self.disc_io.header.as_ref().unwrap().logical_to_fbo(addr))
-    }
 }

-impl<'a> Read for NFSReadStream<'a> {
+impl Read for NFSReadStream {
     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
-        let mut rem = buf.len();
-        let mut read: usize = 0;
-        while rem > 0 {
-            let mut read_size = rem;
-            let block_offset: usize =
-                if self.phys_addr.offset == u32::MAX { 0 } else { self.phys_addr.offset as usize };
-            if read_size + block_offset > SECTOR_SIZE {
-                read_size = SECTOR_SIZE - block_offset
-            }
-            buf[read..read + read_size]
-                .copy_from_slice(&self.buf[block_offset..block_offset + read_size]);
-            read += read_size;
-            rem -= read_size;
-            self.offset += read_size as u64;
-            self.set_logical_addr(self.offset).map_err(|e| match e {
-                Error::Io(s, e) => io::Error::new(e.kind(), s),
-                _ => io::Error::from(io::ErrorKind::Other),
-            })?;
+        let sector = (self.pos / SECTOR_SIZE as u64) as u32;
+        let sector_off = (self.pos % SECTOR_SIZE as u64) as usize;
+        if sector != self.sector {
+            self.read_sector(sector)?;
+            self.sector = sector;
         }
+
+        let read = buf.len().min(SECTOR_SIZE - sector_off);
+        buf[..read].copy_from_slice(&self.buf[sector_off..sector_off + read]);
+        self.pos += read as u64;
         Ok(read)
     }
 }
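+// Note: as with any `Read` implementation, `read` may return fewer bytes than
+// requested (here it never crosses a sector boundary), so callers that need a
+// full buffer should loop or use `read_exact`, e.g.:
+//
+//     let mut buf = [0u8; SECTOR_SIZE * 2];
+//     stream.read_exact(&mut buf)?; // issues multiple `read` calls internally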
-impl<'a> Seek for NFSReadStream<'a> {
+impl Seek for NFSReadStream {
     fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
-        self.offset = match pos {
+        self.pos = match pos {
             SeekFrom::Start(v) => v,
-            SeekFrom::End(v) => (self.stable_stream_len()? as i64 + v) as u64,
-            SeekFrom::Current(v) => (self.offset as i64 + v) as u64,
+            SeekFrom::End(_) => {
+                return Err(io::Error::new(
+                    io::ErrorKind::Unsupported,
+                    "NFSReadStream: SeekFrom::End is not supported",
+                ));
+            }
+            SeekFrom::Current(v) => self.pos.saturating_add_signed(v),
         };
-        self.set_logical_addr(self.offset).map_err(|v| match v {
-            Error::Io(_, v) => v,
-            _ => io::Error::from(io::ErrorKind::Other),
-        })?;
-        Ok(self.offset)
+        Ok(self.pos)
     }

-    fn stream_position(&mut self) -> io::Result<u64> { Ok(self.offset) }
+    fn stream_position(&mut self) -> io::Result<u64> { Ok(self.pos) }
 }

-impl<'a> ReadStream for NFSReadStream<'a> {
-    fn stable_stream_len(&mut self) -> io::Result<u64> { todo!() }
+impl ReadStream for NFSReadStream {
+    fn stable_stream_len(&mut self) -> io::Result<u64> { Ok(self.disc_size) }

     fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
 }

 impl DiscIO for DiscIONFS {
-    fn begin_read_stream(&mut self, offset: u64) -> io::Result<Box<dyn ReadStream + '_>> {
-        Ok(Box::from(NFSReadStream {
-            disc_io: self,
-            file: None,
-            crypto: self.key,
-            phys_addr: Fbo::default(),
-            offset,
-            cur_file: u32::MAX,
-            cur_block: u32::MAX,
+    fn open(&self) -> Result<Box<dyn ReadStream>> {
+        let mut stream = NFSReadStream {
+            inner: self.inner.clone(),
+            header: self.header.clone(),
+            disc_header: None,
+            disc_size: self.disc_size,
+            pos: 0,
+            sector: u32::MAX,
             buf: [0; SECTOR_SIZE],
-        }))
+            key: self.key,
+            part_info: vec![],
+        };
+        let mut disc_header: DiscHeader = read_from(&mut stream).context("Reading disc header")?;
+        if !self.encrypt {
+            // If we're not re-encrypting, disable partition encryption in disc header
+            disc_header.no_partition_encryption = 1;
+        }
+
+        // Read partition info so we can re-encrypt
+        if self.encrypt && disc_header.is_wii() {
+            for part in read_partition_info(&mut stream)? {
+                let start = part.offset + part.header.data_off();
+                let end = start + part.header.data_size();
+                if start % SECTOR_SIZE as u64 != 0 || end % SECTOR_SIZE as u64 != 0 {
+                    return Err(Error::DiscFormat(format!(
+                        "Partition start / end not aligned to sector size: {} / {}",
+                        start, end
+                    )));
+                }
+                stream.part_info.push(PartitionInfo {
+                    start_sector: (start / SECTOR_SIZE as u64) as u32,
+                    end_sector: (end / SECTOR_SIZE as u64) as u32,
+                    key: part.header.ticket.title_key,
+                });
+            }
+        }
+
+        stream.disc_header = Some(disc_header);
+        // Reset stream position
+        stream.pos = 0;
+        stream.sector = u32::MAX;
+        Ok(Box::new(stream))
     }

-    fn has_wii_crypto(&self) -> bool { false }
+    fn disc_size(&self) -> Option<u64> { None }
 }

+fn get_path<P>(directory: &Path, path: P) -> PathBuf
+where P: AsRef<Path> {
+    let mut buf = directory.to_path_buf();
+    for component in path.as_ref().components() {
+        match component {
+            Component::ParentDir => {
+                buf.pop();
+            }
+            _ => buf.push(component),
+        }
+    }
+    buf
+}
+
+fn get_nfs(directory: &Path, num: u32) -> Result<PathBuf> {
+    let path = get_path(directory, format!("hif_{:06}.nfs", num));
+    if path.exists() {
+        Ok(path)
+    } else {
+        Err(Error::DiscFormat(format!("Failed to locate {}", path.display())))
+    }
+}

 impl DiscIONFS {
-    fn get_path<P>(&self, path: P) -> PathBuf
-    where P: AsRef<Path> {
-        let mut buf = self.directory.clone();
-        for component in path.as_ref().components() {
-            match component {
-                Component::ParentDir => {
-                    buf.pop();
-                }
-                _ => buf.push(component),
-            }
-        }
-        buf
-    }
-
-    fn get_nfs(&self, num: u32) -> Result<PathBuf> {
-        let path = self.get_path(format!("hif_{:06}.nfs", num));
-        if path.exists() {
-            Ok(path)
-        } else {
-            Err(Error::DiscFormat(format!("Failed to locate {}", path.display())))
-        }
-    }
-
-    pub(crate) fn validate_files(&mut self) -> Result<()> {
+    pub fn load_files(&mut self, directory: &Path) -> Result<()> {
         {
             // Load key file
             let primary_key_path =
-                self.get_path(["..", "code", "htk.bin"].iter().collect::<PathBuf>());
-            let secondary_key_path = self.get_path("htk.bin");
+                get_path(directory, ["..", "code", "htk.bin"].iter().collect::<PathBuf>());
+            let secondary_key_path = get_path(directory, "htk.bin");
             let mut key_path = primary_key_path.canonicalize();
             if key_path.is_err() {
                 key_path = secondary_key_path.canonicalize();
             }
@@ -355,19 +319,47 @@ impl DiscIONFS {
                 .read(&mut self.key)
                 .map_err(|v| Error::Io(format!("Failed to read {}", resolved_path.display()), v))?;
         }
+
         {
             // Load header from first file
-            let path = self.get_nfs(0)?;
+            let path = get_nfs(directory, 0)?;
+            self.inner.add(&path)?;
+
             let mut file = BufReader::new(
                 File::open(&path).with_context(|| format!("Opening file {}", path.display()))?,
             );
-            let header = NFSHeader::from_reader(&mut file)
+            let header: NFSHeader = read_from(&mut file)
                 .with_context(|| format!("Reading NFS header from file {}", path.display()))?;
+            header.validate()?;
+            // log::debug!("{:?}", header);
+
             // Ensure remaining files exist
             for i in 1..header.calculate_num_files() {
-                self.get_nfs(i)?;
+                self.inner.add(&get_nfs(directory, i)?)?;
             }
-            self.header = Option::from(header);
+
+            // Calculate sizes
+            let num_sectors =
+                header.lba_ranges().iter().map(|range| range.num_sectors.get()).sum::<u32>();
+            let max_sector = header
+                .lba_ranges()
+                .iter()
+                .map(|range| range.start_sector.get() + range.num_sectors.get())
+                .max()
+                .unwrap();
+            let raw_size = size_of::<NFSHeader>() + (num_sectors as usize * SECTOR_SIZE);
+            let data_size = max_sector as usize * SECTOR_SIZE;
+            if raw_size > self.inner.len() as usize {
+                return Err(Error::DiscFormat(format!(
+                    "NFS raw size mismatch: expected at least {}, got {}",
+                    raw_size,
+                    self.inner.len()
+                )));
+            }
+
+            self.header = header;
+            self.raw_size = raw_size as u64;
+            self.disc_size = data_size as u64;
         }
         Ok(())
     }
diff --git a/src/io/nkit.rs b/src/io/nkit.rs
new file mode 100644
index 0000000..12e997d
--- /dev/null
+++ b/src/io/nkit.rs
@@ -0,0 +1,146 @@
+use std::{
+    io,
+    io::{Read, Seek, SeekFrom},
+};
+
+use crate::{
+    io::MagicBytes,
+    util::reader::{read_from, read_u16_be, read_u32_be, read_u64_be, read_vec},
+    DiscMeta,
+};
+
+#[allow(unused)]
+#[repr(u16)]
+enum NKitHeaderFlags {
+    Size = 0x1,
+    Crc32 = 0x2,
+    Md5 = 0x4,
+    Sha1 = 0x8,
+    Xxhash64 = 0x10,
+    Key = 0x20,
+    Encrypted = 0x40,
+    ExtraData = 0x80,
+    IndexFile = 0x100,
+}
+
+const NKIT_HEADER_V1_FLAGS: u16 = NKitHeaderFlags::Crc32 as u16
+    | NKitHeaderFlags::Md5 as u16
+    | NKitHeaderFlags::Sha1 as u16
+    | NKitHeaderFlags::Xxhash64 as u16;
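+// For reference, a version 1 header (fixed CRC32 | MD5 | SHA-1 | XXH64 flags)
+// works out to 8 (version string) + 4 + 16 + 20 + 8 = 56 bytes:
+//
+//     assert_eq!(calc_header_size(1, NKIT_HEADER_V1_FLAGS, 0), 56);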
+const fn calc_header_size(version: u8, flags: u16, key_len: u32) -> usize {
+    let mut size = 8;
+    if version >= 2 {
+        // header size + flags
+        size += 4;
+    }
+    if flags & NKitHeaderFlags::Size as u16 != 0 {
+        size += 8;
+    }
+    if flags & NKitHeaderFlags::Crc32 as u16 != 0 {
+        size += 4;
+    }
+    if flags & NKitHeaderFlags::Md5 as u16 != 0 {
+        size += 16;
+    }
+    if flags & NKitHeaderFlags::Sha1 as u16 != 0 {
+        size += 20;
+    }
+    if flags & NKitHeaderFlags::Xxhash64 as u16 != 0 {
+        size += 8;
+    }
+    if flags & NKitHeaderFlags::Key as u16 != 0 {
+        size += key_len as usize + 2;
+    }
+    size
+}
+
+#[allow(unused)]
+#[derive(Debug, Clone)]
+pub struct NKitHeader {
+    pub version: u8,
+    pub flags: u16,
+    pub size: Option<u64>,
+    pub crc32: Option<u32>,
+    pub md5: Option<[u8; 16]>,
+    pub sha1: Option<[u8; 20]>,
+    pub xxhash64: Option<u64>,
+}
+
+const VERSION_PREFIX: [u8; 7] = *b"NKIT  v";
+
+impl NKitHeader {
+    pub fn try_read_from<R>(reader: &mut R) -> Option<Self>
+    where R: Read + Seek + ?Sized {
+        let magic: MagicBytes = read_from(reader).ok()?;
+        if magic == *b"NKIT" {
+            reader.seek(SeekFrom::Current(-4)).ok()?;
+            match NKitHeader::read_from(reader) {
+                Ok(header) => Some(header),
+                Err(e) => {
+                    log::warn!("Failed to read NKit header: {}", e);
+                    None
+                }
+            }
+        } else {
+            None
+        }
+    }
+
+    pub fn read_from<R>(reader: &mut R) -> io::Result<Self>
+    where R: Read + ?Sized {
+        let version_string: [u8; 8] = read_from(reader)?;
+        if version_string[0..7] != VERSION_PREFIX
+            || version_string[7] < b'1'
+            || version_string[7] > b'9'
+        {
+            return Err(io::Error::new(
+                io::ErrorKind::InvalidData,
+                "Invalid NKit header version string",
+            ));
+        }
+        let version = version_string[7] - b'0';
+        let header_size = match version {
+            1 => calc_header_size(version, NKIT_HEADER_V1_FLAGS, 0) as u16,
+            2 => read_u16_be(reader)?,
+            _ => {
+                return Err(io::Error::new(
+                    io::ErrorKind::InvalidData,
+                    format!("Unsupported NKit header version: {}", version),
+                ));
+            }
+        };
+
+        let mut remaining_header_size = header_size as usize - 8;
+        if version >= 2 {
+            // We read the header size already
+            remaining_header_size -= 2;
+        }
+        let header_bytes = read_vec(reader, remaining_header_size)?;
+        let mut reader = &header_bytes[..];
+
+        let flags = if version == 1 { NKIT_HEADER_V1_FLAGS } else { read_u16_be(&mut reader)? };
+        let size = (flags & NKitHeaderFlags::Size as u16 != 0)
+            .then(|| read_u64_be(&mut reader))
+            .transpose()?;
+        let crc32 = (flags & NKitHeaderFlags::Crc32 as u16 != 0)
+            .then(|| read_u32_be(&mut reader))
+            .transpose()?;
+        let md5 = (flags & NKitHeaderFlags::Md5 as u16 != 0)
+            .then(|| read_from::<[u8; 16], _>(&mut reader))
+            .transpose()?;
+        let sha1 = (flags & NKitHeaderFlags::Sha1 as u16 != 0)
+            .then(|| read_from::<[u8; 20], _>(&mut reader))
+            .transpose()?;
+        let xxhash64 = (flags & NKitHeaderFlags::Xxhash64 as u16 != 0)
+            .then(|| read_u64_be(&mut reader))
+            .transpose()?;
+        Ok(Self { version, flags, size, crc32, md5, sha1, xxhash64 })
+    }
+}
+
+impl From<&NKitHeader> for DiscMeta {
+    fn from(value: &NKitHeader) -> Self {
+        Self { crc32: value.crc32, md5: value.md5, sha1: value.sha1, xxhash64: value.xxhash64 }
+    }
+}
diff --git a/src/io/split.rs b/src/io/split.rs
new file mode 100644
index 0000000..8b8fb2b
--- /dev/null
+++ b/src/io/split.rs
@@ -0,0 +1,165 @@
+use std::{
+    fs::File,
+    io,
+    io::{Read, Seek, SeekFrom},
+    path::{Path, PathBuf},
+};
+
+use crate::{ErrorContext, ReadStream, Result, ResultContext};
+
+#[derive(Debug)]
+pub struct SplitFileReader {
+    files: Vec<Split<PathBuf>>,
+    open_file: Option<Split<File>>,
+    pos: u64,
+}
+
+#[derive(Debug, Clone)]
+struct Split<T> {
+    inner: T,
+    begin: u64,
+    size: u64,
+}
+
+impl<T> Split<T> {
+    fn contains(&self, pos: u64) -> bool { self.begin <= pos && pos < self.begin + self.size }
+}
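+// The three naming conventions probed below, for illustrative base paths
+// `disc.iso` / `disc.wbfs`:
+//   split_path_1: disc.iso  -> disc.iso.1, disc.iso.2, ...
+//   split_path_2: disc.iso  -> disc.part1.iso, disc.part2.iso, ...
+//   split_path_3: disc.wbfs -> disc.wbf1, disc.wbf2, ...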
+// .iso.1, .iso.2, etc.
+fn split_path_1(input: &Path, index: u32) -> PathBuf {
+    let input_str = input.to_str().unwrap_or("[INVALID]");
+    let mut out = input_str.to_string();
+    out.push('.');
+    out.push(char::from_digit(index, 10).unwrap());
+    PathBuf::from(out)
+}
+
+// .part1.iso, .part2.iso, etc.
+fn split_path_2(input: &Path, index: u32) -> PathBuf {
+    let extension = input.extension().and_then(|s| s.to_str()).unwrap_or("iso");
+    let input_without_ext = input.with_extension("");
+    let input_str = input_without_ext.to_str().unwrap_or("[INVALID]");
+    let mut out = input_str.to_string();
+    out.push_str(".part");
+    out.push(char::from_digit(index, 10).unwrap());
+    out.push('.');
+    out.push_str(extension);
+    PathBuf::from(out)
+}
+
+// .wbf1, .wbf2, etc.
+fn split_path_3(input: &Path, index: u32) -> PathBuf {
+    let input_str = input.to_str().unwrap_or("[INVALID]");
+    let mut chars = input_str.chars();
+    chars.next_back();
+    let mut out = chars.as_str().to_string();
+    out.push(char::from_digit(index, 10).unwrap());
+    PathBuf::from(out)
+}
+
+impl SplitFileReader {
+    pub fn empty() -> Self { Self { files: Vec::new(), open_file: None, pos: 0 } }
+
+    pub fn new(path: &Path) -> Result<Self> {
+        let mut files = vec![];
+        let mut begin = 0;
+        match path.metadata() {
+            Ok(metadata) => {
+                files.push(Split { inner: path.to_path_buf(), begin, size: metadata.len() });
+                begin += metadata.len();
+            }
+            Err(e) => {
+                return Err(e.context(format!("Failed to stat file {}", path.display())));
+            }
+        }
+        for path_fn in [split_path_1, split_path_2, split_path_3] {
+            let mut index = 1;
+            loop {
+                let path = path_fn(path, index);
+                if let Ok(metadata) = path.metadata() {
+                    files.push(Split { inner: path, begin, size: metadata.len() });
+                    begin += metadata.len();
+                    index += 1;
+                } else {
+                    break;
+                }
+            }
+            if index > 1 {
+                break;
+            }
+        }
+        Ok(Self { files, open_file: None, pos: 0 })
+    }
+
+    pub fn add(&mut self, path: &Path) -> Result<()> {
+        let begin = self.len();
+        let metadata =
+            path.metadata().context(format!("Failed to stat file {}", path.display()))?;
+        self.files.push(Split { inner: path.to_path_buf(), begin, size: metadata.len() });
+        Ok(())
+    }
+
+    pub fn reset(&mut self) {
+        self.open_file = None;
+        self.pos = 0;
+    }
+
+    pub fn len(&self) -> u64 { self.files.last().map_or(0, |f| f.begin + f.size) }
+}
+
+impl Read for SplitFileReader {
+    fn read(&mut self, mut buf: &mut [u8]) -> io::Result<usize> {
+        let mut total = 0;
+        while !buf.is_empty() {
+            if let Some(split) = &mut self.open_file {
+                let n = buf.len().min((split.begin + split.size - self.pos) as usize);
+                if n == 0 {
+                    self.open_file = None;
+                    continue;
+                }
+                split.inner.read_exact(&mut buf[..n])?;
+                total += n;
+                self.pos += n as u64;
+                buf = &mut buf[n..];
+            } else if let Some(split) = self.files.iter().find(|f| f.contains(self.pos)) {
+                let mut file = File::open(&split.inner)?;
+                if self.pos > split.begin {
+                    file.seek(SeekFrom::Start(self.pos - split.begin))?;
+                }
+                self.open_file = Some(Split { inner: file, begin: split.begin, size: split.size });
+            } else {
+                break;
+            }
+        }
+        Ok(total)
+    }
+}
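+// Usage sketch (hypothetical paths): a reader over `game.wbfs` + `game.wbf1`
+// behaves as one contiguous stream:
+//
+//     let mut reader = SplitFileReader::new(Path::new("game.wbfs"))?;
+//     let mut magic = [0u8; 4];
+//     reader.read_exact(&mut magic)?; // reads span file boundaries transparently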
+impl Seek for SplitFileReader {
+    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+        self.pos = match pos {
+            SeekFrom::Start(pos) => pos,
+            SeekFrom::Current(offset) => self.pos.saturating_add_signed(offset),
+            SeekFrom::End(offset) => self.len().saturating_add_signed(offset),
+        };
+        if let Some(split) = &mut self.open_file {
+            if split.contains(self.pos) {
+                // Seek within the open file
+                split.inner.seek(SeekFrom::Start(self.pos - split.begin))?;
+            } else {
+                self.open_file = None;
+            }
+        }
+        Ok(self.pos)
+    }
+}
+
+impl ReadStream for SplitFileReader {
+    fn stable_stream_len(&mut self) -> io::Result<u64> { Ok(self.len()) }
+
+    fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
+}
+
+impl Clone for SplitFileReader {
+    fn clone(&self) -> Self { Self { files: self.files.clone(), open_file: None, pos: self.pos } }
+}
diff --git a/src/io/wbfs.rs b/src/io/wbfs.rs
new file mode 100644
index 0000000..f935585
--- /dev/null
+++ b/src/io/wbfs.rs
@@ -0,0 +1,203 @@
+use std::{
+    cmp::min,
+    io,
+    io::{BufReader, Read, Seek, SeekFrom},
+    mem::size_of,
+    path::Path,
+};
+
+use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes};
+
+use crate::{
+    disc::SECTOR_SIZE,
+    io::{nkit::NKitHeader, split::SplitFileReader, DiscIO, DiscMeta, MagicBytes},
+    util::reader::{read_from, read_vec},
+    Error, ReadStream, Result, ResultContext,
+};
+
+pub const WBFS_MAGIC: MagicBytes = *b"WBFS";
+
+#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
+#[repr(C, align(4))]
+struct WBFSHeader {
+    magic: MagicBytes,
+    num_sectors: U32,
+    sector_size_shift: u8,
+    wbfs_sector_size_shift: u8,
+    _pad: [u8; 2],
+}
+
+impl WBFSHeader {
+    fn sector_size(&self) -> u32 { 1 << self.sector_size_shift }
+
+    fn wbfs_sector_size(&self) -> u32 { 1 << self.wbfs_sector_size_shift }
+
+    // fn align_lba(&self, x: u32) -> u32 { (x + self.sector_size() - 1) & !(self.sector_size() - 1) }
+    //
+    // fn num_wii_sectors(&self) -> u32 {
+    //     (self.num_sectors.get() / SECTOR_SIZE as u32) * self.sector_size()
+    // }
+    //
+    // fn max_wii_sectors(&self) -> u32 { NUM_WII_SECTORS }
+    //
+    // fn num_wbfs_sectors(&self) -> u32 {
+    //     self.num_wii_sectors() >> (self.wbfs_sector_size_shift - 15)
+    // }
+
+    fn max_wbfs_sectors(&self) -> u32 { NUM_WII_SECTORS >> (self.wbfs_sector_size_shift - 15) }
+}
+
+const DISC_HEADER_SIZE: usize = 0x100;
+const NUM_WII_SECTORS: u32 = 143432 * 2; // Double layer discs
+
+pub struct DiscIOWBFS {
+    pub inner: SplitFileReader,
+    /// WBFS header
+    header: WBFSHeader,
+    /// Map of Wii LBAs to WBFS LBAs
+    wlba_table: Vec<U16>,
+    /// Optional NKit header
+    nkit_header: Option<NKitHeader>,
+}
+
+impl DiscIOWBFS {
+    pub fn new(filename: &Path) -> Result<Self> {
+        let mut inner = BufReader::new(SplitFileReader::new(filename)?);
+
+        let header: WBFSHeader = read_from(&mut inner).context("Reading WBFS header")?;
+        if header.magic != WBFS_MAGIC {
+            return Err(Error::DiscFormat("Invalid WBFS magic".to_string()));
+        }
+        // log::debug!("{:?}", header);
+        // log::debug!("sector_size: {}", header.sector_size());
+        // log::debug!("wbfs_sector_size: {}", header.wbfs_sector_size());
+        let file_len = inner.stable_stream_len().context("Getting WBFS file size")?;
+        let expected_file_len = header.num_sectors.get() as u64 * header.sector_size() as u64;
+        if file_len != expected_file_len {
+            return Err(Error::DiscFormat(format!(
+                "Invalid WBFS file size: {}, expected {}",
+                file_len, expected_file_len
+            )));
+        }
+
+        let disc_table: Vec<u8> =
+            read_vec(&mut inner, header.sector_size() as usize - size_of::<WBFSHeader>())
+                .context("Reading WBFS disc table")?;
+        if disc_table[0] != 1 {
+            return Err(Error::DiscFormat("WBFS doesn't contain a disc".to_string()));
+        }
+        if disc_table[1../*max_disc as usize*/].iter().any(|&x| x != 0) {
+            return Err(Error::DiscFormat("Only single WBFS discs are supported".to_string()));
+        }
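+        // Worked example (illustrative): with wbfs_sector_size_shift = 21
+        // (2 MiB WBFS sectors), max_wbfs_sectors() = 286864 >> (21 - 15) = 4482,
+        // so the table read below holds one big-endian u16 per WBFS sector.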
+        // Read WBFS LBA table
+        inner
+            .seek(SeekFrom::Start(header.sector_size() as u64 + DISC_HEADER_SIZE as u64))
+            .context("Seeking to WBFS LBA table")?; // Skip header
+        let wlba_table: Vec<U16> = read_vec(&mut inner, header.max_wbfs_sectors() as usize)
+            .context("Reading WBFS LBA table")?;
+
+        // Read NKit header if present (always at 0x10000)
+        inner.seek(SeekFrom::Start(0x10000)).context("Seeking to NKit header")?;
+        let nkit_header = NKitHeader::try_read_from(&mut inner);
+
+        // Reset reader
+        let mut inner = inner.into_inner();
+        inner.reset();
+        Ok(Self { inner, header, wlba_table, nkit_header })
+    }
+}
+
+impl DiscIO for DiscIOWBFS {
+    fn open(&self) -> Result<Box<dyn ReadStream>> {
+        Ok(Box::new(WBFSReadStream {
+            inner: BufReader::new(self.inner.clone()),
+            header: self.header.clone(),
+            wlba_table: self.wlba_table.clone(),
+            wlba: u32::MAX,
+            pos: 0,
+            disc_size: self.nkit_header.as_ref().and_then(|h| h.size),
+        }))
+    }
+
+    fn meta(&self) -> Result<DiscMeta> {
+        Ok(self.nkit_header.as_ref().map(DiscMeta::from).unwrap_or_default())
+    }
+
+    fn disc_size(&self) -> Option<u64> { self.nkit_header.as_ref().and_then(|h| h.size) }
+}
+
+struct WBFSReadStream {
+    /// File reader
+    inner: BufReader<SplitFileReader>,
+    /// WBFS header
+    header: WBFSHeader,
+    /// Map of Wii LBAs to WBFS LBAs
+    wlba_table: Vec<U16>,
+    /// Current WBFS LBA
+    wlba: u32,
+    /// Current stream offset
+    pos: u64,
+    /// Optional known size
+    disc_size: Option<u64>,
+}
+
+impl WBFSReadStream {
+    fn disc_size(&self) -> u64 {
+        self.disc_size.unwrap_or(NUM_WII_SECTORS as u64 * SECTOR_SIZE as u64)
+    }
+}
+
+impl Read for WBFSReadStream {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        let wlba = (self.pos >> self.header.wbfs_sector_size_shift) as u32;
+        let wlba_size = self.header.wbfs_sector_size() as u64;
+        let wlba_offset = self.pos & (wlba_size - 1);
+        if wlba != self.wlba {
+            if self.pos >= self.disc_size() || wlba >= self.header.max_wbfs_sectors() {
+                return Ok(0);
+            }
+            let wlba_start = wlba_size * self.wlba_table[wlba as usize].get() as u64;
+            self.inner.seek(SeekFrom::Start(wlba_start + wlba_offset))?;
+            self.wlba = wlba;
+        }
+
+        let to_read = min(buf.len(), (wlba_size - wlba_offset) as usize);
+        let read = self.inner.read(&mut buf[..to_read])?;
+        self.pos += read as u64;
+        Ok(read)
+    }
+}
+
+impl Seek for WBFSReadStream {
+    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+        let new_pos = match pos {
+            SeekFrom::Start(v) => v,
+            SeekFrom::End(_) => {
+                return Err(io::Error::new(
+                    io::ErrorKind::Unsupported,
+                    "WBFSReadStream: SeekFrom::End is not supported",
+                ));
+            }
+            SeekFrom::Current(v) => self.pos.saturating_add_signed(v),
+        };
+
+        // Compute the WBFS LBA of the new position (not the old one)
+        let new_wlba = (new_pos >> self.header.wbfs_sector_size_shift) as u32;
+        if new_wlba == self.wlba {
+            // Seek within the same WBFS LBA
+            self.inner.seek(SeekFrom::Current(new_pos as i64 - self.pos as i64))?;
+        } else {
+            // Seek to a different WBFS LBA, handled by next read
+            self.wlba = u32::MAX;
+        }
+
+        self.pos = new_pos;
+        Ok(new_pos)
+    }
+}
+
+impl ReadStream for WBFSReadStream {
+    fn stable_stream_len(&mut self) -> io::Result<u64> { Ok(self.disc_size()) }
+
+    fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
+}
diff --git a/src/io/wia.rs b/src/io/wia.rs
index 9ee36df..88134cf 100644
--- a/src/io/wia.rs
+++ b/src/io/wia.rs
@@ -2,15 +2,16 @@ use std::{
     cmp::min,
     fs::File,
     io,
-    io::{BufReader, Read, Seek, SeekFrom, Write},
+    io::{BufReader, Read, Seek, SeekFrom},
+    mem::size_of,
     path::{Path, PathBuf},
+    sync::{Arc, Mutex},
+    time::Instant,
 };

-use aes::{
-    cipher::{block_padding::NoPadding, BlockEncryptMut, KeyIvInit},
-    Aes128, Block,
-};
+use rayon::iter::{IntoParallelIterator, ParallelIterator};
 use sha1::{Digest, Sha1};
+use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes};

 use crate::{
     array_ref, array_ref_mut,
@@ -18,68
+19,27 @@ use crate::{ wii::{BLOCK_SIZE, HASHES_SIZE}, SECTOR_SIZE, }, - io::{DiscIO, DiscIOOptions}, + io::{aes_encrypt, nkit::NKitHeader, DiscIO, DiscMeta, HashBytes, KeyBytes, MagicBytes}, + static_assert, streams::ReadStream, util::{ + compress::{lzma2_props_decode, lzma_props_decode, new_lzma2_decoder, new_lzma_decoder}, lfg::LaggedFibonacci, - reader::{ - read_bytes, read_vec, struct_size, write_vec, FromReader, ToWriter, DYNAMIC_SIZE, - }, + reader::{read_from, read_u16_be, read_vec}, take_seek::TakeSeekExt, }, - Error, Result, ResultContext, + Error, OpenOptions, Result, ResultContext, }; -/// SHA-1 hash bytes -type HashBytes = [u8; 20]; - -/// AES key bytes -type KeyBytes = [u8; 16]; - -/// Magic bytes -type MagicBytes = [u8; 4]; - -/// AES-128-CBC encryptor -type Aes128Cbc = cbc::Encryptor; - -#[derive(Clone, Debug, Eq, PartialEq)] -pub(crate) enum WIARVZMagic { - Wia, - Rvz, -} - -impl FromReader for WIARVZMagic { - type Args<'a> = (); - - const STATIC_SIZE: usize = MagicBytes::STATIC_SIZE; - - fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result - where R: Read + ?Sized { - match &MagicBytes::from_reader(reader)? { - b"WIA\x01" => Ok(Self::Wia), - b"RVZ\x01" => Ok(Self::Rvz), - _ => Err(io::Error::new(io::ErrorKind::InvalidData, "Invalid WIA/RVZ magic")), - } - } -} - -impl ToWriter for WIARVZMagic { - fn to_writer(&self, writer: &mut W) -> io::Result<()> - where W: Write + ?Sized { - match self { - Self::Wia => b"WIA\x01".to_writer(writer), - Self::Rvz => b"RVZ\x01".to_writer(writer), - } - } - - fn write_size(&self) -> usize { Self::STATIC_SIZE } -} +pub const WIA_MAGIC: MagicBytes = *b"WIA\x01"; +pub const RVZ_MAGIC: MagicBytes = *b"RVZ\x01"; /// This struct is stored at offset 0x0 and is 0x48 bytes long. The wit source code says its format /// will never be changed. -#[derive(Clone, Debug)] -pub(crate) struct WIAFileHeader { - pub(crate) magic: WIARVZMagic, +#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)] +#[repr(C, align(4))] +pub struct WIAFileHeader { + pub magic: MagicBytes, /// The WIA format version. /// /// A short note from the wit source code about how version numbers are encoded: @@ -90,173 +50,113 @@ pub(crate) struct WIAFileHeader { /// // If D != 0x00 && D != 0xff => append: 'beta' D /// //----------------------------------------------------- /// ``` - pub(crate) version: u32, + pub version: U32, /// If the reading program supports the version of WIA indicated here, it can read the file. /// /// [version](Self::version) can be higher than `version_compatible`. - pub(crate) version_compatible: u32, + pub version_compatible: U32, /// The size of the [WIADisc] struct. - pub(crate) disc_size: u32, + pub disc_size: U32, /// The SHA-1 hash of the [WIADisc] struct. /// /// The number of bytes to hash is determined by [disc_size](Self::disc_size). - pub(crate) disc_hash: HashBytes, + pub disc_hash: HashBytes, /// The original size of the ISO. - pub(crate) iso_file_size: u64, + pub iso_file_size: U64, /// The size of this file. - pub(crate) wia_file_size: u64, + pub wia_file_size: U64, /// The SHA-1 hash of this struct, up to but not including `file_head_hash` itself. 
-    pub(crate) file_head_hash: HashBytes,
+    pub file_head_hash: HashBytes,
 }

-impl FromReader for WIAFileHeader {
-    type Args<'a> = ();
+static_assert!(size_of::<WIAFileHeader>() == 0x48);

-    const STATIC_SIZE: usize = struct_size([
-        WIARVZMagic::STATIC_SIZE, // magic
-        u32::STATIC_SIZE,         // version
-        u32::STATIC_SIZE,         // version_compatible
-        u32::STATIC_SIZE,         // disc_size
-        HashBytes::STATIC_SIZE,   // disc_hash
-        u64::STATIC_SIZE,         // iso_file_size
-        u64::STATIC_SIZE,         // wia_file_size
-        HashBytes::STATIC_SIZE,   // file_head_hash
-    ]);
-
-    fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
-    where R: Read + ?Sized {
-        Ok(Self {
-            magic: <_>::from_reader(reader)?,
-            version: <_>::from_reader(reader)?,
-            version_compatible: <_>::from_reader(reader)?,
-            disc_size: <_>::from_reader(reader)?,
-            disc_hash: <_>::from_reader(reader)?,
-            iso_file_size: <_>::from_reader(reader)?,
-            wia_file_size: <_>::from_reader(reader)?,
-            file_head_hash: <_>::from_reader(reader)?,
-        })
-    }
-}
-
-impl ToWriter for WIAFileHeader {
-    fn to_writer<W>(&self, writer: &mut W) -> io::Result<()>
-    where W: Write + ?Sized {
-        let mut buf = [0u8; Self::STATIC_SIZE - HashBytes::STATIC_SIZE];
-        let mut out = buf.as_mut();
-        self.magic.to_writer(&mut out)?;
-        self.version.to_writer(&mut out)?;
-        self.version_compatible.to_writer(&mut out)?;
-        self.disc_size.to_writer(&mut out)?;
-        self.disc_hash.to_writer(&mut out)?;
-        self.iso_file_size.to_writer(&mut out)?;
-        self.wia_file_size.to_writer(&mut out)?;
-        buf.to_writer(writer)?;
-        // Calculate and write the hash
-        hash_bytes(&buf).to_writer(writer)?;
+impl WIAFileHeader {
+    pub fn validate(&self) -> Result<()> {
+        // Check magic
+        if self.magic != WIA_MAGIC && self.magic != RVZ_MAGIC {
+            return Err(Error::DiscFormat(format!("Invalid WIA/RVZ magic: {:#X?}", self.magic)));
+        }
+        // Check file head hash
+        let bytes = self.as_bytes();
+        verify_hash(&bytes[..bytes.len() - size_of::<HashBytes>()], &self.file_head_hash)?;
+        // Check version compatibility
+        if self.version_compatible.get() < 0x30000 {
+            return Err(Error::DiscFormat(format!(
+                "WIA/RVZ version {:#X} is not supported",
+                self.version_compatible
+            )));
+        }
         Ok(())
     }

-    fn write_size(&self) -> usize { Self::STATIC_SIZE }
+    pub fn is_rvz(&self) -> bool { self.magic == RVZ_MAGIC }
 }

 /// Disc type
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub(crate) enum DiscType {
+pub enum DiscType {
     /// GameCube disc
-    GameCube = 1,
+    GameCube,
     /// Wii disc
-    Wii = 2,
+    Wii,
 }

-impl FromReader for DiscType {
-    type Args<'a> = ();
+impl TryFrom<u32> for DiscType {
+    type Error = Error;

-    const STATIC_SIZE: usize = u32::STATIC_SIZE;
-
-    fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
-    where R: Read + ?Sized {
-        match u32::from_reader(reader)? {
+    fn try_from(value: u32) -> Result<Self> {
+        match value {
             1 => Ok(Self::GameCube),
             2 => Ok(Self::Wii),
-            _ => Err(io::Error::new(io::ErrorKind::InvalidData, "Invalid disc type")),
+            v => Err(Error::DiscFormat(format!("Invalid disc type {}", v))),
         }
     }
 }

-impl ToWriter for DiscType {
-    fn to_writer<W>(&self, writer: &mut W) -> io::Result<()>
-    where W: Write + ?Sized {
-        match self {
-            Self::GameCube => 1u32.to_writer(writer),
-            Self::Wii => 2u32.to_writer(writer),
-        }
-    }
-
-    fn write_size(&self) -> usize { Self::STATIC_SIZE }
-}
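+// Decoding follows the raw big-endian field, e.g. (illustrative; this is the
+// same call `WIADisc::validate` makes below):
+//
+//     let disc_type = DiscType::try_from(disc.disc_type.get())?; // 2 -> DiscType::Wii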
 /// Compression type
-#[non_exhaustive]
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub(crate) enum Compression {
+pub enum Compression {
     /// No compression.
-    None = 0,
+    None,
     /// (WIA only) See [WIASegment]
-    Purge = 1,
+    Purge,
     /// BZIP2 compression
-    Bzip2 = 2,
+    Bzip2,
     /// LZMA compression
-    Lzma = 3,
+    Lzma,
     /// LZMA2 compression
-    Lzma2 = 4,
+    Lzma2,
     /// (RVZ only) Zstandard compression
-    Zstandard = 5,
+    Zstandard,
 }

-impl FromReader for Compression {
-    type Args<'a> = ();
+impl TryFrom<u32> for Compression {
+    type Error = Error;

-    const STATIC_SIZE: usize = u32::STATIC_SIZE;
-
-    fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
-    where R: Read + ?Sized {
-        match u32::from_reader(reader)? {
+    fn try_from(value: u32) -> Result<Self> {
+        match value {
             0 => Ok(Self::None),
             1 => Ok(Self::Purge),
             2 => Ok(Self::Bzip2),
             3 => Ok(Self::Lzma),
             4 => Ok(Self::Lzma2),
             5 => Ok(Self::Zstandard),
-            _ => Err(io::Error::new(io::ErrorKind::InvalidData, "Invalid compression type")),
+            v => Err(Error::DiscFormat(format!("Invalid compression type {}", v))),
         }
     }
 }

-impl ToWriter for Compression {
-    fn to_writer<W>(&self, writer: &mut W) -> io::Result<()>
-    where W: Write + ?Sized {
-        match self {
-            Self::None => 0u32.to_writer(writer),
-            Self::Purge => 1u32.to_writer(writer),
-            Self::Bzip2 => 2u32.to_writer(writer),
-            Self::Lzma => 3u32.to_writer(writer),
-            Self::Lzma2 => 4u32.to_writer(writer),
-            Self::Zstandard => 5u32.to_writer(writer),
-        }
-    }
-
-    fn write_size(&self) -> usize { Self::STATIC_SIZE }
-}
-
 const DISC_HEAD_SIZE: usize = 0x80;

 /// This struct is stored at offset 0x48, immediately after [WIAFileHeader].
-#[derive(Clone, Debug)]
-pub(crate) struct WIADisc {
-    /// The disc type.
-    pub(crate) disc_type: DiscType,
+#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
+#[repr(C, align(4))]
+pub struct WIADisc {
+    /// The disc type. (1 = GameCube, 2 = Wii)
+    pub disc_type: U32,
     /// The compression type.
-    pub(crate) compression: Compression,
+    pub compression: U32,
     /// The compression level used by the compressor.
     ///
     /// The possible values are compressor-specific.
@@ -264,7 +164,7 @@ pub(crate) struct WIADisc {
     /// RVZ only:
     /// > This is signed (instead of unsigned) to support negative compression levels in
     /// [Zstandard](Compression::Zstandard) (RVZ only).
-    pub(crate) compression_level: i32,
+    pub compression_level: I32,
     /// The size of the chunks that data is divided into.
     ///
     /// WIA only:
@@ -279,35 +179,35 @@ pub(crate) struct WIADisc {
     /// > - For Wii partition data, each chunk contains one [WIAExceptionList] which contains
     /// exceptions for that chunk (and no other chunks). Offset 0 refers to the first hash of the
     /// current chunk, not the first hash of the full 2 MiB of data.
-    pub(crate) chunk_size: u32,
+    pub chunk_size: U32,
     /// The first 0x80 bytes of the disc image.
-    pub(crate) disc_head: [u8; DISC_HEAD_SIZE],
+    pub disc_head: [u8; DISC_HEAD_SIZE],
     /// The number of [WIAPartition] structs.
-    pub(crate) num_partitions: u32,
+    pub num_partitions: U32,
     /// The size of one [WIAPartition] struct.
     ///
     /// If this is smaller than the size of [WIAPartition], fill the missing bytes with 0x00.
-    pub(crate) partition_type_size: u32,
+    pub partition_type_size: U32,
     /// The offset in the file where the [WIAPartition] structs are stored (uncompressed).
-    pub(crate) partition_offset: u64,
+    pub partition_offset: U64,
     /// The SHA-1 hash of the [WIAPartition] structs.
     ///
     /// The number of bytes to hash is determined by `num_partitions * partition_type_size`.
-    pub(crate) partition_hash: HashBytes,
+    pub partition_hash: HashBytes,
     /// The number of [WIARawData] structs.
-    pub(crate) num_raw_data: u32,
+    pub num_raw_data: U32,
     /// The offset in the file where the [WIARawData] structs are stored (compressed).
-    pub(crate) raw_data_offset: u64,
+    pub raw_data_offset: U64,
     /// The total compressed size of the [WIARawData] structs.
-    pub(crate) raw_data_size: u32,
+    pub raw_data_size: U32,
     /// The number of [WIAGroup] structs.
-    pub(crate) num_groups: u32,
+    pub num_groups: U32,
     /// The offset in the file where the [WIAGroup] structs are stored (compressed).
-    pub(crate) group_offset: u64,
+    pub group_offset: U64,
     /// The total compressed size of the [WIAGroup] structs.
-    pub(crate) group_size: u32,
+    pub group_size: U32,
     /// The number of used bytes in the [compr_data](Self::compr_data) array.
-    pub(crate) compr_data_len: u8,
+    pub compr_data_len: u8,
     /// Compressor specific data.
     ///
     /// If the compression method is [None](Compression::None), [Purge](Compression::Purge),
@@ -320,130 +220,47 @@ pub(crate) struct WIADisc {
     /// For [Lzma](Compression::Lzma), the data is 5 bytes long. The first byte encodes the `lc`,
     /// `pb`, and `lp` parameters, and the four other bytes encode the dictionary size in little
     /// endian.
-    pub(crate) compr_data: [u8; 7],
+    pub compr_data: [u8; 7],
 }

-impl FromReader for WIADisc {
-    type Args<'a> = ();
+static_assert!(size_of::<WIADisc>() == 0xDC);

-    const STATIC_SIZE: usize = struct_size([
-        DiscType::STATIC_SIZE,    // disc_type
-        Compression::STATIC_SIZE, // compression
-        i32::STATIC_SIZE,         // compression_level
-        u32::STATIC_SIZE,         // chunk_size
-        DISC_HEAD_SIZE,           // disc_head
-        u32::STATIC_SIZE,         // num_partitions
-        u32::STATIC_SIZE,         // partition_type_size
-        u64::STATIC_SIZE,         // partition_offset
-        HashBytes::STATIC_SIZE,   // partition_hash
-        u32::STATIC_SIZE,         // num_raw_data
-        u64::STATIC_SIZE,         // raw_data_offset
-        u32::STATIC_SIZE,         // raw_data_size
-        u32::STATIC_SIZE,         // num_groups
-        u64::STATIC_SIZE,         // group_offset
-        u32::STATIC_SIZE,         // group_size
-        u8::STATIC_SIZE,          // compr_data_len
-        <[u8; 7]>::STATIC_SIZE,   // compr_data
-    ]);
-
-    fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
-    where R: Read + ?Sized {
-        Ok(Self {
-            disc_type: <_>::from_reader(reader)?,
-            compression: <_>::from_reader(reader)?,
-            compression_level: <_>::from_reader(reader)?,
-            chunk_size: <_>::from_reader(reader)?,
-            disc_head: <_>::from_reader(reader)?,
-            num_partitions: <_>::from_reader(reader)?,
-            partition_type_size: <_>::from_reader(reader)?,
-            partition_offset: <_>::from_reader(reader)?,
-            partition_hash: <_>::from_reader(reader)?,
-            num_raw_data: <_>::from_reader(reader)?,
-            raw_data_offset: <_>::from_reader(reader)?,
-            raw_data_size: <_>::from_reader(reader)?,
-            num_groups: <_>::from_reader(reader)?,
-            group_offset: <_>::from_reader(reader)?,
-            group_size: <_>::from_reader(reader)?,
-            compr_data_len: <_>::from_reader(reader)?,
-            compr_data: <_>::from_reader(reader)?,
-        })
-    }
-}
-
-impl ToWriter for WIADisc {
-    fn to_writer<W>(&self, writer: &mut W) -> io::Result<()>
-    where W: Write + ?Sized {
-        self.disc_type.to_writer(writer)?;
-        self.compression.to_writer(writer)?;
-        self.compression_level.to_writer(writer)?;
-        self.chunk_size.to_writer(writer)?;
-        self.disc_head.to_writer(writer)?;
-        self.num_partitions.to_writer(writer)?;
-        self.partition_type_size.to_writer(writer)?;
-        self.partition_offset.to_writer(writer)?;
-        self.partition_hash.to_writer(writer)?;
-        self.num_raw_data.to_writer(writer)?;
-        self.raw_data_offset.to_writer(writer)?;
-        self.raw_data_size.to_writer(writer)?;
-        self.num_groups.to_writer(writer)?;
-        self.group_offset.to_writer(writer)?;
-        self.group_size.to_writer(writer)?;
-        self.compr_data_len.to_writer(writer)?;
-        self.compr_data.to_writer(writer)?;
+impl WIADisc {
+    pub fn validate(&self) -> Result<()> {
+        DiscType::try_from(self.disc_type.get())?;
+        Compression::try_from(self.compression.get())?;
+        if self.partition_type_size.get() != size_of::<WIAPartition>() as u32 {
+            return Err(Error::DiscFormat(format!(
+                "WIA partition type size is {}, expected {}",
+                self.partition_type_size.get(),
+                size_of::<WIAPartition>()
+            )));
+        }
         Ok(())
     }

-    fn write_size(&self) -> usize { Self::STATIC_SIZE }
+    pub fn compression(&self) -> Compression {
+        Compression::try_from(self.compression.get()).unwrap()
+    }
 }

-#[derive(Clone, Debug)]
-pub(crate) struct WIAPartitionData {
+#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
+#[repr(C, align(4))]
+pub struct WIAPartitionData {
     /// The sector on the disc at which this data starts.
     /// One sector is 32 KiB (or 31 KiB excluding hashes).
-    pub(crate) first_sector: u32,
+    pub first_sector: U32,
     /// The number of sectors on the disc covered by this struct.
     /// One sector is 32 KiB (or 31 KiB excluding hashes).
-    pub(crate) num_sectors: u32,
+    pub num_sectors: U32,
     /// The index of the first [WIAGroup] struct that points to the data covered by this struct.
     /// The other [WIAGroup] indices follow sequentially.
-    pub(crate) group_index: u32,
+    pub group_index: U32,
     /// The number of [WIAGroup] structs used for this data.
-    pub(crate) num_groups: u32,
+    pub num_groups: U32,
 }

-impl FromReader for WIAPartitionData {
-    type Args<'a> = ();
-
-    const STATIC_SIZE: usize = struct_size([
-        u32::STATIC_SIZE, // first_sector
-        u32::STATIC_SIZE, // num_sectors
-        u32::STATIC_SIZE, // group_index
-        u32::STATIC_SIZE, // num_groups
-    ]);
-
-    fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
-    where R: Read + ?Sized {
-        Ok(Self {
-            first_sector: <_>::from_reader(reader)?,
-            num_sectors: <_>::from_reader(reader)?,
-            group_index: <_>::from_reader(reader)?,
-            num_groups: <_>::from_reader(reader)?,
-        })
-    }
-}
-
-impl ToWriter for WIAPartitionData {
-    fn to_writer<W>(&self, writer: &mut W) -> io::Result<()>
-    where W: Write + ?Sized {
-        self.first_sector.to_writer(writer)?;
-        self.num_sectors.to_writer(writer)?;
-        self.group_index.to_writer(writer)?;
-        self.num_groups.to_writer(writer)?;
-        Ok(())
-    }
-
-    fn write_size(&self) -> usize { Self::STATIC_SIZE }
-}
+static_assert!(size_of::<WIAPartitionData>() == 0x10);

 /// This struct is used for keeping track of Wii partition data that on the actual disc is encrypted
 /// and hashed. This does not include the unencrypted area at the beginning of partitions that
@@ -455,50 +272,24 @@ impl ToWriter for WIAPartitionData {
 /// the reading program must first recalculate the hashes as done when creating a Wii disc image
 /// from scratch (see ), and must then apply the hash exceptions
 /// which are stored along with the data (see the [WIAExceptionList] section).
-#[derive(Clone, Debug)]
-pub(crate) struct WIAPartition {
+#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
+#[repr(C, align(4))]
+pub struct WIAPartition {
     /// The title key for this partition (128-bit AES), which can be used for re-encrypting the
     /// partition data.
     ///
     /// This key can be used directly, without decrypting it using the Wii common key.
-    pub(crate) partition_key: KeyBytes,
+    pub partition_key: KeyBytes,
     /// To quote the wit source code: `segment 0 is small and defined for management data (boot ..
     /// fst). segment 1 takes the remaining data.`
     ///
     /// The point at which wit splits the two segments is the FST end offset rounded up to the next
     /// 2 MiB. Giving the first segment a size which is not a multiple of 2 MiB is likely a bad idea
     /// (unless the second segment has a size of 0).
-    pub(crate) partition_data: [WIAPartitionData; 2],
+    pub partition_data: [WIAPartitionData; 2],
 }

-impl FromReader for WIAPartition {
-    type Args<'a> = ();
+static_assert!(size_of::<WIAPartition>() == 0x30);

-    const STATIC_SIZE: usize = struct_size([
-        KeyBytes::STATIC_SIZE,             // partition_key
-        WIAPartitionData::STATIC_SIZE * 2, // partition_data
-    ]);
-
-    fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
-    where R: Read + ?Sized {
-        Ok(Self {
-            partition_key: <_>::from_reader(reader)?,
-            partition_data: [<_>::from_reader(reader)?, <_>::from_reader(reader)?],
-        })
-    }
-}
-
-impl ToWriter for WIAPartition {
-    fn to_writer<W>(&self, writer: &mut W) -> io::Result<()>
-    where W: Write + ?Sized {
-        self.partition_key.to_writer(writer)?;
-        self.partition_data[0].to_writer(writer)?;
-        self.partition_data[1].to_writer(writer)?;
-        Ok(())
-    }
-
-    fn write_size(&self) -> usize { Self::STATIC_SIZE }
-}

 /// This struct is used for keeping track of disc data that is not stored as [WIAPartition].
 /// The data is stored as is (other than compression being applied).
@@ -508,51 +299,18 @@ impl ToWriter for WIAPartition {
 /// should be read from [WIADisc] instead.) This should be handled by rounding the offset down to
 /// the previous multiple of 0x8000 (and adding the equivalent amount to the size so that the end
 /// offset stays the same), not by special casing the first [WIARawData].
-#[derive(Clone, Debug)]
-pub(crate) struct WIARawData {
+#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
+#[repr(C, align(4))]
+pub struct WIARawData {
     /// The offset on the disc at which this data starts.
-    pub(crate) raw_data_offset: u64,
+    pub raw_data_offset: U64,
     /// The number of bytes on the disc covered by this struct.
-    pub(crate) raw_data_size: u64,
+    pub raw_data_size: U64,
     /// The index of the first [WIAGroup] struct that points to the data covered by this struct.
     /// The other [WIAGroup] indices follow sequentially.
-    pub(crate) group_index: u32,
+    pub group_index: U32,
     /// The number of [WIAGroup] structs used for this data.
-    pub(crate) num_groups: u32,
-}
-
-impl FromReader for WIARawData {
-    type Args<'a> = ();
-
-    const STATIC_SIZE: usize = struct_size([
-        u64::STATIC_SIZE, // raw_data_offset
-        u64::STATIC_SIZE, // raw_data_size
-        u32::STATIC_SIZE, // group_index
-        u32::STATIC_SIZE, // num_groups
-    ]);
-
-    fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
-    where R: Read + ?Sized {
-        Ok(Self {
-            raw_data_offset: <_>::from_reader(reader)?,
-            raw_data_size: <_>::from_reader(reader)?,
-            group_index: <_>::from_reader(reader)?,
-            num_groups: <_>::from_reader(reader)?,
-        })
-    }
-}
-
-impl ToWriter for WIARawData {
-    fn to_writer<W>(&self, writer: &mut W) -> io::Result<()>
-    where W: Write + ?Sized {
-        self.raw_data_offset.to_writer(writer)?;
-        self.raw_data_size.to_writer(writer)?;
-        self.group_index.to_writer(writer)?;
-        self.num_groups.to_writer(writer)?;
-        Ok(())
-    }
-
-    fn write_size(&self) -> usize { Self::STATIC_SIZE }
+    pub num_groups: U32,
 }

 /// This struct points directly to the actual disc data, stored compressed.
@@ -565,104 +323,49 @@ impl ToWriter for WIARawData {
 /// counting any [WIAExceptionList] structs. However, the last [WIAGroup] of a [WIAPartitionData]
 /// or [WIARawData] contains less data than that if `num_sectors * 0x8000` (for [WIAPartitionData])
 /// or `raw_data_size` (for [WIARawData]) is not evenly divisible by `chunk_size`.
-#[derive(Clone, Debug)]
-pub(crate) struct WIAGroup {
+#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
+#[repr(C, align(4))]
+pub struct WIAGroup {
     /// The offset in the file where the compressed data is stored.
     ///
     /// Stored as a `u32`, divided by 4.
-    pub(crate) data_offset: u32,
+    pub data_offset: U32,
     /// The size of the compressed data, including any [WIAExceptionList] structs. 0 is a special
     /// case meaning that every byte of the decompressed data is 0x00 and the [WIAExceptionList]
     /// structs (if there are supposed to be any) contain 0 exceptions.
-    pub(crate) data_size: u32,
-}
-
-impl FromReader for WIAGroup {
-    type Args<'a> = ();
-
-    const STATIC_SIZE: usize = struct_size([
-        u32::STATIC_SIZE, // data_offset
-        u32::STATIC_SIZE, // data_size
-    ]);
-
-    fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
-    where R: Read + ?Sized {
-        Ok(Self { data_offset: <_>::from_reader(reader)?, data_size: <_>::from_reader(reader)? })
-    }
-}
-
-impl ToWriter for WIAGroup {
-    fn to_writer<W>(&self, writer: &mut W) -> io::Result<()>
-    where W: Write + ?Sized {
-        self.data_offset.to_writer(writer)?;
-        self.data_size.to_writer(writer)?;
-        Ok(())
-    }
-
-    fn write_size(&self) -> usize { Self::STATIC_SIZE }
+    pub data_size: U32,
 }

 /// Compared to [WIAGroup], [RVZGroup] changes the meaning of the most significant bit of
 /// [data_size](Self::data_size) and adds one additional attribute.
-#[derive(Clone, Debug)]
-pub(crate) struct RVZGroup {
+#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
+#[repr(C, align(4))]
+pub struct RVZGroup {
     /// The offset in the file where the compressed data is stored, divided by 4.
-    pub(crate) data_offset: u32,
+    pub data_offset: U32,
     /// The most significant bit is 1 if the data is compressed using the compression method
     /// indicated in [WIADisc], and 0 if it is not compressed. The lower 31 bits are the size of
     /// the compressed data, including any [WIAExceptionList] structs. The lower 31 bits being 0 is
     /// a special case meaning that every byte of the decompressed and unpacked data is 0x00 and
     /// the [WIAExceptionList] structs (if there are supposed to be any) contain 0 exceptions.
-    pub(crate) data_size: u32,
+    pub data_size_and_flag: U32,
     /// The size after decompressing but before decoding the RVZ packing.
     /// If this is 0, RVZ packing is not used for this group.
-    pub(crate) rvz_packed_size: u32,
-    /// Extracted from the most significant bit of [data_size](Self::data_size).
-    pub(crate) is_compressed: bool,
+    pub rvz_packed_size: U32,
 }

-impl FromReader for RVZGroup {
-    type Args<'a> = ();
+impl RVZGroup {
+    pub fn data_size(&self) -> u32 { self.data_size_and_flag.get() & 0x7FFFFFFF }

-    const STATIC_SIZE: usize = struct_size([
-        u32::STATIC_SIZE, // data_offset
-        u32::STATIC_SIZE, // data_size
-        u32::STATIC_SIZE, // rvz_packed_size
-    ]);
-
-    fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
-    where R: Read + ?Sized {
-        let data_offset = u32::from_reader(reader)?;
-        let size_and_flag = u32::from_reader(reader)?;
-        let rvz_packed_size = u32::from_reader(reader)?;
-        Ok(Self {
-            data_offset,
-            data_size: size_and_flag & 0x7FFFFFFF,
-            rvz_packed_size,
-            is_compressed: size_and_flag & 0x80000000 != 0,
-        })
-    }
-}
-
-impl ToWriter for RVZGroup {
-    fn to_writer<W>(&self, writer: &mut W) -> io::Result<()>
-    where W: Write + ?Sized {
-        self.data_offset.to_writer(writer)?;
-        (self.data_size | (self.is_compressed as u32) << 31).to_writer(writer)?;
-        self.rvz_packed_size.to_writer(writer)?;
-        Ok(())
-    }
-
-    fn write_size(&self) -> usize { Self::STATIC_SIZE }
+    pub fn is_compressed(&self) -> bool { self.data_size_and_flag.get() & 0x80000000 != 0 }
 }

 impl From<WIAGroup> for RVZGroup {
     fn from(value: WIAGroup) -> Self {
         Self {
             data_offset: value.data_offset,
-            data_size: value.data_size,
-            rvz_packed_size: 0,
-            is_compressed: true,
+            data_size_and_flag: U32::new(value.data_size.get() | 0x80000000),
+            rvz_packed_size: U32::new(0),
         }
     }
 }
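+// Bit-unpacking example (illustrative): data_size_and_flag = 0x8000_1234
+// decodes as is_compressed() == true and data_size() == 0x1234; a plain
+// WIAGroup converts with the compressed bit forced on and rvz_packed_size = 0,
+// as in the From impl above.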
}) - } -} - -impl ToWriter for WIAException { - fn to_writer(&self, writer: &mut W) -> io::Result<()> - where W: Write + ?Sized { - self.offset.to_writer(writer)?; - self.hash.to_writer(writer)?; - Ok(()) - } - - fn write_size(&self) -> usize { Self::STATIC_SIZE } + pub hash: HashBytes, } /// Each [WIAGroup] of Wii partition data contains one or more [WIAExceptionList] structs before @@ -749,118 +428,49 @@ impl ToWriter for WIAException { /// end offset of the last [WIAExceptionList] is not evenly divisible by 4, padding is inserted /// after it so that the data afterwards will start at a 4 byte boundary. This padding is not /// inserted for the other compression methods. -#[derive(Clone, Debug)] -pub(crate) struct WIAExceptionList { - /// Each [WIAException] describes one difference between the hashes obtained by hashing the - /// partition data and the original hashes. - pub(crate) exceptions: Vec, -} +type WIAExceptionList = Vec; -impl FromReader for WIAExceptionList { - type Args<'a> = (); - - const STATIC_SIZE: usize = DYNAMIC_SIZE; - - fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result - where R: Read + ?Sized { - let num_exceptions = u16::from_reader(reader)?; - let exceptions = read_vec(reader, num_exceptions as usize)?; - Ok(Self { exceptions }) - } -} - -impl ToWriter for WIAExceptionList { - fn to_writer(&self, writer: &mut W) -> io::Result<()> - where W: Write + ?Sized { - (self.exceptions.len() as u16).to_writer(writer)?; - write_vec(writer, &self.exceptions)?; - Ok(()) - } - - fn write_size(&self) -> usize { - u16::STATIC_SIZE + self.exceptions.len() * WIAException::STATIC_SIZE - } -} - -/// This struct is used by the simple compression method [Purge](Compression::Purge), which stores -/// runs of zeroes efficiently and stores other data as is. -/// -/// Each [Purge](Compression::Purge) chunk contains zero or more [WIASegment] structs stored in -/// order of ascending offset, followed by a SHA-1 hash (0x14 bytes) of the [WIAExceptionList] -/// structs (if any) and the [WIASegment] structs. Bytes in the decompressed data that are not -/// covered by any [WIASegment] struct are set to 0x00. -#[derive(Clone, Debug)] -pub(crate) struct WIASegment { - /// The offset of data within the decompressed data. - /// - /// Any [WIAExceptionList] structs are not counted as part of the decompressed data. - pub(crate) offset: u32, - /// The data. 
-/// This struct is used by the simple compression method [Purge](Compression::Purge), which stores
-/// runs of zeroes efficiently and stores other data as is.
-///
-/// Each [Purge](Compression::Purge) chunk contains zero or more [WIASegment] structs stored in
-/// order of ascending offset, followed by a SHA-1 hash (0x14 bytes) of the [WIAExceptionList]
-/// structs (if any) and the [WIASegment] structs. Bytes in the decompressed data that are not
-/// covered by any [WIASegment] struct are set to 0x00.
-#[derive(Clone, Debug)]
-pub(crate) struct WIASegment {
-    /// The offset of data within the decompressed data.
-    ///
-    /// Any [WIAExceptionList] structs are not counted as part of the decompressed data.
-    pub(crate) offset: u32,
-    /// The data.
-    pub(crate) data: Vec<u8>,
-}
-
-impl FromReader for WIASegment {
-    type Args<'a> = ();
-
-    const STATIC_SIZE: usize = DYNAMIC_SIZE;
-
-    fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
-    where R: Read + ?Sized {
-        let offset = u32::from_reader(reader)?;
-        let size = u32::from_reader(reader)?;
-        let data = read_bytes(reader, size as usize)?;
-        Ok(Self { offset, data })
-    }
-}
-
-impl ToWriter for WIASegment {
-    fn to_writer<W>(&self, writer: &mut W) -> io::Result<()>
-    where W: Write + ?Sized {
-        self.offset.to_writer(writer)?;
-        (self.data.len() as u32).to_writer(writer)?;
-        self.data.to_writer(writer)?;
-        Ok(())
-    }
-
-    fn write_size(&self) -> usize { u32::STATIC_SIZE * 2 + self.data.len() }
-}
-
-pub(crate) enum Decompressor {
+pub enum Decompressor {
     None,
-    // Purge,
     #[cfg(feature = "compress-bzip2")]
     Bzip2,
-    // Lzma,
-    // Lzma2,
+    #[cfg(feature = "compress-lzma")]
+    Lzma(liblzma::stream::LzmaOptions),
+    #[cfg(feature = "compress-lzma")]
+    Lzma2(liblzma::stream::LzmaOptions),
     #[cfg(feature = "compress-zstd")]
     Zstandard,
 }

 impl Decompressor {
-    pub(crate) fn new(disc: &WIADisc) -> Result<Self> {
-        match disc.compression {
+    pub fn new(disc: &WIADisc) -> Result<Self> {
+        let compr_data = &disc.compr_data[..disc.compr_data_len as usize];
+        match disc.compression() {
             Compression::None => Ok(Self::None),
-            // Compression::Purge => Ok(Self::Purge),
             #[cfg(feature = "compress-bzip2")]
             Compression::Bzip2 => Ok(Self::Bzip2),
-            // Compression::Lzma => Ok(Self::Lzma),
-            // Compression::Lzma2 => Ok(Self::Lzma2),
+            #[cfg(feature = "compress-lzma")]
+            Compression::Lzma => Ok(Self::Lzma(lzma_props_decode(compr_data)?)),
+            #[cfg(feature = "compress-lzma")]
+            Compression::Lzma2 => Ok(Self::Lzma2(lzma2_props_decode(compr_data)?)),
             #[cfg(feature = "compress-zstd")]
             Compression::Zstandard => Ok(Self::Zstandard),
             comp => Err(Error::DiscFormat(format!("Unsupported WIA/RVZ compression: {:?}", comp))),
         }
     }

-    pub(crate) fn wrap<'a, R>(&mut self, reader: R) -> Result<Box<dyn Read + 'a>>
+    pub fn wrap<'a, R>(&mut self, reader: R) -> io::Result<Box<dyn Read + 'a>>
     where R: Read + 'a {
         Ok(match self {
             Decompressor::None => Box::new(reader),
             #[cfg(feature = "compress-bzip2")]
             Decompressor::Bzip2 => Box::new(bzip2::read::BzDecoder::new(reader)),
+            #[cfg(feature = "compress-lzma")]
+            Decompressor::Lzma(options) => Box::new(new_lzma_decoder(reader, options)?),
+            #[cfg(feature = "compress-lzma")]
+            Decompressor::Lzma2(options) => Box::new(new_lzma2_decoder(reader, options)?),
             #[cfg(feature = "compress-zstd")]
-            Decompressor::Zstandard => {
-                Box::new(zstd::stream::Decoder::new(reader).context("Creating zstd decoder")?)
-            }
+            Decompressor::Zstandard => Box::new(zstd::stream::Decoder::new(reader)?),
         })
     }
 }
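+// Usage sketch, mirroring how the raw-data and group headers are read later
+// in this file:
+//
+//     let mut decompressor = Decompressor::new(&disc)?;
+//     let mut reader = decompressor.wrap((&mut file).take(disc.group_size.get() as u64))?;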
@@ -873,62 +483,52 @@ impl Decompressor {
 /// yielding 8 H2 hashes.
 /// Finally, the 8 H2 hashes for each group are hashed, yielding 1 H3 hash.
 /// The H3 hashes for each group are stored in the partition's H3 table.
-pub(crate) struct HashTable {
+pub struct HashTable {
     /// SHA-1 hash of the 31 H0 hashes for each sector.
-    pub(crate) h1_hashes: Vec<HashBytes>,
+    pub h1_hashes: Vec<HashBytes>,
     /// SHA-1 hash of the 8 H1 hashes for each subgroup.
-    pub(crate) h2_hashes: Vec<HashBytes>,
+    pub h2_hashes: Vec<HashBytes>,
     /// SHA-1 hash of the 8 H2 hashes for each group.
-    pub(crate) h3_hashes: Vec<HashBytes>,
+    pub h3_hashes: Vec<HashBytes>,
 }

-pub(crate) struct DiscIOWIA {
-    pub(crate) header: WIAFileHeader,
-    pub(crate) disc: WIADisc,
-    pub(crate) partitions: Vec<WIAPartition>,
-    pub(crate) raw_data: Vec<WIARawData>,
-    pub(crate) groups: Vec<RVZGroup>,
-    pub(crate) filename: PathBuf,
-    pub(crate) encrypt: bool,
-    pub(crate) hash_tables: Vec<HashTable>,
+struct HashResult {
+    h1_hashes: [HashBytes; 64],
+    h2_hashes: [HashBytes; 8],
+    h3_hash: HashBytes,
 }

-/// Wraps a buffer, reading zeros for any extra bytes.
-struct SizedRead<'a> {
-    buf: &'a [u8],
-    pos: usize,
-}
-
-impl<'a> SizedRead<'a> {
-    fn new(buf: &'a [u8]) -> Self { Self { buf, pos: 0 } }
-}
-
-impl Read for SizedRead<'_> {
-    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
-        let written = if self.pos < self.buf.len() {
-            let to_read = min(buf.len(), self.buf.len() - self.pos);
-            buf[..to_read].copy_from_slice(&self.buf[self.pos..self.pos + to_read]);
-            to_read
-        } else {
-            0
-        };
-        buf[written..].fill(0);
-        self.pos += buf.len();
-        Ok(buf.len())
-    }
-}
-
-impl Seek for SizedRead<'_> {
-    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
-        match pos {
-            SeekFrom::Start(pos) => self.pos = pos as usize,
-            SeekFrom::Current(pos) => self.pos = (self.pos as i64 + pos) as usize,
-            SeekFrom::End(_) => unimplemented!(),
+impl HashTable {
+    fn new(num_sectors: u32) -> Self {
+        let num_sectors = num_sectors.next_multiple_of(64) as usize;
+        let num_subgroups = num_sectors / 8;
+        let num_groups = num_subgroups / 8;
+        Self {
+            h1_hashes: HashBytes::new_vec_zeroed(num_sectors),
+            h2_hashes: HashBytes::new_vec_zeroed(num_subgroups),
+            h3_hashes: HashBytes::new_vec_zeroed(num_groups),
         }
-        Ok(self.pos as u64)
     }

-    fn stream_position(&mut self) -> io::Result<u64> { Ok(self.pos as u64) }
+    fn extend(&mut self, group_index: usize, result: &HashResult) {
+        let h1_start = group_index * 64;
+        self.h1_hashes[h1_start..h1_start + 64].copy_from_slice(&result.h1_hashes);
+        let h2_start = group_index * 8;
+        self.h2_hashes[h2_start..h2_start + 8].copy_from_slice(&result.h2_hashes);
+        self.h3_hashes[group_index] = result.h3_hash;
+    }
 }
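+// Sizing example (illustrative): a partition with 1000 sectors rounds up to
+// 1024, giving 1024 H1 hashes, 1024 / 8 = 128 H2 hashes, and 128 / 8 = 16
+// H3 hashes (one per 64-sector group).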
- .context("Parsing WIA/RVZ file header")?; - verify_hash( - &header_buf[..WIAFileHeader::STATIC_SIZE - HashBytes::STATIC_SIZE], - &header.file_head_hash, - )?; - if header.version_compatible < 0x30000 { - return Err(Error::DiscFormat(format!( - "WIA/RVZ version {:#X} is not supported", - header.version_compatible - ))); - } - let is_rvz = header.magic == WIARVZMagic::Rvz; - // println!("Header: {:?}", header); + let header: WIAFileHeader = read_from(&mut file).context("Reading WIA/RVZ file header")?; + header.validate()?; + let is_rvz = header.is_rvz(); + // log::debug!("Header: {:?}", header); // Load & verify disc header - let disc_buf = read_bytes(&mut file, header.disc_size as usize) + let mut disc_buf: Vec = read_vec(&mut file, header.disc_size.get() as usize) .context("Reading WIA/RVZ disc header")?; verify_hash(&disc_buf, &header.disc_hash)?; - let disc = WIADisc::from_reader(&mut SizedRead::new(&disc_buf)) - .context("Parsing WIA/RVZ disc header")?; - // println!("Disc: {:?}", disc); - if disc.partition_type_size != WIAPartition::STATIC_SIZE as u32 { - return Err(Error::DiscFormat(format!( - "WIA partition type size is {}, expected {}", - disc.partition_type_size, - WIAPartition::STATIC_SIZE - ))); + disc_buf.resize(size_of::(), 0); + let mut disc = WIADisc::read_from(disc_buf.as_slice()).unwrap(); + disc.validate()?; + if !options.rebuild_encryption { + // If we're not re-encrypting, disable partition encryption in disc header + disc.disc_head[0x61] = 1; } + // log::debug!("Disc: {:?}", disc); + + // Read NKit header if present (after disc header) + let nkit_header = NKitHeader::try_read_from(&mut file); // Load & verify partition headers - file.seek(SeekFrom::Start(disc.partition_offset)) + file.seek(SeekFrom::Start(disc.partition_offset.get())) .context("Seeking to WIA/RVZ partition headers")?; - let partition_buf = - read_bytes(&mut file, disc.partition_type_size as usize * disc.num_partitions as usize) - .context("Reading WIA/RVZ partition headers")?; - verify_hash(&partition_buf, &disc.partition_hash)?; - let partitions = read_vec(&mut partition_buf.as_slice(), disc.num_partitions as usize) - .context("Parsing WIA/RVZ partition headers")?; - // println!("Partitions: {:?}", partitions); + let partitions: Vec = read_vec(&mut file, disc.num_partitions.get() as usize) + .context("Reading WIA/RVZ partition headers")?; + verify_hash(partitions.as_slice().as_bytes(), &disc.partition_hash)?; + // log::debug!("Partitions: {:?}", partitions); // Create decompressor let mut decompressor = Decompressor::new(&disc)?; // Load raw data headers - let raw_data = { - file.seek(SeekFrom::Start(disc.raw_data_offset)) + let raw_data: Vec = { + file.seek(SeekFrom::Start(disc.raw_data_offset.get())) .context("Seeking to WIA/RVZ raw data headers")?; - let mut reader = decompressor.wrap((&mut file).take(disc.raw_data_size as u64))?; - read_vec(&mut reader, disc.num_raw_data as usize) + let mut reader = decompressor + .wrap((&mut file).take(disc.raw_data_size.get() as u64)) + .context("Creating WIA/RVZ decompressor")?; + read_vec(&mut reader, disc.num_raw_data.get() as usize) .context("Reading WIA/RVZ raw data headers")? 
- // println!("Raw data: {:?}", raw_data); }; + // log::debug!("Raw data: {:?}", raw_data); // Load group headers - let mut groups = Vec::with_capacity(disc.num_groups as usize); - { - file.seek(SeekFrom::Start(disc.group_offset)) + let groups = { + file.seek(SeekFrom::Start(disc.group_offset.get())) .context("Seeking to WIA/RVZ group headers")?; - let mut reader = decompressor.wrap((&mut file).take(disc.group_size as u64))?; - let bytes = read_bytes( - &mut reader, - disc.num_groups as usize - * if is_rvz { RVZGroup::STATIC_SIZE } else { WIAGroup::STATIC_SIZE }, - ) - .context("Reading WIA/RVZ group headers")?; - let mut slice = bytes.as_slice(); - for i in 0..disc.num_groups { - if is_rvz { - groups.push( - RVZGroup::from_reader(&mut slice) - .with_context(|| format!("Parsing RVZ group header {}", i))?, - ); - } else { - groups.push( - WIAGroup::from_reader(&mut slice) - .with_context(|| format!("Parsing WIA group header {}", i))? - .into(), - ); - } + let mut reader = decompressor + .wrap((&mut file).take(disc.group_size.get() as u64)) + .context("Creating WIA/RVZ decompressor")?; + if is_rvz { + read_vec(&mut reader, disc.num_groups.get() as usize) + .context("Reading WIA/RVZ group headers")? + } else { + let wia_groups: Vec = + read_vec(&mut reader, disc.num_groups.get() as usize) + .context("Reading WIA/RVZ group headers")?; + wia_groups.into_iter().map(RVZGroup::from).collect() } - // println!("Groups: {:?}", groups); - } + // log::debug!("Groups: {:?}", groups); + }; let mut disc_io = Self { header, @@ -1070,8 +647,9 @@ impl DiscIOWIA { raw_data, groups, filename: filename.to_owned(), - encrypt: options.rebuild_hashes, + encrypt: options.rebuild_encryption, hash_tables: vec![], + nkit_header, }; if options.rebuild_hashes { disc_io.rebuild_hashes()?; @@ -1084,60 +662,68 @@ impl DiscIOWIA { p.partition_data .iter() .find(|pd| { - let start = pd.first_sector as u64 * SECTOR_SIZE as u64; - let end = start + pd.num_sectors as u64 * SECTOR_SIZE as u64; + let start = pd.first_sector.get() as u64 * SECTOR_SIZE as u64; + let end = start + pd.num_sectors.get() as u64 * SECTOR_SIZE as u64; offset >= start && offset < end }) .map(|pd| (p_idx, pd)) }) { - let start = pd.first_sector as u64 * SECTOR_SIZE as u64; - let group_index = (offset - start) / self.disc.chunk_size as u64; - if group_index >= pd.num_groups as u64 { + let start = pd.first_sector.get() as u64 * SECTOR_SIZE as u64; + let group_index = (offset - start) / self.disc.chunk_size.get() as u64; + if group_index >= pd.num_groups.get() as u64 { return None; } - let disc_offset = start + group_index * self.disc.chunk_size as u64; - let chunk_size = (self.disc.chunk_size as u64 * BLOCK_SIZE as u64) / SECTOR_SIZE as u64; + let disc_offset = start + group_index * self.disc.chunk_size.get() as u64; + let chunk_size = + (self.disc.chunk_size.get() as u64 * BLOCK_SIZE as u64) / SECTOR_SIZE as u64; let partition_offset = group_index * chunk_size; - let partition_end = pd.num_sectors as u64 * BLOCK_SIZE as u64; - self.groups.get(pd.group_index as usize + group_index as usize).map(|g| GroupResult { - disc_offset, - partition_offset, - group: g.clone(), - partition_index: Some(p_idx), - chunk_size: chunk_size as u32, - partition_end, + let partition_end = pd.num_sectors.get() as u64 * BLOCK_SIZE as u64; + self.groups.get(pd.group_index.get() as usize + group_index as usize).map(|g| { + GroupResult { + disc_offset, + partition_offset, + group: g.clone(), + partition_index: Some(p_idx), + chunk_size: chunk_size as u32, + partition_end, + } }) } 
@@ -1084,60 +662,68 @@ impl DiscIOWIA {
                 p.partition_data
                     .iter()
                     .find(|pd| {
-                        let start = pd.first_sector as u64 * SECTOR_SIZE as u64;
-                        let end = start + pd.num_sectors as u64 * SECTOR_SIZE as u64;
+                        let start = pd.first_sector.get() as u64 * SECTOR_SIZE as u64;
+                        let end = start + pd.num_sectors.get() as u64 * SECTOR_SIZE as u64;
                         offset >= start && offset < end
                     })
                     .map(|pd| (p_idx, pd))
             }) {
-            let start = pd.first_sector as u64 * SECTOR_SIZE as u64;
-            let group_index = (offset - start) / self.disc.chunk_size as u64;
-            if group_index >= pd.num_groups as u64 {
+            let start = pd.first_sector.get() as u64 * SECTOR_SIZE as u64;
+            let group_index = (offset - start) / self.disc.chunk_size.get() as u64;
+            if group_index >= pd.num_groups.get() as u64 {
                 return None;
             }
-            let disc_offset = start + group_index * self.disc.chunk_size as u64;
-            let chunk_size = (self.disc.chunk_size as u64 * BLOCK_SIZE as u64) / SECTOR_SIZE as u64;
+            let disc_offset = start + group_index * self.disc.chunk_size.get() as u64;
+            let chunk_size =
+                (self.disc.chunk_size.get() as u64 * BLOCK_SIZE as u64) / SECTOR_SIZE as u64;
             let partition_offset = group_index * chunk_size;
-            let partition_end = pd.num_sectors as u64 * BLOCK_SIZE as u64;
-            self.groups.get(pd.group_index as usize + group_index as usize).map(|g| GroupResult {
-                disc_offset,
-                partition_offset,
-                group: g.clone(),
-                partition_index: Some(p_idx),
-                chunk_size: chunk_size as u32,
-                partition_end,
+            let partition_end = pd.num_sectors.get() as u64 * BLOCK_SIZE as u64;
+            self.groups.get(pd.group_index.get() as usize + group_index as usize).map(|g| {
+                GroupResult {
+                    disc_offset,
+                    partition_offset,
+                    group: g.clone(),
+                    partition_index: Some(p_idx),
+                    chunk_size: chunk_size as u32,
+                    partition_end,
+                }
             })
         } else if let Some(d) = self.raw_data.iter().find(|d| {
-            let start = d.raw_data_offset & !0x7FFF;
-            let end = d.raw_data_offset + d.raw_data_size;
+            let start = d.raw_data_offset.get() & !0x7FFF;
+            let end = d.raw_data_offset.get() + d.raw_data_size.get();
             offset >= start && offset < end
         }) {
-            let start = d.raw_data_offset & !0x7FFF;
-            let end = d.raw_data_offset + d.raw_data_size;
-            let group_index = (offset - start) / self.disc.chunk_size as u64;
-            if group_index >= d.num_groups as u64 {
+            let start = d.raw_data_offset.get() & !0x7FFF;
+            let end = d.raw_data_offset.get() + d.raw_data_size.get();
+            let group_index = (offset - start) / self.disc.chunk_size.get() as u64;
+            if group_index >= d.num_groups.get() as u64 {
                 return None;
             }
-            let disc_offset = start + group_index * self.disc.chunk_size as u64;
-            self.groups.get(d.group_index as usize + group_index as usize).map(|g| GroupResult {
-                disc_offset,
-                partition_offset: disc_offset,
-                group: g.clone(),
-                partition_index: None,
-                chunk_size: self.disc.chunk_size,
-                partition_end: end,
+            let disc_offset = start + group_index * self.disc.chunk_size.get() as u64;
+            self.groups.get(d.group_index.get() as usize + group_index as usize).map(|g| {
+                GroupResult {
+                    disc_offset,
+                    partition_offset: disc_offset,
+                    group: g.clone(),
+                    partition_index: None,
+                    chunk_size: self.disc.chunk_size.get(),
+                    partition_end: end,
+                }
             })
         } else {
             None
         }
     }
 
-    pub(crate) fn rebuild_hashes(&mut self) -> Result<()> {
+    pub fn rebuild_hashes(&mut self) -> Result<()> {
         const NUM_H0_HASHES: usize = BLOCK_SIZE / HASHES_SIZE;
-        const H0_HASHES_SIZE: usize = HashBytes::STATIC_SIZE * NUM_H0_HASHES;
+        const H0_HASHES_SIZE: usize = size_of::<HashBytes>() * NUM_H0_HASHES;
+
+        let start = Instant::now();
 
         // Precompute hashes for zeroed sectors.
-        let zero_h0_hash = hash_bytes(&[0u8; HASHES_SIZE]);
+        const ZERO_H0_BYTES: &[u8] = &[0u8; HASHES_SIZE];
+        let zero_h0_hash = hash_bytes(ZERO_H0_BYTES);
         let mut zero_h1_hash = Sha1::new();
         for _ in 0..NUM_H0_HASHES {
             zero_h1_hash.update(zero_h0_hash);
@@ -1145,80 +731,100 @@
         let zero_h1_hash: HashBytes = zero_h1_hash.finalize().into();
 
         let mut hash_tables = Vec::with_capacity(self.partitions.len());
-        let mut stream =
-            WIAReadStream::new(self, 0, false).context("Creating WIA/RVZ read stream")?;
         for part in &self.partitions {
-            let first_sector = part.partition_data[0].first_sector;
-            if first_sector + part.partition_data[0].num_sectors
-                != part.partition_data[1].first_sector
+            let first_sector = part.partition_data[0].first_sector.get();
+            if first_sector + part.partition_data[0].num_sectors.get()
+                != part.partition_data[1].first_sector.get()
             {
                 return Err(Error::DiscFormat(format!(
                     "Partition data is not contiguous: {}..{} != {}",
                     first_sector,
-                    first_sector + part.partition_data[0].num_sectors,
-                    part.partition_data[1].first_sector
+                    first_sector + part.partition_data[0].num_sectors.get(),
+                    part.partition_data[1].first_sector.get()
                 )));
             }
-            let part_sectors =
-                part.partition_data[0].num_sectors + part.partition_data[1].num_sectors;
-            let num_sectors = part_sectors.next_multiple_of(64) as usize;
-            let num_subgroups = num_sectors / 8;
-            let num_groups = num_subgroups / 8;
-            println!(
+            let part_sectors =
+                part.partition_data[0].num_sectors.get() + part.partition_data[1].num_sectors.get();
+            let hash_table = HashTable::new(part_sectors);
+            log::debug!(
                 "Rebuilding hashes: {} sectors, {} subgroups, {} groups",
-                num_sectors, num_subgroups, num_groups
+                hash_table.h1_hashes.len(),
+                hash_table.h2_hashes.len(),
+                hash_table.h3_hashes.len()
             );
-            let mut hash_table = HashTable {
-                h1_hashes: vec![HashBytes::default(); num_sectors],
-                h2_hashes: vec![HashBytes::default(); num_subgroups],
-                h3_hashes: vec![HashBytes::default(); num_groups],
-            };
-            let mut h0_buf = [0u8; H0_HASHES_SIZE];
-            for h3_index in 0..num_groups {
-                let mut h3_hasher = Sha1::new();
-                for h2_index in h3_index * 8..h3_index * 8 + 8 {
-                    let mut h2_hasher = Sha1::new();
-                    for h1_index in h2_index * 8..h2_index * 8 + 8 {
-                        let h1_hash = if h1_index >= part_sectors as usize {
-                            zero_h1_hash
-                        } else {
-                            let sector = first_sector + h1_index as u32;
-                            stream
-                                .seek(SeekFrom::Start(sector as u64 * SECTOR_SIZE as u64))
-                                .with_context(|| format!("Seeking to sector {}", sector))?;
-                            stream
-                                .read_exact(&mut h0_buf)
-                                .with_context(|| format!("Reading sector {}", sector))?;
-                            hash_bytes(&h0_buf)
-                        };
-                        hash_table.h1_hashes[h1_index] = h1_hash;
-                        h2_hasher.update(h1_hash);
+            let group_count = hash_table.h3_hashes.len();
+            let mutex = Arc::new(Mutex::new(hash_table));
+            (0..group_count).into_par_iter().try_for_each_init(
+                || (WIAReadStream::new(self, false), mutex.clone()),
+                |(stream, mutex), h3_index| -> Result<()> {
+                    let stream = stream.as_mut().map_err(|_| {
+                        Error::DiscFormat("Failed to create read stream".to_string())
+                    })?;
+                    let mut result = HashResult {
+                        h1_hashes: [HashBytes::default(); 64],
+                        h2_hashes: [HashBytes::default(); 8],
+                        h3_hash: HashBytes::default(),
+                    };
+                    let mut h0_buf = [0u8; H0_HASHES_SIZE];
+                    let mut h3_hasher = Sha1::new();
+                    for h2_index in 0..8 {
+                        let mut h2_hasher = Sha1::new();
+                        for h1_index in 0..8 {
+                            let part_sector =
+                                h1_index as u32 + h2_index as u32 * 8 + h3_index as u32 * 64;
+                            let h1_hash = if part_sector >= part_sectors {
+                                zero_h1_hash
+                            } else {
+                                let sector = first_sector + part_sector;
+                                stream
+                                    .seek(SeekFrom::Start(sector as u64 * SECTOR_SIZE as u64))
+                                    .with_context(|| format!("Seeking to sector {}", sector))?;
+                                stream
+                                    .read_exact(&mut h0_buf)
+                                    .with_context(|| format!("Reading sector {}", sector))?;
+                                hash_bytes(&h0_buf)
+                            };
+                            result.h1_hashes[h1_index + h2_index * 8] = h1_hash;
+                            h2_hasher.update(h1_hash);
+                        }
+                        let h2_hash = h2_hasher.finalize().into();
+                        result.h2_hashes[h2_index] = h2_hash;
+                        h3_hasher.update(h2_hash);
                     }
-                    let h2_hash = h2_hasher.finalize().into();
-                    hash_table.h2_hashes[h2_index] = h2_hash;
-                    h3_hasher.update(h2_hash);
-                }
-                hash_table.h3_hashes[h3_index] = h3_hasher.finalize().into();
-            }
+                    result.h3_hash = h3_hasher.finalize().into();
+                    let mut hash_table = mutex.lock().map_err(|_| "Failed to lock mutex")?;
+                    hash_table.extend(h3_index, &result);
+                    Ok(())
+                },
+            )?;
+            let hash_table = Arc::try_unwrap(mutex)
+                .map_err(|_| "Failed to unwrap Arc")?
+                .into_inner()
+                .map_err(|_| "Failed to lock mutex")?;
             hash_tables.push(hash_table);
         }
         self.hash_tables = hash_tables;
+        log::info!("Rebuilt hashes in {:?}", start.elapsed());
         Ok(())
     }
 }
 
 impl DiscIO for DiscIOWIA {
-    fn begin_read_stream(&mut self, offset: u64) -> io::Result<Box<dyn ReadStream + '_>> {
-        Ok(Box::new(WIAReadStream::new(self, offset, self.encrypt)?))
+    fn open(&self) -> Result<Box<dyn ReadStream + '_>> {
+        Ok(Box::new(WIAReadStream::new(self, self.encrypt)?))
     }
 
-    fn has_wii_crypto(&self) -> bool { self.encrypt && self.disc.disc_type == DiscType::Wii }
+    fn meta(&self) -> Result<DiscMeta> {
+        Ok(self.nkit_header.as_ref().map(DiscMeta::from).unwrap_or_default())
+    }
+
+    fn disc_size(&self) -> Option<u64> { Some(self.header.iso_file_size.get()) }
 }
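Editor's note: the `rebuild_hashes` loop above walks the Wii hash tree bottom-up. Each sector stores 31 H0 hashes (one per 0x400-byte block of its 0x7C00-byte data area); the SHA-1 of those 620 bytes is the sector's H1 hash; eight H1 hashes combine into an H2 hash; and eight H2 hashes form the group's H3 hash. A minimal sketch of one combining step, using the `sha1` crate already in the dependency tree (standalone, not part of the patch):

```rust
use sha1::{Digest, Sha1};

type HashBytes = [u8; 20];

// Derive H2 from eight H1 hashes, mirroring the `h2_hasher` loop above.
// H3 is computed the same way over eight H2 hashes.
fn h2_from_h1(h1_hashes: &[HashBytes; 8]) -> HashBytes {
    let mut hasher = Sha1::new();
    for h1 in h1_hashes {
        hasher.update(h1); // concatenated SHA-1 over the child hashes
    }
    hasher.finalize().into()
}
```
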
-pub(crate) struct WIAReadStream<'a> {
+pub struct WIAReadStream<'a> {
     /// The disc IO.
     disc_io: &'a DiscIOWIA,
     /// The currently open file handle.
@@ -1250,36 +856,36 @@ where
     }
 
     let num_exception_list = (chunk_size as usize).div_ceil(0x200000);
-    // println!("Num exception list: {:?}", num_exception_list);
-    let exception_lists = read_vec::<WIAExceptionList, _>(reader, num_exception_list)?;
-    for list in &exception_lists {
-        if !list.exceptions.is_empty() {
-            println!("Exception list: {:?}", list);
+    // log::debug!("Num exception list: {:?}", num_exception_list);
+    let mut exception_lists = Vec::with_capacity(num_exception_list);
+    for i in 0..num_exception_list {
+        let num_exceptions = read_u16_be(reader)?;
+        let exceptions: Vec<WIAException> = read_vec(reader, num_exceptions as usize)?;
+        if !exceptions.is_empty() {
+            log::debug!("Exception list {}: {:?}", i, exceptions);
         }
+        exception_lists.push(exceptions);
     }
     Ok(exception_lists)
 }
 
 impl<'a> WIAReadStream<'a> {
-    pub(crate) fn new(disc_io: &'a DiscIOWIA, offset: u64, encrypt: bool) -> io::Result<Self> {
-        let result = match disc_io.group_for_offset(offset) {
-            Some(v) => v,
-            None => return Err(io::Error::from(io::ErrorKind::InvalidInput)),
-        };
-        let file = BufReader::new(File::open(&disc_io.filename)?);
-        let decompressor = Decompressor::new(&disc_io.disc)
-            .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
-        let mut stream = Self {
+    pub fn new(disc_io: &'a DiscIOWIA, encrypt: bool) -> Result<Self> {
+        let file = BufReader::new(
+            File::open(&disc_io.filename)
+                .with_context(|| format!("Opening file {}", disc_io.filename.display()))?,
+        );
+        let decompressor = Decompressor::new(&disc_io.disc)?;
+        let stream = Self {
             disc_io,
             file,
-            offset,
-            group_offset: result.disc_offset,
+            offset: 0,
+            group_offset: u64::MAX,
             group_data: Vec::new(),
             exception_lists: vec![],
             decompressor,
             encrypt,
         };
-        stream.read_group(result)?; // Initialize group data
         Ok(stream)
     }
 
@@ -1307,7 +913,7 @@ impl<'a> WIAReadStream<'a> {
     /// Reads new group data into the buffer, handling decompression and RVZ packing.
     fn read_group(&mut self, result: GroupResult) -> io::Result<()> {
         // Special case for all-zero data
-        if result.group.data_size == 0 {
+        if result.group.data_size() == 0 {
             self.exception_lists.clear();
             let size =
                 min(result.chunk_size as u64, result.partition_end - result.partition_offset)
                    as usize;
@@ -1317,18 +923,18 @@
         }
 
         self.group_data = Vec::with_capacity(result.chunk_size as usize);
-        let group_data_start = result.group.data_offset as u64 * 4;
+        let group_data_start = result.group.data_offset.get() as u64 * 4;
         self.file.seek(SeekFrom::Start(group_data_start))?;
 
-        let mut reader = (&mut self.file).take_seek(result.group.data_size as u64);
+        let mut reader = (&mut self.file).take_seek(result.group.data_size() as u64);
         let uncompressed_exception_lists =
-            matches!(self.disc_io.disc.compression, Compression::None | Compression::Purge)
-                || !result.group.is_compressed;
+            matches!(self.disc_io.disc.compression(), Compression::None | Compression::Purge)
+                || !result.group.is_compressed();
         if uncompressed_exception_lists {
             self.exception_lists = read_exception_lists(
                 &mut reader,
                 result.partition_index,
-                self.disc_io.disc.chunk_size, // result.chunk_size?
+                self.disc_io.disc.chunk_size.get(), // result.chunk_size?
             )?;
             // Align to 4
             let rem = reader.stream_position()? % 4;
@@ -1336,35 +942,30 @@
             if rem != 0 {
                 reader.seek(SeekFrom::Current((4 - rem) as i64))?;
             }
         }
-        let mut reader: Box<dyn Read> =
-            if result.group.is_compressed && self.disc_io.disc.compression != Compression::None {
-                self.decompressor
-                    .wrap(reader)
-                    .map_err(|v| io::Error::new(io::ErrorKind::InvalidData, v))?
-            } else {
-                Box::new(reader)
-            };
+        let mut reader: Box<dyn Read> = if result.group.is_compressed() {
+            self.decompressor.wrap(reader)?
+        } else {
+            Box::new(reader)
+        };
         if !uncompressed_exception_lists {
             self.exception_lists = read_exception_lists(
                 reader.as_mut(),
                 result.partition_index,
-                self.disc_io.disc.chunk_size, // result.chunk_size?
+                self.disc_io.disc.chunk_size.get(), // result.chunk_size?
             )?;
         }
-        if result.group.rvz_packed_size > 0 {
+        if result.group.rvz_packed_size.get() > 0 {
             // Decode RVZ packed data
             let mut lfg = LaggedFibonacci::default();
             loop {
                 let mut size_bytes = [0u8; 4];
-                let read = reader.read(&mut size_bytes)?;
-                if read == 0 {
-                    break;
-                } else if read < 4 {
-                    return Err(io::Error::new(
-                        io::ErrorKind::UnexpectedEof,
-                        "Failed to read RVZ packed size",
-                    ));
+                match reader.read_exact(&mut size_bytes) {
+                    Ok(_) => {}
+                    Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => break,
+                    Err(e) => {
+                        return Err(io::Error::new(e.kind(), "Failed to read RVZ packed size"));
+                    }
                 }
                 let size = u32::from_be_bytes(size_bytes);
                 let cur_data_len = self.group_data.len();
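Editor's note: the loop above consumes RVZ's packed-data encoding, a sequence of big-endian `u32` size prefixes. Per Dolphin's WiaAndRvz.md, a size with the high bit set means a lagged-Fibonacci seed follows in the stream and `size & 0x7FFFFFFF` bytes of junk data are regenerated rather than stored; otherwise `size` literal bytes follow. A rough sketch of one iteration (a hypothetical helper, simplified from the code above; junk generation is elided into a comment):

```rust
use std::io::{self, Read};

// Hypothetical sketch of decoding a single RVZ packed run into `out`.
fn unpack_run<R: Read>(reader: &mut R, size: u32, out: &mut Vec<u8>) -> io::Result<()> {
    if size & 0x8000_0000 != 0 {
        // Junk run: the stream carries only the PRNG seed
        // (the real code calls lfg.init_with_reader(reader) here)...
        let len = (size & 0x7FFF_FFFF) as usize;
        out.resize(out.len() + len, 0); // ...and `len` bytes are generated, not stored
    } else {
        // Literal run: copy `size` bytes straight through
        let start = out.len();
        out.resize(start + size as usize, 0);
        reader.read_exact(&mut out[start..])?;
    }
    Ok(())
}
```
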
@@ -1396,7 +997,7 @@
 
     fn recalculate_hashes(&mut self, result: GroupResult) -> io::Result<()> {
         let Some(partition_index) = result.partition_index else {
-            // Data not inside of a Wii partition
+            // Data not inside a Wii partition
             return Ok(());
         };
         let hash_table = self.disc_io.hash_tables.get(partition_index);
@@ -1422,12 +1023,14 @@ impl<'a> WIAReadStream<'a> {
                 array_ref_mut![out, n * 20, 20].copy_from_slice(&hash);
             }
 
+            // Copy data
+            array_ref_mut![out, 0x400, BLOCK_SIZE].copy_from_slice(data);
+
             // Rebuild H1 and H2 hashes if available
-            let mut data_copied = false;
             if let Some(hash_table) = hash_table {
                 let partition = &self.disc_io.partitions[partition_index];
                 let part_sector = (result.disc_offset / SECTOR_SIZE as u64) as usize + i
-                    - partition.partition_data[0].first_sector as usize;
+                    - partition.partition_data[0].first_sector.get() as usize;
                 let h1_start = part_sector & !7;
                 for i in 0..8 {
                     array_ref_mut![out, 0x280 + i * 20, 20]
                        .copy_from_slice(&hash_table.h1_hashes[h1_start + i]);
                 }
                 let h2_start = (part_sector & !63) / 8;
                 for i in 0..8 {
                     array_ref_mut![out, 0x340 + i * 20, 20]
                        .copy_from_slice(&hash_table.h2_hashes[h2_start + i]);
                 }
 
-                // if result.disc_offset == 0x9150000 {
-                //     println!("Validating hashes for sector {}: {:X?}", part_sector, result);
-                //     // Print H0 hashes
-                //     for i in 0..31 {
-                //         println!("H0 hash {} {:x}", i, as_digest(array_ref![out, i * 20, 20]));
-                //     }
-                //     // Print H1 hashes
-                //     for i in 0..8 {
-                //         println!(
-                //             "H1 hash {} {:x}",
-                //             i,
-                //             as_digest(array_ref![out, 0x280 + i * 20, 20])
-                //         );
-                //     }
-                //     // Print H2 hashes
-                //     for i in 0..8 {
-                //         println!(
-                //             "H2 hash {} {:x}",
-                //             i,
-                //             as_digest(array_ref![out, 0x340 + i * 20, 20])
-                //         );
-                //     }
-                // }
-
                 if self.encrypt {
                     // Re-encrypt hashes and data
-                    let key = (&partition.partition_key).into();
-                    Aes128Cbc::new(key, &Block::from([0u8; 16]))
-                        .encrypt_padded_mut::<NoPadding>(&mut out[..HASHES_SIZE], HASHES_SIZE)
-                        .expect("Failed to encrypt hashes");
-                    Aes128Cbc::new(key, &Block::from(*array_ref![out, 0x3d0, 16]))
-                        .encrypt_padded_b2b_mut::<NoPadding>(data, &mut out[HASHES_SIZE..])
-                        .expect("Failed to encrypt data");
-                    data_copied = true;
+                    aes_encrypt(&partition.partition_key, [0u8; 16], &mut out[..HASHES_SIZE]);
+                    let iv = *array_ref![out, 0x3d0, 16];
+                    aes_encrypt(&partition.partition_key, iv, &mut out[HASHES_SIZE..]);
                 }
             }
-
-            if !data_copied {
-                // Copy decrypted data
-                array_ref_mut![out, 0x400, BLOCK_SIZE].copy_from_slice(data);
-            }
         }
 
         self.group_data = out;
@@ -1524,8 +1093,8 @@ impl<'a> Seek for WIAReadStream<'a> {
     fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
         self.offset = match pos {
             SeekFrom::Start(v) => v,
-            SeekFrom::End(v) => (self.stable_stream_len()? as i64 + v) as u64,
-            SeekFrom::Current(v) => (self.offset as i64 + v) as u64,
+            SeekFrom::End(v) => self.disc_io.header.iso_file_size.get().saturating_add_signed(v),
+            SeekFrom::Current(v) => self.offset.saturating_add_signed(v),
         };
         self.check_group()?;
         Ok(self.offset)
@@ -1535,7 +1104,9 @@ impl<'a> Seek for WIAReadStream<'a> {
 }
 
 impl<'a> ReadStream for WIAReadStream<'a> {
-    fn stable_stream_len(&mut self) -> io::Result<u64> { Ok(self.disc_io.header.iso_file_size) }
+    fn stable_stream_len(&mut self) -> io::Result<u64> {
+        Ok(self.disc_io.header.iso_file_size.get())
+    }
 
     fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
 }
diff --git a/src/lib.rs b/src/lib.rs
index b7e09db..4f619a6 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,4 +1,4 @@
-#![warn(missing_docs, rustdoc::missing_doc_code_examples)]
+// #![warn(missing_docs, rustdoc::missing_doc_code_examples)]
 //! Library for traversing & reading GameCube and Wii disc images.
 //!
 //! Based on the C++ library [nod](https://github.com/AxioDL/nod),
@@ -16,22 +16,17 @@
 //! ```no_run
 //! use std::io::Read;
 //!
-//! use nod::{
-//!     disc::{new_disc_base, PartHeader},
-//!     fst::NodeType,
-//!     io::{new_disc_io, DiscIOOptions},
-//! };
+//! use nod::{Disc, PartitionKind};
 //!
 //! fn main() -> nod::Result<()> {
-//!     let options = DiscIOOptions::default();
-//!     let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?;
-//!     let disc_base = new_disc_base(disc_io.as_mut())?;
-//!     let mut partition = disc_base.get_data_partition(disc_io.as_mut(), false)?;
-//!     let header = partition.read_header()?;
-//!     if let Some(NodeType::File(node)) = header.find_node("/MP3/Worlds.txt") {
+//!     let disc = Disc::new("path/to/file.iso")?;
+//!     let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
+//!     let meta = partition.meta()?;
+//!     let fst = meta.fst()?;
+//!     if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
 //!         let mut s = String::new();
 //!         partition
-//!             .begin_file_stream(node)
+//!             .open_file(node)
 //!             .expect("Failed to open file stream")
 //!             .read_to_string(&mut s)
 //!             .expect("Failed to read file");
@@ -40,11 +35,24 @@
 //!     Ok(())
 //! }
 //! ```
-pub mod disc;
-pub mod fst;
-pub mod io;
-pub mod streams;
-pub mod util;
+
+use std::path::Path;
+
+use disc::DiscBase;
+pub use disc::{
+    AppLoaderHeader, DiscHeader, DolHeader, PartitionBase, PartitionHeader, PartitionInfo,
+    PartitionKind, PartitionMeta, BI2_SIZE, BOOT_SIZE,
+};
+pub use fst::{Fst, Node, NodeKind};
+use io::DiscIO;
+pub use io::DiscMeta;
+pub use streams::ReadStream;
+
+mod disc;
+mod fst;
+mod io;
+mod streams;
+mod util;
 
 /// Error types for nod.
 #[derive(thiserror::Error, Debug)]
@@ -55,19 +63,22 @@ pub enum Error {
     /// A general I/O error.
     #[error("I/O error: {0}")]
     Io(String, #[source] std::io::Error),
+    /// An unknown error.
+    #[error("error: {0}")]
+    Other(String),
+}
+
+impl From<&str> for Error {
+    fn from(s: &str) -> Error { Error::Other(s.to_string()) }
+}
+
+impl From<String> for Error {
+    fn from(s: String) -> Error { Error::Other(s) }
 }
 /// Helper result type for [`Error`].
 pub type Result<T, E = Error> = core::result::Result<T, E>;
 
-impl From<aes::cipher::block_padding::UnpadError> for Error {
-    fn from(_: aes::cipher::block_padding::UnpadError) -> Self { unreachable!() }
-}
-
-impl From<base16ct::Error> for Error {
-    fn from(_: base16ct::Error) -> Self { unreachable!() }
-}
-
 pub trait ErrorContext {
     fn context(self, context: impl Into<String>) -> Error;
 }
@@ -95,3 +106,72 @@ where E: ErrorContext
         self.map_err(|e| e.context(f()))
     }
 }
+
+#[derive(Default, Debug, Clone)]
+pub struct OpenOptions {
+    /// Wii: Validate partition data hashes while reading the disc image if present.
+    pub validate_hashes: bool,
+    /// Wii: Rebuild partition data hashes for the disc image if the underlying format
+    /// does not store them. (e.g. WIA/RVZ)
+    pub rebuild_hashes: bool,
+    /// Wii: Rebuild partition data encryption if the underlying format stores data decrypted.
+    /// (e.g. WIA/RVZ, NFS)
+    ///
+    /// Unnecessary if only opening a disc partition stream, which will already provide a decrypted
+    /// stream. In this case, this will cause unnecessary processing.
+    ///
+    /// Only valid in combination with `rebuild_hashes`, as the data encryption is derived from the
+    /// partition data hashes.
+    pub rebuild_encryption: bool,
+}
+
+pub struct Disc {
+    io: Box<dyn DiscIO>,
+    base: Box<dyn DiscBase>,
+    options: OpenOptions,
+}
+
+impl Disc {
+    /// Opens a disc image from a file path.
+    pub fn new<P: AsRef<Path>>(path: P) -> Result<Disc> {
+        Disc::new_with_options(path, &OpenOptions::default())
+    }
+
+    /// Opens a disc image from a file path with custom options.
+    pub fn new_with_options<P: AsRef<Path>>(path: P, options: &OpenOptions) -> Result<Disc> {
+        let mut io = io::open(path.as_ref(), options)?;
+        let base = disc::new(io.as_mut())?;
+        Ok(Disc { io, base, options: options.clone() })
+    }
+
+    /// The disc's header.
+    pub fn header(&self) -> &DiscHeader { self.base.header() }
+
+    /// Returns extra metadata included in the disc file format, if any.
+    pub fn meta(&self) -> Result<DiscMeta> { self.io.meta() }
+
+    /// The disc's size in bytes or an estimate if not stored by the format.
+    pub fn disc_size(&self) -> u64 { self.base.disc_size() }
+
+    /// A list of partitions on the disc.
+    ///
+    /// For GameCube discs, this will return a single data partition spanning the entire disc.
+    pub fn partitions(&self) -> Vec<PartitionInfo> { self.base.partitions() }
+
+    /// Opens a new read stream for the base disc image.
+    ///
+    /// Generally does _not_ need to be used directly. Opening a partition will provide a
+    /// decrypted stream instead.
+    pub fn open(&self) -> Result<Box<dyn ReadStream + '_>> { self.io.open() }
+
+    /// Opens a new, decrypted partition read stream for the specified partition index.
+    pub fn open_partition(&self, index: usize) -> Result<Box<dyn PartitionBase + '_>> {
+        self.base.open_partition(self.io.as_ref(), index, &self.options)
+    }
+
+    /// Opens a new partition read stream for the first partition matching
+    /// the specified type.
+    pub fn open_partition_kind(&self, kind: PartitionKind) -> Result<Box<dyn PartitionBase + '_>> {
+        self.base.open_partition_kind(self.io.as_ref(), kind, &self.options)
+    }
+}
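Editor's note: putting `OpenOptions` together with `Disc`, rebuilding hashes (and optionally encryption) is only needed when reconstructing the full, encrypted disc image; plain file reads through a partition stream do not require it. A usage sketch built only from the APIs added in this patch (the path is a placeholder):

```rust
use nod::{Disc, OpenOptions, PartitionKind};

fn main() -> nod::Result<()> {
    // Rebuild hashes + encryption, e.g. when converting WIA/RVZ back to ISO.
    let options = OpenOptions {
        validate_hashes: false,
        rebuild_hashes: true,
        rebuild_encryption: true,
    };
    let disc = Disc::new_with_options("path/to/game.rvz", &options)?;
    let _stream = disc.open()?; // full, re-encrypted disc stream

    // Or skip the options entirely when just reading partition contents:
    let disc = Disc::new("path/to/game.rvz")?;
    let _partition = disc.open_partition_kind(PartitionKind::Data)?;
    Ok(())
}
```
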
diff --git a/src/streams.rs b/src/streams.rs
index ca036a7..cd08974 100644
--- a/src/streams.rs
+++ b/src/streams.rs
@@ -3,8 +3,7 @@
 use std::{
     fs::File,
     io,
-    io::{Read, Seek, SeekFrom},
-    ops::DerefMut,
+    io::{BufReader, Read, Seek, SeekFrom},
 };
 
 /// Creates a fixed-size array reference from a slice.
@@ -31,6 +30,14 @@ macro_rules! array_ref_mut {
     }};
 }
 
+/// Compile-time assertion.
+#[macro_export]
+macro_rules! static_assert {
+    ($condition:expr) => {
+        const _: () = core::assert!($condition);
+    };
+}
+
 /// A helper trait for seekable read streams.
 pub trait ReadStream: Read + Seek {
     /// Replace with [`Read.stream_len`] when stabilized.
@@ -65,12 +72,20 @@ impl ReadStream for File {
     fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
 }
 
+impl<T> ReadStream for BufReader<T>
+where T: ReadStream
+{
+    fn stable_stream_len(&mut self) -> io::Result<u64> { self.get_mut().stable_stream_len() }
+
+    fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
+}
+
 trait WindowedReadStream: ReadStream {
     fn base_stream(&mut self) -> &mut dyn ReadStream;
     fn window(&self) -> (u64, u64);
 }
 
-/// An window into an existing [`ReadStream`], with ownership of the underlying stream.
+/// A window into an existing [`ReadStream`], with ownership of the underlying stream.
 pub struct OwningWindowedReadStream<'a> {
     /// The base stream.
     pub base: Box<dyn ReadStream + 'a>,
@@ -111,7 +126,7 @@ impl<'a> SharedWindowedReadStream<'a> {
 }
 
 #[inline(always)]
-fn windowed_read(stream: &mut dyn WindowedReadStream, buf: &mut [u8]) -> io::Result<usize> {
+fn windowed_read(stream: &mut impl WindowedReadStream, buf: &mut [u8]) -> io::Result<usize> {
     let pos = stream.stream_position()?;
     let size = stream.stable_stream_len()?;
     if pos == size {
@@ -125,7 +140,7 @@ fn windowed_read(stream: &mut dyn WindowedReadStream, buf: &mut [u8]) -> io::Res
 }
 
 #[inline(always)]
-fn windowed_seek(stream: &mut dyn WindowedReadStream, pos: SeekFrom) -> io::Result<u64> {
+fn windowed_seek(stream: &mut impl WindowedReadStream, pos: SeekFrom) -> io::Result<u64> {
     let (begin, end) = stream.window();
     let result = stream.base_stream().seek(match pos {
         SeekFrom::Start(p) => SeekFrom::Start(begin + p),
@@ -158,7 +173,7 @@ impl<'a> ReadStream for OwningWindowedReadStream<'a> {
 }
 
 impl<'a> WindowedReadStream for OwningWindowedReadStream<'a> {
-    fn base_stream(&mut self) -> &mut dyn ReadStream { self.base.deref_mut() }
+    fn base_stream(&mut self) -> &mut dyn ReadStream { self.base.as_dyn() }
 
     fn window(&self) -> (u64, u64) { (self.begin, self.end) }
 }
@@ -219,8 +234,8 @@ impl Seek for ByteReadStream<'_> {
     fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
         let new_pos = match pos {
             SeekFrom::Start(v) => v,
-            SeekFrom::End(v) => (self.bytes.len() as i64 + v) as u64,
-            SeekFrom::Current(v) => (self.position as i64 + v) as u64,
+            SeekFrom::End(v) => (self.bytes.len() as u64).saturating_add_signed(v),
+            SeekFrom::Current(v) => self.position.saturating_add_signed(v),
         };
         if new_pos > self.bytes.len() as u64 {
             Err(io::Error::from(io::ErrorKind::UnexpectedEof))
diff --git a/src/util/compress.rs b/src/util/compress.rs
new file mode 100644
index 0000000..9f1b172
--- /dev/null
+++ b/src/util/compress.rs
@@ -0,0 +1,82 @@
+use std::{io, io::Read};
+
+use crate::{Error, Result};
+
+/// Decodes the LZMA Properties byte (lc/lp/pb).
+/// See `lzma_lzma_lclppb_decode` in `liblzma/lzma/lzma_decoder.c`.
+#[cfg(feature = "compress-lzma")]
+pub fn lzma_lclppb_decode(options: &mut liblzma::stream::LzmaOptions, byte: u8) -> Result<()> {
+    let mut d = byte as u32;
+    if d >= (9 * 5 * 5) {
+        return Err(Error::DiscFormat(format!("Invalid LZMA props byte: {}", d)));
+    }
+    options.literal_context_bits(d % 9);
+    d /= 9;
+    options.position_bits(d / 5);
+    options.literal_position_bits(d % 5);
+    Ok(())
+}
+
+/// Decodes LZMA properties.
+/// See `lzma_lzma_props_decode` in `liblzma/lzma/lzma_decoder.c`.
+#[cfg(feature = "compress-lzma")] +pub fn lzma_props_decode(props: &[u8]) -> Result { + use crate::array_ref; + if props.len() != 5 { + return Err(Error::DiscFormat(format!("Invalid LZMA props length: {}", props.len()))); + } + let mut options = liblzma::stream::LzmaOptions::new(); + lzma_lclppb_decode(&mut options, props[0])?; + options.dict_size(u32::from_le_bytes(*array_ref!(props, 1, 4))); + Ok(options) +} + +/// Decodes LZMA2 properties. +/// See `lzma_lzma2_props_decode` in `liblzma/lzma/lzma2_decoder.c`. +#[cfg(feature = "compress-lzma")] +pub fn lzma2_props_decode(props: &[u8]) -> Result { + use std::cmp::Ordering; + if props.len() != 1 { + return Err(Error::DiscFormat(format!("Invalid LZMA2 props length: {}", props.len()))); + } + let d = props[0] as u32; + let mut options = liblzma::stream::LzmaOptions::new(); + options.dict_size(match d.cmp(&40) { + Ordering::Greater => { + return Err(Error::DiscFormat(format!("Invalid LZMA2 props byte: {}", d))); + } + Ordering::Equal => u32::MAX, + Ordering::Less => (2 | (d & 1)) << (d / 2 + 11), + }); + Ok(options) +} + +/// Creates a new raw LZMA decoder with the given options. +#[cfg(feature = "compress-lzma")] +pub fn new_lzma_decoder( + reader: R, + options: &liblzma::stream::LzmaOptions, +) -> io::Result> +where + R: Read, +{ + let mut filters = liblzma::stream::Filters::new(); + filters.lzma1(options); + let stream = liblzma::stream::Stream::new_raw_decoder(&filters).map_err(io::Error::from)?; + Ok(liblzma::read::XzDecoder::new_stream(reader, stream)) +} + +/// Creates a new raw LZMA2 decoder with the given options. +#[cfg(feature = "compress-lzma")] +pub fn new_lzma2_decoder( + reader: R, + options: &liblzma::stream::LzmaOptions, +) -> io::Result> +where + R: Read, +{ + let mut filters = liblzma::stream::Filters::new(); + filters.lzma2(options); + let stream = liblzma::stream::Stream::new_raw_decoder(&filters).map_err(io::Error::from)?; + Ok(liblzma::read::XzDecoder::new_stream(reader, stream)) +} diff --git a/src/util/lfg.rs b/src/util/lfg.rs index df38ebf..46119df 100644 --- a/src/util/lfg.rs +++ b/src/util/lfg.rs @@ -1,12 +1,19 @@ use std::{cmp::min, io, io::Read}; -pub(crate) const LFG_K: usize = 521; -pub(crate) const LFG_J: usize = 32; -pub(crate) const SEED_SIZE: usize = 17; +use zerocopy::{transmute_ref, AsBytes}; + +use crate::disc::SECTOR_SIZE; + +pub const LFG_K: usize = 521; +pub const LFG_J: usize = 32; +pub const SEED_SIZE: usize = 17; /// Lagged Fibonacci generator for Wii partition junk data. -/// https://github.com/dolphin-emu/dolphin/blob/master/docs/WiaAndRvz.md#prng-algorithm -pub(crate) struct LaggedFibonacci { +/// +/// References (license CC0-1.0): +/// https://github.com/dolphin-emu/dolphin/blob/a0f555648c27ec0c928f6b1e1fcad5e2d7c4d0c4/docs/WiaAndRvz.md +/// https://github.com/dolphin-emu/dolphin/blob/a0f555648c27ec0c928f6b1e1fcad5e2d7c4d0c4/Source/Core/DiscIO/LaggedFibonacciGenerator.cpp +pub struct LaggedFibonacci { buffer: [u32; LFG_K], position: usize, } @@ -21,6 +28,8 @@ impl LaggedFibonacci { self.buffer[i] = (self.buffer[i - 17] << 23) ^ (self.buffer[i - 16] >> 9) ^ self.buffer[i - 1]; } + // Instead of doing the "shift by 18 instead of 16" oddity when actually outputting the data, + // we can do the shifting (and byteswapping) at this point to make the output code simpler. 
diff --git a/src/util/lfg.rs b/src/util/lfg.rs
index df38ebf..46119df 100644
--- a/src/util/lfg.rs
+++ b/src/util/lfg.rs
@@ -1,12 +1,19 @@
 use std::{cmp::min, io, io::Read};
 
-pub(crate) const LFG_K: usize = 521;
-pub(crate) const LFG_J: usize = 32;
-pub(crate) const SEED_SIZE: usize = 17;
+use zerocopy::{transmute_ref, AsBytes};
+
+use crate::disc::SECTOR_SIZE;
+
+pub const LFG_K: usize = 521;
+pub const LFG_J: usize = 32;
+pub const SEED_SIZE: usize = 17;
 
 /// Lagged Fibonacci generator for Wii partition junk data.
-/// https://github.com/dolphin-emu/dolphin/blob/master/docs/WiaAndRvz.md#prng-algorithm
-pub(crate) struct LaggedFibonacci {
+///
+/// References (license CC0-1.0):
+/// https://github.com/dolphin-emu/dolphin/blob/a0f555648c27ec0c928f6b1e1fcad5e2d7c4d0c4/docs/WiaAndRvz.md
+/// https://github.com/dolphin-emu/dolphin/blob/a0f555648c27ec0c928f6b1e1fcad5e2d7c4d0c4/Source/Core/DiscIO/LaggedFibonacciGenerator.cpp
+pub struct LaggedFibonacci {
     buffer: [u32; LFG_K],
     position: usize,
 }
@@ -21,6 +28,8 @@ impl LaggedFibonacci {
             self.buffer[i] =
                 (self.buffer[i - 17] << 23) ^ (self.buffer[i - 16] >> 9) ^ self.buffer[i - 1];
         }
+        // Instead of doing the "shift by 18 instead of 16" oddity when actually outputting the data,
+        // we can do the shifting (and byteswapping) at this point to make the output code simpler.
         for x in self.buffer.iter_mut() {
             *x = ((*x & 0xFF00FFFF) | (*x >> 2 & 0x00FF0000)).swap_bytes();
         }
@@ -29,9 +38,32 @@ impl LaggedFibonacci {
         }
     }
 
-    pub(crate) fn init_with_reader<R>(&mut self, reader: &mut R) -> io::Result<()>
+    pub fn init_with_seed(&mut self, init: [u8; 4], disc_num: u8, partition_offset: u64) {
+        let seed = u32::from_be_bytes([
+            init[2],
+            init[1],
+            init[3].wrapping_add(init[2]),
+            init[0].wrapping_add(init[1]),
+        ]) ^ disc_num as u32;
+        let sector = (partition_offset / SECTOR_SIZE as u64) as u32;
+        let mut n = seed.wrapping_mul(0x260BCD5) ^ sector.wrapping_mul(0x1EF29123);
+        for i in 0..SEED_SIZE {
+            let mut v = 0u32;
+            for _ in 0..LFG_J {
+                n = n.wrapping_mul(0x5D588B65).wrapping_add(1);
+                v = (v >> 1) | (n & 0x80000000);
+            }
+            self.buffer[i] = v;
+        }
+        self.buffer[16] ^= self.buffer[0] >> 9 ^ self.buffer[16] << 23;
+        self.position = 0;
+        self.init();
+        self.skip((partition_offset % SECTOR_SIZE as u64) as usize);
+    }
+
+    pub fn init_with_reader<R>(&mut self, reader: &mut R) -> io::Result<()>
     where R: Read + ?Sized {
-        reader.read_exact(bytemuck::cast_slice_mut(&mut self.buffer[..SEED_SIZE]))?;
+        reader.read_exact(self.buffer[..SEED_SIZE].as_bytes_mut())?;
         for x in self.buffer[..SEED_SIZE].iter_mut() {
             *x = u32::from_be(*x);
         }
@@ -40,7 +72,7 @@ impl LaggedFibonacci {
         Ok(())
     }
 
-    pub(crate) fn forward(&mut self) {
+    pub fn forward(&mut self) {
         for i in 0..LFG_J {
             self.buffer[i] ^= self.buffer[i + LFG_K - LFG_J];
         }
@@ -49,7 +81,7 @@ impl LaggedFibonacci {
         }
     }
 
-    pub(crate) fn skip(&mut self, n: usize) {
+    pub fn skip(&mut self, n: usize) {
         self.position += n;
         while self.position >= LFG_K * 4 {
             self.forward();
@@ -57,15 +89,11 @@ impl LaggedFibonacci {
         }
     }
 
-    #[inline]
-    fn bytes(&self) -> &[u8; LFG_K * 4] {
-        unsafe { &*(self.buffer.as_ptr() as *const [u8; LFG_K * 4]) }
-    }
-
-    pub(crate) fn fill(&mut self, mut buf: &mut [u8]) {
+    pub fn fill(&mut self, mut buf: &mut [u8]) {
         while !buf.is_empty() {
             let len = min(buf.len(), LFG_K * 4 - self.position);
-            buf[..len].copy_from_slice(&self.bytes()[self.position..self.position + len]);
+            let bytes: &[u8; LFG_K * 4] = transmute_ref!(&self.buffer);
+            buf[..len].copy_from_slice(&bytes[self.position..self.position + len]);
             self.position += len;
             buf = &mut buf[len..];
             if self.position == LFG_K * 4 {
@@ -75,3 +103,32 @@ impl LaggedFibonacci {
         }
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_init_with_seed_1() {
+        let mut lfg = LaggedFibonacci::default();
+        lfg.init_with_seed([0x47, 0x41, 0x4c, 0x45], 0, 0x600000);
+        let mut buf = [0u8; 16];
+        lfg.fill(&mut buf);
+        assert_eq!(buf, [
+            0xE9, 0x47, 0x67, 0xBD, 0x41, 0x50, 0x4D, 0x5D, 0x61, 0x48, 0xB1, 0x99, 0xA0, 0x12,
+            0x0C, 0xBA
+        ]);
+    }
+
+    #[test]
+    fn test_init_with_seed_2() {
+        let mut lfg = LaggedFibonacci::default();
+        lfg.init_with_seed([0x47, 0x41, 0x4c, 0x45], 0, 0x608000);
+        let mut buf = [0u8; 16];
+        lfg.fill(&mut buf);
+        assert_eq!(buf, [
+            0xE2, 0xBB, 0xBD, 0x77, 0xDA, 0xB2, 0x22, 0x42, 0x1C, 0x0C, 0x0B, 0xFC, 0xAC, 0x06,
+            0xEA, 0xD0
+        ]);
+    }
+}
diff --git a/src/util/mod.rs b/src/util/mod.rs
index ab1b635..681ca76 100644
--- a/src/util/mod.rs
+++ b/src/util/mod.rs
@@ -1,5 +1,6 @@
 use std::ops::{Div, Rem};
 
+pub(crate) mod compress;
 pub(crate) mod lfg;
 pub(crate) mod reader;
 pub(crate) mod take_seek;
diff --git a/src/util/reader.rs b/src/util/reader.rs
index f26fff3..d8e063f 100644
--- a/src/util/reader.rs
+++ b/src/util/reader.rs
@@ -1,243 +1,60 @@
-use std::{ffi::CString, io, io::Read};
+use std::{io, io::Read};
 
-use io::Write;
+use zerocopy::{AsBytes, FromBytes, FromZeroes};
 
-pub(crate) const DYNAMIC_SIZE: usize = 0;
-
-pub(crate) const fn struct_size<const N: usize>(fields: [usize; N]) -> usize {
-    let mut result = 0;
-    let mut i = 0;
-    while i < N {
-        let size = fields[i];
-        if size == DYNAMIC_SIZE {
-            // Dynamically sized
-            return DYNAMIC_SIZE;
-        }
-        result += size;
-        i += 1;
-    }
-    result
-}
-
-pub(crate) fn skip_bytes<const N: usize, R>(reader: &mut R) -> io::Result<()>
-where R: Read + ?Sized {
-    let mut buf = [0u8; N];
-    reader.read_exact(&mut buf)?;
-    Ok(())
-}
-
-pub(crate) trait FromReader: Sized {
-    type Args<'a>;
-
-    const STATIC_SIZE: usize;
-
-    fn from_reader_args<R>(reader: &mut R, args: Self::Args<'_>) -> io::Result<Self>
-    where R: Read + ?Sized;
-
-    fn from_reader<'a, R>(reader: &mut R) -> io::Result<Self>
-    where
-        R: Read + ?Sized,
-        Self::Args<'a>: Default,
-    {
-        Self::from_reader_args(reader, Default::default())
-    }
-}
-
-macro_rules! impl_from_reader {
-    ($($t:ty),*) => {
-        $(
-            impl FromReader for $t {
-                type Args<'a> = ();
-
-                const STATIC_SIZE: usize = std::mem::size_of::<$t>();
-
-                fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self> where R: Read + ?Sized{
-                    let mut buf = [0u8; Self::STATIC_SIZE];
-                    reader.read_exact(&mut buf)?;
-                    Ok(Self::from_be_bytes(buf))
-                }
-            }
-        )*
-    };
-}
-
-impl_from_reader!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128);
-
-#[repr(transparent)]
-pub struct U24(pub u32);
-
-impl FromReader for U24 {
-    type Args<'a> = ();
-
-    const STATIC_SIZE: usize = 3;
-
-    fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
-    where R: Read + ?Sized {
-        let mut buf = [0u8; 4];
-        reader.read_exact(&mut buf[1..])?;
-        Ok(U24(u32::from_be_bytes(buf)))
-    }
-}
-
-impl<const N: usize> FromReader for [u8; N] {
-    type Args<'a> = ();
-
-    const STATIC_SIZE: usize = N;
-
-    fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
-    where R: Read + ?Sized {
-        let mut buf = [0u8; N];
-        reader.read_exact(&mut buf)?;
-        Ok(buf)
-    }
-}
-
-impl<const N: usize> FromReader for [u32; N] {
-    type Args<'a> = ();
-
-    const STATIC_SIZE: usize = N * u32::STATIC_SIZE;
-
-    fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
-    where R: Read + ?Sized {
-        let mut buf = [0u32; N];
-        reader.read_exact(unsafe {
-            std::slice::from_raw_parts_mut(buf.as_mut_ptr() as *mut u8, Self::STATIC_SIZE)
-        })?;
-        for x in buf.iter_mut() {
-            *x = u32::from_be(*x);
-        }
-        Ok(buf)
-    }
-}
-
-impl FromReader for CString {
-    type Args<'a> = ();
-
-    const STATIC_SIZE: usize = DYNAMIC_SIZE;
-
-    fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
-    where R: Read + ?Sized {
-        let mut buf = Vec::new();
-        loop {
-            let mut byte = [0u8; 1];
-            reader.read_exact(&mut byte)?;
-            buf.push(byte[0]);
-            if byte[0] == 0 {
-                break;
-            }
-        }
-        Ok(unsafe { CString::from_vec_with_nul_unchecked(buf) })
-    }
-}
-
-pub(crate) fn read_bytes<R>(reader: &mut R, count: usize) -> io::Result<Vec<u8>>
-where R: Read + ?Sized {
-    let mut buf = vec![0u8; count];
-    reader.read_exact(&mut buf)?;
-    Ok(buf)
-}
-
-pub(crate) fn read_vec<'a, T, R>(reader: &mut R, count: usize) -> io::Result<Vec<T>>
+#[inline(always)]
+pub fn read_from<T, R>(reader: &mut R) -> io::Result<T>
 where
-    T: FromReader,
+    T: FromBytes + FromZeroes + AsBytes,
     R: Read + ?Sized,
-    <T as FromReader>::Args<'a>: Default,
 {
-    let mut vec = Vec::with_capacity(count);
-    if T::STATIC_SIZE != DYNAMIC_SIZE {
-        // Read the entire buffer at once
-        let buf = read_bytes(reader, T::STATIC_SIZE * count)?;
-        let mut slice = buf.as_slice();
-        for _ in 0..count {
-            vec.push(T::from_reader(&mut slice)?);
-        }
-    } else {
-        for _ in 0..count {
-            vec.push(T::from_reader(reader)?);
-        }
-    }
-    Ok(vec)
+    let mut ret = <T>::new_zeroed();
+    reader.read_exact(ret.as_bytes_mut())?;
+    Ok(ret)
 }
 
-pub(crate) trait ToWriter: Sized {
-    fn to_writer<W>(&self, writer: &mut W) -> io::Result<()>
-    where W: Write + ?Sized;
-
-    fn to_bytes(&self) -> io::Result<Vec<u8>> {
-        let mut buf = vec![0u8; self.write_size()];
-        self.to_writer(&mut buf.as_mut_slice())?;
-        Ok(buf)
-    }
-
-    fn write_size(&self) -> usize;
-}
-
-macro_rules! impl_to_writer {
-    ($($t:ty),*) => {
-        $(
-            impl ToWriter for $t {
-                fn to_writer<W>(&self, writer: &mut W) -> io::Result<()>
-                where W: Write + ?Sized {
-                    writer.write_all(&self.to_be_bytes())
-                }
-
-                fn to_bytes(&self) -> io::Result<Vec<u8>> {
-                    Ok(self.to_be_bytes().to_vec())
-                }
-
-                fn write_size(&self) -> usize {
-                    std::mem::size_of::<$t>()
-                }
-            }
-        )*
-    };
-}
-
-impl_to_writer!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128);
-
-impl ToWriter for U24 {
-    fn to_writer<W>(&self, writer: &mut W) -> io::Result<()>
-    where W: Write + ?Sized {
-        writer.write_all(&self.0.to_be_bytes()[1..])
-    }
-
-    fn write_size(&self) -> usize { 3 }
-}
-
-impl<const N: usize> ToWriter for [u8; N] {
-    fn to_writer<W>(&self, writer: &mut W) -> io::Result<()>
-    where W: Write + ?Sized {
-        writer.write_all(self)
-    }
-
-    fn write_size(&self) -> usize { N }
-}
-
-impl ToWriter for &[u8] {
-    fn to_writer<W>(&self, writer: &mut W) -> io::Result<()>
-    where W: Write + ?Sized {
-        writer.write_all(self)
-    }
-
-    fn write_size(&self) -> usize { self.len() }
-}
-
-impl ToWriter for Vec<u8> {
-    fn to_writer<W>(&self, writer: &mut W) -> io::Result<()>
-    where W: Write + ?Sized {
-        writer.write_all(self)
-    }
-
-    fn write_size(&self) -> usize { self.len() }
-}
-
-pub(crate) fn write_vec<T, W>(writer: &mut W, vec: &[T]) -> io::Result<()>
+#[inline(always)]
+pub fn read_vec<T, R>(reader: &mut R, count: usize) -> io::Result<Vec<T>>
 where
-    T: ToWriter,
-    W: Write + ?Sized,
+    T: FromBytes + FromZeroes + AsBytes,
+    R: Read + ?Sized,
 {
-    for item in vec {
-        item.to_writer(writer)?;
-    }
-    Ok(())
+    let mut ret = <T>::new_vec_zeroed(count);
+    reader.read_exact(ret.as_mut_slice().as_bytes_mut())?;
+    Ok(ret)
+}
+
+#[inline(always)]
+pub fn read_box_slice<T, R>(reader: &mut R, count: usize) -> io::Result<Box<[T]>>
+where
+    T: FromBytes + FromZeroes + AsBytes,
+    R: Read + ?Sized,
+{
+    let mut ret = <T>::new_box_slice_zeroed(count);
+    reader.read_exact(ret.as_mut().as_bytes_mut())?;
+    Ok(ret)
+}
+
+#[inline(always)]
+pub fn read_u16_be<R>(reader: &mut R) -> io::Result<u16>
+where R: Read + ?Sized {
+    let mut buf = [0u8; 2];
+    reader.read_exact(&mut buf)?;
+    Ok(u16::from_be_bytes(buf))
+}
+
+#[inline(always)]
+pub fn read_u32_be<R>(reader: &mut R) -> io::Result<u32>
+where R: Read + ?Sized {
+    let mut buf = [0u8; 4];
+    reader.read_exact(&mut buf)?;
+    Ok(u32::from_be_bytes(buf))
+}
+
+#[inline(always)]
+pub fn read_u64_be<R>(reader: &mut R) -> io::Result<u64>
+where R: Read + ?Sized {
+    let mut buf = [0u8; 8];
+    reader.read_exact(&mut buf)?;
+    Ok(u64::from_be_bytes(buf))
+}
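Editor's note: the new zerocopy-based helpers replace the hand-rolled `FromReader`/`ToWriter` machinery; any `#[repr(C)]` type deriving the zerocopy traits can be read straight off a stream. A standalone sketch of how they are used (within the crate, via `crate::util::reader::{read_from, read_vec}`; `MyHeader` is hypothetical, and the big-endian `U32` wrapper matches the `.get()` accessors used throughout the patch):

```rust
use std::io::Read;

use zerocopy::{
    byteorder::{BigEndian, U32},
    AsBytes, FromBytes, FromZeroes,
};

// Hypothetical on-disk structure: two big-endian u32 fields, no padding.
#[derive(FromBytes, FromZeroes, AsBytes)]
#[repr(C)]
struct MyHeader {
    magic: U32<BigEndian>, // accessed via .get(), as elsewhere in the patch
    count: U32<BigEndian>,
}

fn parse<R: Read>(reader: &mut R) -> std::io::Result<()> {
    let header: MyHeader = read_from(reader)?;
    let entries: Vec<U32<BigEndian>> = read_vec(reader, header.count.get() as usize)?;
    println!("magic={:#x}, {} entries", header.magic.get(), entries.len());
    Ok(())
}
```
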