diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
index 3320223..01f39f2 100644
--- a/.github/workflows/build.yaml
+++ b/.github/workflows/build.yaml
@@ -2,59 +2,174 @@ name: Build
 on: [ push, pull_request ]
 
+env:
+  BUILD_PROFILE: release-lto
+  CARGO_BIN_NAME: nodtool
+  CARGO_TARGET_DIR: target
+
 jobs:
   check:
     name: Check
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        toolchain: [ stable, 1.51.0, nightly ]
+        toolchain: [ stable, 1.56.0, nightly ]
     env:
       RUSTFLAGS: -D warnings
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions-rs/toolchain@v1
+      - name: Checkout
+        uses: actions/checkout@v3
+      - name: Setup Rust toolchain
+        uses: dtolnay/rust-toolchain@master
         with:
-          profile: minimal
           toolchain: ${{ matrix.toolchain }}
-          override: true
           components: rustfmt, clippy
+      - name: Cargo check
+        run: cargo check --all-features --all-targets
+      - name: Cargo clippy
+        run: cargo clippy --all-features --all-targets
+
+  fmt:
+    name: Format
+    runs-on: ubuntu-latest
+    env:
+      RUSTFLAGS: -D warnings
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+      - name: Setup Rust toolchain
+        # We use nightly options in rustfmt.toml
+        uses: dtolnay/rust-toolchain@nightly
+        with:
+          components: rustfmt
+      - name: Cargo fmt
+        run: cargo fmt --all --check
+
+  deny:
+    name: Deny
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        checks:
+          - advisories
+          - bans licenses sources
+    # Prevent new advisories from failing CI
+    continue-on-error: ${{ matrix.checks == 'advisories' }}
+    steps:
+      - uses: actions/checkout@v3
       - uses: EmbarkStudios/cargo-deny-action@v1
-      - uses: actions-rs/cargo@v1
         with:
-          command: check
-          args: --all-features
-      - uses: actions-rs/cargo@v1
-        with:
-          command: clippy
-          args: --all-features
+          command: check ${{ matrix.checks }}
+
+  test:
+    name: Test
+    strategy:
+      matrix:
+        platform: [ ubuntu-latest, windows-latest, macos-latest ]
+      fail-fast: false
+    runs-on: ${{ matrix.platform }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+      - name: Setup Rust toolchain
+        uses: dtolnay/rust-toolchain@stable
+      - name: Cargo test
+        run: cargo test --release --all-features
 
   build:
     name: Build
     strategy:
       matrix:
-        platform: [ ubuntu-latest, macos-latest, windows-latest ]
-        toolchain: [ stable, 1.51.0, nightly ]
+        include:
+          - platform: ubuntu-latest
+            target: x86_64-unknown-linux-musl
+            name: linux-x86_64
+            build: zigbuild
+            features: asm
+          - platform: ubuntu-latest
+            target: i686-unknown-linux-musl
+            name: linux-i686
+            build: zigbuild
+            features: asm
+          - platform: ubuntu-latest
+            target: aarch64-unknown-linux-musl
+            name: linux-aarch64
+            build: zigbuild
+            features: nightly
+          - platform: ubuntu-latest
+            target: armv7-unknown-linux-musleabi
+            name: linux-armv7l
+            build: zigbuild
+            features: default
+          - platform: windows-latest
+            target: x86_64-pc-windows-msvc
+            name: windows-x86_64
+            build: build
+            features: asm
+          - platform: windows-latest
+            target: aarch64-pc-windows-msvc
+            name: windows-arm64
+            build: build
+            features: nightly
+          - platform: macos-latest
+            target: x86_64-apple-darwin
+            name: macos-x86_64
+            build: build
+            features: asm
+          - platform: macos-latest
+            target: aarch64-apple-darwin
+            name: macos-arm64
+            build: build
+            features: nightly
       fail-fast: false
     runs-on: ${{ matrix.platform }}
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions-rs/toolchain@v1
+      - name: Checkout
+        uses: actions/checkout@v3
+      - name: Install dependencies
+        if: matrix.packages != ''
+        run: |
+          sudo apt-get -y update
+          sudo apt-get -y install ${{ matrix.packages }}
+      - name: Install cargo-zigbuild
+        if: matrix.build == 'zigbuild'
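+        # Note: cargo-zigbuild drives a normal cargo build but links through Zig's
+        # bundled clang, which is what lets the musl/armv7 targets above
+        # cross-compile from a stock x86_64 runner.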
+        run: pip install ziglang==0.11.0 cargo-zigbuild==0.18.3
+      - name: Setup Rust toolchain
+        uses: dtolnay/rust-toolchain@nightly
         with:
-          profile: minimal
-          toolchain: ${{ matrix.toolchain }}
-          override: true
-      - uses: actions-rs/cargo@v1
+          targets: ${{ matrix.target }}
+      - name: Cargo build
+        run: cargo ${{ matrix.build }} --profile ${{ env.BUILD_PROFILE }} --target ${{ matrix.target }} --bin ${{ env.CARGO_BIN_NAME }} --features ${{ matrix.features }}
+      - name: Upload artifacts
+        uses: actions/upload-artifact@v3
         with:
-          command: test
-          args: --release --all-features
-      - uses: actions-rs/cargo@v1
-        with:
-          command: build
-          args: --release --all-features
-      - uses: actions/upload-artifact@v2
-        with:
-          name: ${{ matrix.platform }}-${{ matrix.toolchain }}
+          name: ${{ matrix.name }}
           path: |
-            target/release/nodtool
-            target/release/nodtool.exe
+            ${{ env.CARGO_TARGET_DIR }}/${{ env.BUILD_PROFILE }}/${{ env.CARGO_BIN_NAME }}
+            ${{ env.CARGO_TARGET_DIR }}/${{ env.BUILD_PROFILE }}/${{ env.CARGO_BIN_NAME }}.exe
+            ${{ env.CARGO_TARGET_DIR }}/${{ matrix.target }}/${{ env.BUILD_PROFILE }}/${{ env.CARGO_BIN_NAME }}
+            ${{ env.CARGO_TARGET_DIR }}/${{ matrix.target }}/${{ env.BUILD_PROFILE }}/${{ env.CARGO_BIN_NAME }}.exe
+          if-no-files-found: error
+
+  release:
+    name: Release
+    if: startsWith(github.ref, 'refs/tags/')
+    runs-on: ubuntu-latest
+    needs: [ build ]
+    steps:
+      - name: Download artifacts
+        uses: actions/download-artifact@v3
+        with:
+          path: artifacts
+      - name: Rename artifacts
+        working-directory: artifacts
+        run: |
+          mkdir ../out
+          for i in */*/$BUILD_PROFILE/$CARGO_BIN_NAME*; do
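+            # The glob matches <artifact-name>/<target-triple>/$BUILD_PROFILE/nodtool[.exe];
+            # the sed below flattens that to e.g. ../out/nodtool-linux-x86_64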
+            mv "$i" "../out/$(sed -E "s/([^/]+)\/[^/]+\/$BUILD_PROFILE\/($CARGO_BIN_NAME)/\2-\1/" <<< "$i")"
+          done
+          ls -R ../out
+      - name: Release
+        uses: softprops/action-gh-release@v1
+        with:
+          files: out/*
diff --git a/Cargo.toml b/Cargo.toml
index 29d33d0..4769dc7 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,7 +1,8 @@
 [package]
 name = "nod"
-version = "0.1.2"
-edition = "2018"
+version = "0.2.0"
+edition = "2021"
+rust-version = "1.56.0"
 authors = ["Luke Street <luke@street.dev>"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/encounter/nod-rs"
@@ -17,16 +18,33 @@ categories = ["command-line-utilities", "parser-implementations"]
 name = "nodtool"
 path = "src/bin.rs"
 
-[profile.release]
-lto = true
+[profile.release-lto]
+inherits = "release"
+lto = "thin"
+strip = "debuginfo"
+
+[features]
+default = ["compress-bzip2", "compress-zstd"] #, "compress-lzma"
+asm = ["md-5/asm", "sha1/asm"]
+compress-bzip2 = ["bzip2"]
+compress-zstd = ["zstd"]
+#compress-lzma = ["xz2"]
+nightly = ["crc32fast/nightly"]
 
 [dependencies]
-aes = "0.7.5"
-anyhow = "1.0.53"
-binrw = "0.8.4"
-block-modes = "0.8.1"
-clap = "2.34.0"
-encoding_rs = "0.8.30"
+aes = "0.8.3"
+argh = "0.1.12"
+argh_derive = "0.1.12"
+base16ct = "0.2.0"
+binrw = "0.13.3"
+bytemuck = "1.14.1"
+bzip2 = { version = "0.4.4", optional = true }
+cbc = "0.1.2"
+crc32fast = "1.3.2"
+encoding_rs = "0.8.33"
 file-size = "1.0.3"
-sha-1 = "0.10.0"
-thiserror = "1.0.30"
+md-5 = "0.10.6"
+sha1 = "0.10.6"
+thiserror = "1.0.56"
+xz2 = { version = "0.1.7", optional = true }
+zstd = { version = "0.13.0", optional = true }
diff --git a/README.md b/README.md
index 0a8fd30..0f4a360 100644
--- a/README.md
+++ b/README.md
@@ -14,7 +14,9 @@ Based on the C++ library [nod](https://github.com/AxioDL/nod),
 but does not currently support authoring.
 
 Currently supported file formats:
-- ISO
+- ISO (GCM)
+- WIA / RVZ
+- WBFS
 - NFS (Wii U VC files, e.g. `hif_000000.nfs`)
 
 ### CLI tool
@@ -34,20 +36,28 @@ nodtool extract /path/to/game/content/hif_000000.nfs [outdir]
 
 ### Library example
 
 Opening a disc image and reading a file:
+
 ```rust
-use nod::disc::{new_disc_base, PartHeader};
-use nod::fst::NodeType;
-use nod::io::new_disc_io;
 use std::io::Read;
 
-let mut disc_io = new_disc_io("path/to/file".as_ref())?;
-let disc_base = new_disc_base(disc_io.as_mut())?;
-let mut partition = disc_base.get_data_partition(disc_io.as_mut())?;
-let header = partition.read_header()?;
-if let Some(NodeType::File(node)) = header.find_node("/MP3/Worlds.txt") {
-    let mut s = String::new();
-    partition.begin_file_stream(node)?.read_to_string(&mut s);
-    println!(s);
+use nod::{
+    disc::{new_disc_base, PartHeader},
+    fst::NodeType,
+    io::{new_disc_io, DiscIOOptions},
+};
+
+fn main() -> nod::Result<()> {
+    let options = DiscIOOptions::default();
+    let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?;
+    let disc_base = new_disc_base(disc_io.as_mut())?;
+    let mut partition = disc_base.get_data_partition(disc_io.as_mut(), false)?;
+    let header = partition.read_header()?;
+    if let Some(NodeType::File(node)) = header.find_node("/MP3/Worlds.txt") {
+        let mut s = String::new();
+        partition.begin_file_stream(node)?.read_to_string(&mut s).expect("Failed to read file");
+        println!("{}", s);
+    }
+    Ok(())
 }
 ```
diff --git a/rustfmt.toml b/rustfmt.toml
index 0a9eda5..a8c3a96 100644
--- a/rustfmt.toml
+++ b/rustfmt.toml
@@ -6,3 +6,4 @@ reorder_impl_items = true
 use_field_init_shorthand = true
 use_small_heuristics = "Max"
 where_single_line = true
+format_code_in_doc_comments = true
diff --git a/src/bin.rs b/src/bin.rs
index c010b4d..9b48752 100644
--- a/src/bin.rs
+++ b/src/bin.rs
@@ -1,74 +1,205 @@
 use std::{
-    env, fs, io,
-    io::BufWriter,
+    error::Error,
+    fs,
+    fs::File,
+    io,
+    io::{BufWriter, Write},
     path::{Path, PathBuf},
 };
 
-use clap::{clap_app, AppSettings};
+use argh_derive::FromArgs;
 use nod::{
-    disc::{new_disc_base, PartReadStream},
+    disc::{new_disc_base, PartHeader, PartReadStream, PartitionType},
     fst::NodeType,
-    io::{has_extension, new_disc_io},
-    Result,
+    io::{has_extension, new_disc_io, DiscIOOptions},
+    Result, ResultContext,
 };
+use sha1::Digest;
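+// `Digest` is imported for its trait methods (new/update/finalize), which the
+// hashing code below calls on both `md5::Md5` and `sha1::Sha1`.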
 
-fn main() -> Result<()> {
-    let matches = clap_app!(nodtool =>
-        (settings: &[
-            AppSettings::SubcommandRequiredElseHelp,
-            AppSettings::GlobalVersion,
-            AppSettings::DeriveDisplayOrder,
-            AppSettings::VersionlessSubcommands,
-        ])
-        (global_settings: &[
-            AppSettings::ColoredHelp,
-            AppSettings::UnifiedHelpMessage,
-        ])
-        (version: env!("CARGO_PKG_VERSION"))
-        (author: "Luke Street <luke@street.dev>")
-        (about: "Tool for reading GameCube and Wii disc images.")
-        (long_about: "Tool for reading GameCube and Wii disc images.
+#[derive(FromArgs, Debug)]
+/// Tool for reading GameCube and Wii disc images.
+struct TopLevel {
+    #[argh(subcommand)]
+    command: SubCommand,
+}
 
-Based on <https://github.com/AxioDL/nod>, original authors:
-Jack Andersen (jackoalan)
-Phillip Stephens (Antidote)")
-        (@subcommand extract =>
-            (about: "Extract GameCube & Wii disc images")
-            (@arg FILE: +required "Path to disc image (ISO or NFS)")
-            (@arg DIR: "Output directory (optional)")
-            (@arg quiet: -q "Quiet output")
-            (@arg validate: -h "Validate disc hashes (Wii only)")
-        )
-    )
-    .get_matches();
-    if let Some(matches) = matches.subcommand_matches("extract") {
-        let file: PathBuf = PathBuf::from(matches.value_of("FILE").unwrap());
-        let output_dir: PathBuf;
-        if let Some(dir) = matches.value_of("DIR") {
-            output_dir = PathBuf::from(dir);
-        } else if has_extension(file.as_path(), "nfs") {
-            // Special logic to extract from content/hif_*.nfs to extracted/..
-            if let Some(parent) = file.parent() {
-                output_dir = parent.with_file_name("extracted");
-            } else {
-                output_dir = file.with_extension("");
-            }
-        } else {
-            output_dir = file.with_extension("");
+#[derive(FromArgs, Debug)]
+#[argh(subcommand)]
+enum SubCommand {
+    Extract(ExtractArgs),
+    Convert(ConvertArgs),
+    Verify(VerifyArgs),
+}
+
+#[derive(FromArgs, Debug)]
+/// Extract a disc image.
+#[argh(subcommand, name = "extract")]
+struct ExtractArgs {
+    #[argh(positional)]
+    /// path to disc image (ISO or NFS)
+    file: PathBuf,
+    #[argh(positional)]
+    /// output directory (optional)
+    out: Option<PathBuf>,
+    #[argh(switch, short = 'q')]
+    /// quiet output
+    quiet: bool,
+    #[argh(switch, short = 'h')]
+    /// validate disc hashes (Wii only)
+    validate: bool,
+}
+
+#[derive(FromArgs, Debug)]
+/// Convert a disc image to ISO.
+#[argh(subcommand, name = "convert")]
+struct ConvertArgs {
+    #[argh(positional)]
+    /// path to disc image
+    file: PathBuf,
+    #[argh(positional)]
+    /// output ISO file
+    out: PathBuf,
+}
+
+#[derive(FromArgs, Debug)]
+/// Verifies a disc image.
+#[argh(subcommand, name = "verify")]
+struct VerifyArgs {
+    #[argh(positional)]
+    /// path to disc image
+    file: PathBuf,
+}
+
+fn main() {
+    let args: TopLevel = argh::from_env();
+    let result = match args.command {
+        SubCommand::Convert(c_args) => convert(c_args),
+        SubCommand::Extract(c_args) => extract(c_args),
+        SubCommand::Verify(c_args) => verify(c_args),
+    };
+    if let Err(e) = result {
+        eprintln!("Failed: {}", e);
+        if let Some(source) = e.source() {
+            eprintln!("Caused by: {}", source);
         }
-        let mut disc_io = new_disc_io(file.as_path())?;
-        let disc_base = new_disc_base(disc_io.as_mut())?;
-        let mut partition =
-            disc_base.get_data_partition(disc_io.as_mut(), matches.is_present("validate"))?;
-        let header = partition.read_header()?;
-        extract_node(
-            header.root_node(),
-            partition.as_mut(),
-            output_dir.as_path(),
-            matches.is_present("quiet"),
-        )?;
+        std::process::exit(1);
     }
-    Result::Ok(())
+}
+
+fn convert(args: ConvertArgs) -> Result<()> { convert_and_verify(&args.file, Some(&args.out)) }
+
+fn verify(args: VerifyArgs) -> Result<()> { convert_and_verify(&args.file, None) }
+
+fn convert_and_verify(in_file: &Path, out_file: Option<&Path>) -> Result<()> {
+    println!("Loading {}", in_file.display());
+    let mut disc_io = new_disc_io(in_file, &DiscIOOptions { rebuild_hashes: true })?;
+    let disc_base = new_disc_base(disc_io.as_mut())?;
+    let header = disc_base.get_header();
+    println!(
+        "\nGame ID: {}{}{}{}{}{}",
+        header.game_id[0] as char,
+        header.game_id[1] as char,
+        header.game_id[2] as char,
+        header.game_id[3] as char,
+        header.game_id[4] as char,
+        header.game_id[5] as char
+    );
+    println!("Game title: {}", header.game_title);
+    println!("Disc num: {}", header.disc_num);
+    println!("Disc version: {}", header.disc_version);
+
+    let mut stream = disc_io.begin_read_stream(0).context("Creating disc read stream")?;
+    let mut crc = crc32fast::Hasher::new();
+    let mut md5 = md5::Md5::new();
+    let mut sha1 = sha1::Sha1::new();
+
+    let mut file = if let Some(out_file) = out_file {
+        Some(
+            File::create(out_file)
+                .with_context(|| format!("Creating file {}", out_file.display()))?,
+        )
+    } else {
+        None
+    };
+
+    const BUFFER_SIZE: usize = 1015808; // LCM(0x8000, 0x7C00)
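+    // Sanity check on the constant above: gcd(0x8000, 0x7C00) = 0x400, so
+    // lcm = 32768 * 31744 / 1024 = 1015808. A buffer of this size stays aligned
+    // to whole blocks for both GC (0x8000) and Wii (0x7C00) streams.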
+#[argh(subcommand, name = "verify")] +struct VerifyArgs { + #[argh(positional)] + /// path to disc image + file: PathBuf, +} + +fn main() { + let args: TopLevel = argh::from_env(); + let result = match args.command { + SubCommand::Convert(c_args) => convert(c_args), + SubCommand::Extract(c_args) => extract(c_args), + SubCommand::Verify(c_args) => verify(c_args), + }; + if let Err(e) = result { + eprintln!("Failed: {}", e); + if let Some(source) = e.source() { + eprintln!("Caused by: {}", source); } - let mut disc_io = new_disc_io(file.as_path())?; - let disc_base = new_disc_base(disc_io.as_mut())?; - let mut partition = - disc_base.get_data_partition(disc_io.as_mut(), matches.is_present("validate"))?; - let header = partition.read_header()?; - extract_node( - header.root_node(), - partition.as_mut(), - output_dir.as_path(), - matches.is_present("quiet"), - )?; + std::process::exit(1); } - Result::Ok(()) +} + +fn convert(args: ConvertArgs) -> Result<()> { convert_and_verify(&args.file, Some(&args.out)) } + +fn verify(args: VerifyArgs) -> Result<()> { convert_and_verify(&args.file, None) } + +fn convert_and_verify(in_file: &Path, out_file: Option<&Path>) -> Result<()> { + println!("Loading {}", in_file.display()); + let mut disc_io = new_disc_io(in_file, &DiscIOOptions { rebuild_hashes: true })?; + let disc_base = new_disc_base(disc_io.as_mut())?; + let header = disc_base.get_header(); + println!( + "\nGame ID: {}{}{}{}{}{}", + header.game_id[0] as char, + header.game_id[1] as char, + header.game_id[2] as char, + header.game_id[3] as char, + header.game_id[4] as char, + header.game_id[5] as char + ); + println!("Game title: {}", header.game_title); + println!("Disc num: {}", header.disc_num); + println!("Disc version: {}", header.disc_version); + + let mut stream = disc_io.begin_read_stream(0).context("Creating disc read stream")?; + let mut crc = crc32fast::Hasher::new(); + let mut md5 = md5::Md5::new(); + let mut sha1 = sha1::Sha1::new(); + + let mut file = if let Some(out_file) = out_file { + Some( + File::create(out_file) + .with_context(|| format!("Creating file {}", out_file.display()))?, + ) + } else { + None + }; + + const BUFFER_SIZE: usize = 1015808; // LCM(0x8000, 0x7C00) + let mut buf = vec![0u8; BUFFER_SIZE]; + let mut total_read = 0u64; + loop { + let read = stream.read(&mut buf).with_context(|| { + format!("Reading {} bytes at disc offset {}", BUFFER_SIZE, total_read) + })?; + if read == 0 { + break; + } + let slice = &buf[..read]; + crc.update(slice); + md5.update(slice); + sha1.update(slice); + if let Some(file) = &mut file { + file.write_all(slice).with_context(|| { + format!("Writing {} bytes at offset {}", slice.len(), total_read) + })?; + } + total_read += read as u64; + } + + println!(); + println!("CRC32: {:08x}", crc.finalize()); + println!("MD5: {:032x}", md5.finalize()); + println!("SHA-1: {:040x}", sha1.finalize()); + if let (Some(path), Some(file)) = (out_file, &mut file) { + file.flush().context("Flushing output file")?; + println!("Wrote {} to {}", file_size::fit_4(total_read), path.display()); + } + Ok(()) +} + +fn extract(args: ExtractArgs) -> Result<()> { + let output_dir: PathBuf; + if let Some(dir) = args.out { + output_dir = dir; + } else if has_extension(&args.file, "nfs") { + // Special logic to extract from content/hif_*.nfs to extracted/.. 
+        if let Some(parent) = args.file.parent() {
+            output_dir = parent.with_file_name("extracted");
+        } else {
+            output_dir = args.file.with_extension("");
+        }
+    } else {
+        output_dir = args.file.with_extension("");
+    }
+    let mut disc_io = new_disc_io(&args.file, &DiscIOOptions { rebuild_hashes: args.validate })?;
+    let disc_base = new_disc_base(disc_io.as_mut())?;
+    let mut partition =
+        disc_base.get_partition(disc_io.as_mut(), PartitionType::Data, args.validate)?;
+    let header = partition.read_header()?;
+    extract_sys_files(header.as_ref(), &output_dir.join("sys"), args.quiet)?;
+    extract_node(header.root_node(), partition.as_mut(), &output_dir.join("files"), args.quiet)?;
+    Ok(())
+}
+
+fn extract_sys_files(header: &dyn PartHeader, out_dir: &Path, quiet: bool) -> Result<()> {
+    fs::create_dir_all(out_dir)
+        .with_context(|| format!("Creating output directory {}", out_dir.display()))?;
+    extract_file(header.boot_bytes(), &out_dir.join("boot.bin"), quiet)?;
+    extract_file(header.bi2_bytes(), &out_dir.join("bi2.bin"), quiet)?;
+    extract_file(header.apploader_bytes(), &out_dir.join("apploader.img"), quiet)?;
+    extract_file(header.fst_bytes(), &out_dir.join("fst.bin"), quiet)?;
+    extract_file(header.dol_bytes(), &out_dir.join("main.dol"), quiet)?;
+    Ok(())
+}
+
+fn extract_file(bytes: &[u8], out_path: &Path, quiet: bool) -> Result<()> {
+    if !quiet {
+        println!(
+            "Extracting {} (size: {})",
+            out_path.display(),
+            file_size::fit_4(bytes.len() as u64)
+        );
+    }
+    fs::write(out_path, bytes).with_context(|| format!("Writing file {}", out_path.display()))?;
+    Ok(())
+}
 
@@ -76,37 +207,49 @@ fn extract_node(
     node: &NodeType,
     partition: &mut dyn PartReadStream,
     base_path: &Path,
     quiet: bool,
-) -> io::Result<()> {
+) -> Result<()> {
     match node {
         NodeType::File(v) => {
-            let mut file_path = base_path.to_owned();
-            file_path.push(v.name.as_ref());
+            let mut file_path = base_path.to_path_buf();
+            file_path.push(v.name.as_str());
             if !quiet {
                 println!(
                     "Extracting {} (size: {})",
-                    file_path.to_string_lossy(),
+                    file_path.display(),
                     file_size::fit_4(v.length as u64)
                 );
             }
-            let file = fs::File::create(file_path)?;
+            let file = File::create(&file_path)
+                .with_context(|| format!("Creating file {}", file_path.display()))?;
             let mut buf_writer = BufWriter::with_capacity(partition.ideal_buffer_size(), file);
-            io::copy(&mut partition.begin_file_stream(v)?, &mut buf_writer)?;
+            let mut stream = partition.begin_file_stream(v).with_context(|| {
+                format!(
+                    "Opening file {} on disc for reading (offset {}, size {})",
+                    v.name, v.offset, v.length
+                )
+            })?;
+            io::copy(&mut stream, &mut buf_writer)
+                .with_context(|| format!("Extracting file {}", file_path.display()))?;
+            buf_writer.flush().with_context(|| format!("Flushing file {}", file_path.display()))?;
        }
         NodeType::Directory(v, c) => {
             if v.name.is_empty() {
-                fs::create_dir_all(base_path)?;
+                fs::create_dir_all(base_path).with_context(|| {
+                    format!("Creating output directory {}", base_path.display())
+                })?;
                 for x in c {
                     extract_node(x, partition, base_path, quiet)?;
                 }
             } else {
-                let mut new_base = base_path.to_owned();
-                new_base.push(v.name.as_ref());
-                fs::create_dir_all(&new_base)?;
+                let mut new_base = base_path.to_path_buf();
+                new_base.push(v.name.as_str());
+                fs::create_dir_all(&new_base)
+                    .with_context(|| format!("Creating output directory {}", new_base.display()))?;
                 for x in c {
                     extract_node(x, partition, new_base.as_path(), quiet)?;
                 }
             }
         }
     }
-    io::Result::Ok(())
+    Ok(())
 }
diff --git a/src/disc/gcn.rs b/src/disc/gcn.rs
index 27ac840..997879f 100644
--- a/src/disc/gcn.rs
+++ b/src/disc/gcn.rs
@@ -1,16 +1,20 @@
 use std::{
     io,
-    io::{Read, Seek, SeekFrom},
+    io::{Cursor, Read, Seek, SeekFrom},
 };
 
-use binrw::{BinRead, BinReaderExt};
-
 use crate::{
-    disc::{BI2Header, DiscBase, DiscIO, Header, PartHeader, PartReadStream, BUFFER_SIZE},
-    div_rem,
-    fst::{find_node, node_parser, Node, NodeKind, NodeType},
+    disc::{
+        AppLoaderHeader, DiscBase, DiscIO, DolHeader, Header, PartHeader, PartReadStream,
+        PartitionHeader, PartitionType, SECTOR_SIZE,
+    },
+    fst::{find_node, read_fst, Node, NodeKind, NodeType},
     streams::{ReadStream, SharedWindowedReadStream},
-    Result,
+    util::{
+        div_rem,
+        reader::{read_bytes, FromReader},
+    },
+    Error, Result, ResultContext,
 };
 
 pub(crate) struct DiscGCN {
@@ -18,7 +22,7 @@ pub(crate) struct DiscGCN {
 }
 
 impl DiscGCN {
-    pub(crate) fn new(header: Header) -> Result<DiscGCN> { Result::Ok(DiscGCN { header }) }
+    pub(crate) fn new(header: Header) -> Result<DiscGCN> { Ok(DiscGCN { header }) }
 }
 
 impl DiscBase for DiscGCN {
@@ -29,37 +33,62 @@ impl DiscBase for DiscGCN {
         disc_io: &'a mut dyn DiscIO,
         _validate_hashes: bool,
     ) -> Result<Box<dyn PartReadStream + 'a>> {
-        Result::Ok(Box::from(GCPartReadStream {
-            stream: disc_io.begin_read_stream(0)?,
+        let stream = disc_io.begin_read_stream(0).context("Opening data partition stream")?;
+        Ok(Box::from(GCPartReadStream {
+            stream,
             offset: 0,
-            cur_block: u64::MAX,
-            buf: [0; BUFFER_SIZE],
+            cur_block: u32::MAX,
+            buf: [0; SECTOR_SIZE],
         }))
     }
+
+    fn get_partition<'a>(
+        &self,
+        disc_io: &'a mut dyn DiscIO,
+        part_type: PartitionType,
+        _validate_hashes: bool,
+    ) -> Result<Box<dyn PartReadStream + 'a>> {
+        if part_type == PartitionType::Data {
+            Ok(Box::from(GCPartReadStream {
+                stream: disc_io.begin_read_stream(0).context("Opening partition read stream")?,
+                offset: 0,
+                cur_block: u32::MAX,
+                buf: [0; SECTOR_SIZE],
+            }))
+        } else {
+            Err(Error::DiscFormat(format!(
+                "Invalid partition type {:?} for GameCube disc",
+                part_type
+            )))
+        }
+    }
 }
 
 struct GCPartReadStream<'a> {
     stream: Box<dyn ReadStream + 'a>,
     offset: u64,
-    cur_block: u64,
-    buf: [u8; BUFFER_SIZE],
+    cur_block: u32,
+    buf: [u8; SECTOR_SIZE],
 }
 
 impl<'a> Read for GCPartReadStream<'a> {
     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
-        let (mut block, mut block_offset) = div_rem(self.offset as usize, BUFFER_SIZE);
+        let (block, block_offset) = div_rem(self.offset, SECTOR_SIZE as u64);
+        let mut block = block as u32;
+        let mut block_offset = block_offset as usize;
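+        // `buf` caches one raw sector; it is refilled whenever the read position
+        // crosses into a new SECTOR_SIZE-aligned block (tracked by `cur_block`).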
+
         let mut rem = buf.len();
         let mut read: usize = 0;
         while rem > 0 {
-            if block != self.cur_block as usize {
+            if block != self.cur_block {
                 self.stream.read_exact(&mut self.buf)?;
-                self.cur_block = block as u64;
+                self.cur_block = block;
             }
 
             let mut cache_size = rem;
-            if cache_size + block_offset > BUFFER_SIZE {
-                cache_size = BUFFER_SIZE - block_offset;
+            if cache_size + block_offset > SECTOR_SIZE {
+                cache_size = SECTOR_SIZE - block_offset;
             }
 
             buf[read..read + cache_size]
@@ -71,7 +100,7 @@ impl<'a> Read for GCPartReadStream<'a> {
         }
 
         self.offset += buf.len() as u64;
-        io::Result::Ok(buf.len())
+        Ok(buf.len())
     }
 }
 
@@ -82,15 +111,15 @@ impl<'a> Seek for GCPartReadStream<'a> {
             SeekFrom::End(v) => (self.stable_stream_len()? as i64 + v) as u64,
             SeekFrom::Current(v) => (self.offset as i64 + v) as u64,
         };
-        let block = self.offset / BUFFER_SIZE as u64;
-        if block != self.cur_block {
-            self.stream.seek(SeekFrom::Start(block * BUFFER_SIZE as u64))?;
-            self.cur_block = u64::MAX;
+        let block = self.offset / SECTOR_SIZE as u64;
+        if block as u32 != self.cur_block {
+            self.stream.seek(SeekFrom::Start(block * SECTOR_SIZE as u64))?;
+            self.cur_block = u32::MAX;
         }
-        io::Result::Ok(self.offset)
+        Ok(self.offset)
     }
 
-    fn stream_position(&mut self) -> io::Result<u64> { io::Result::Ok(self.offset) }
+    fn stream_position(&mut self) -> io::Result<u64> { Ok(self.offset) }
 }
 
 impl<'a> ReadStream for GCPartReadStream<'a> {
@@ -102,28 +131,129 @@ impl<'a> PartReadStream for GCPartReadStream<'a> {
     fn begin_file_stream(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream> {
         assert_eq!(node.kind, NodeKind::File);
-        io::Result::Ok(self.new_window(node.offset as u64, node.length as u64)?)
+        self.new_window(node.offset as u64, node.length as u64)
     }
 
     fn read_header(&mut self) -> Result<Box<dyn PartHeader>> {
-        self.seek(SeekFrom::Start(0))?;
-        Result::Ok(Box::from(self.read_be::<GCPartition>()?))
+        self.seek(SeekFrom::Start(0)).context("Seeking to partition header")?;
+        Ok(Box::from(read_part_header(self)?))
     }
 
-    fn ideal_buffer_size(&self) -> usize { BUFFER_SIZE }
+    fn ideal_buffer_size(&self) -> usize { SECTOR_SIZE }
 }
 
+const BOOT_SIZE: usize = Header::STATIC_SIZE + PartitionHeader::STATIC_SIZE;
+const BI2_SIZE: usize = 0x2000;
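+// Size check: Header::STATIC_SIZE (0x400) + PartitionHeader::STATIC_SIZE (0x40)
+// makes BOOT_SIZE 0x440 bytes, the canonical size of boot.bin.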
.context("Seeking to DOL offset")?; + let mut raw_dol = read_bytes(reader, DolHeader::STATIC_SIZE).context("Reading DOL header")?; + let dol_header = + DolHeader::from_reader(&mut raw_dol.as_slice()).context("Parsing DOL header")?; + let dol_size = dol_header + .text_offs + .iter() + .zip(&dol_header.text_sizes) + .map(|(offs, size)| offs + size) + .chain( + dol_header.data_offs.iter().zip(&dol_header.data_sizes).map(|(offs, size)| offs + size), + ) + .max() + .unwrap_or(DolHeader::STATIC_SIZE as u32); + raw_dol.resize(dol_size as usize, 0); + reader.read_exact(&mut raw_dol[DolHeader::STATIC_SIZE..]).context("Reading DOL")?; + + Ok(GCPartition { + raw_boot, + raw_bi2, + raw_apploader, + raw_fst, + raw_dol, + header, + partition_header, + apploader_header, + root_node, + dol_header, + }) } impl PartHeader for GCPartition { fn root_node(&self) -> &NodeType { &self.root_node } fn find_node(&self, path: &str) -> Option<&NodeType> { find_node(&self.root_node, path) } + + fn boot_bytes(&self) -> &[u8] { &self.raw_boot } + + fn bi2_bytes(&self) -> &[u8] { &self.raw_bi2 } + + fn apploader_bytes(&self) -> &[u8] { &self.raw_apploader } + + fn fst_bytes(&self) -> &[u8] { &self.raw_fst } + + fn dol_bytes(&self) -> &[u8] { &self.raw_dol } + + fn disc_header(&self) -> &Header { &self.header } + + fn partition_header(&self) -> &PartitionHeader { &self.partition_header } + + fn apploader_header(&self) -> &AppLoaderHeader { &self.apploader_header } + + fn dol_header(&self) -> &DolHeader { &self.dol_header } } diff --git a/src/disc/mod.rs b/src/disc/mod.rs index 6dd4176..5da9a24 100644 --- a/src/disc/mod.rs +++ b/src/disc/mod.rs @@ -1,22 +1,21 @@ //! Disc type related logic (GameCube, Wii) -use std::{fmt::Debug, io}; - -use binrw::{BinRead, BinReaderExt, NullString}; +use std::{ffi::CStr, fmt::Debug, io, io::Read}; use crate::{ disc::{gcn::DiscGCN, wii::DiscWii}, fst::{Node, NodeType}, io::DiscIO, streams::{ReadStream, SharedWindowedReadStream}, - Error, Result, + util::reader::{skip_bytes, struct_size, FromReader}, + Error, Result, ResultContext, }; pub(crate) mod gcn; pub(crate) mod wii; /// Shared GameCube & Wii disc header -#[derive(Clone, Debug, PartialEq, BinRead)] +#[derive(Clone, Debug, PartialEq)] pub struct Header { /// Game ID (e.g. GM8E01 for Metroid Prime) pub game_id: [u8; 6], @@ -28,24 +27,67 @@ pub struct Header { pub audio_streaming: u8, /// Audio streaming buffer size pub audio_stream_buf_size: u8, - #[br(pad_before(14))] /// If this is a Wii disc, this will be 0x5D1C9EA3 pub wii_magic: u32, /// If this is a GameCube disc, this will be 0xC2339F3D pub gcn_magic: u32, /// Game title - #[br(pad_size_to(64), map = NullString::into_string)] pub game_title: String, /// Disable hash verification pub disable_hash_verification: u8, /// Disable disc encryption and H3 hash table loading and verification pub disable_disc_enc: u8, +} + +fn from_c_str(bytes: &[u8]) -> io::Result { + CStr::from_bytes_until_nul(bytes) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))? 
+        Ok(Self {
+            game_id,
+            disc_num,
+            disc_version,
+            audio_streaming,
+            audio_stream_buf_size,
+            wii_magic,
+            gcn_magic,
+            game_title,
+            disable_hash_verification,
+            disable_disc_enc,
+        })
+    }
+}
+
+/// Partition header
+#[derive(Clone, Debug, PartialEq)]
+pub struct PartitionHeader {
     /// Debug monitor offset
-    #[br(pad_before(0x39e))]
     pub debug_mon_off: u32,
     /// Debug monitor load address
     pub debug_load_addr: u32,
-    #[br(pad_before(0x18))]
     /// Offset to main DOL (Wii: >> 2)
     pub dol_off: u32,
     /// Offset to file system table (Wii: >> 2)
@@ -59,28 +101,130 @@ pub struct Header {
     /// User position
     pub user_position: u32,
     /// User size
-    #[br(pad_after(4))]
     pub user_sz: u32,
 }
 
-#[derive(Debug, PartialEq, BinRead, Copy, Clone)]
-pub(crate) struct BI2Header {
-    pub(crate) debug_monitor_size: i32,
-    pub(crate) sim_mem_size: i32,
-    pub(crate) arg_offset: u32,
-    pub(crate) debug_flag: u32,
-    pub(crate) trk_address: u32,
-    pub(crate) trk_size: u32,
-    pub(crate) country_code: u32,
-    pub(crate) unk1: u32,
-    pub(crate) unk2: u32,
-    pub(crate) unk3: u32,
-    pub(crate) dol_limit: u32,
-    #[br(pad_after(0x1fd0))]
-    pub(crate) unk4: u32,
+impl FromReader for PartitionHeader {
+    type Args<'a> = ();
+
+    const STATIC_SIZE: usize = struct_size([
+        u32::STATIC_SIZE, // debug_mon_off
+        u32::STATIC_SIZE, // debug_load_addr
+        0x18,             // padding
+        u32::STATIC_SIZE, // dol_off
+        u32::STATIC_SIZE, // fst_off
+        u32::STATIC_SIZE, // fst_sz
+        u32::STATIC_SIZE, // fst_max_sz
+        u32::STATIC_SIZE, // fst_memory_address
+        u32::STATIC_SIZE, // user_position
+        u32::STATIC_SIZE, // user_sz
+        4,                // padding
+    ]);
+
+    fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
+    where R: Read + ?Sized {
+        let debug_mon_off = u32::from_reader(reader)?;
+        let debug_load_addr = u32::from_reader(reader)?;
+        skip_bytes::<0x18, _>(reader)?; // padding
+        let dol_off = u32::from_reader(reader)?;
+        let fst_off = u32::from_reader(reader)?;
+        let fst_sz = u32::from_reader(reader)?;
+        let fst_max_sz = u32::from_reader(reader)?;
+        let fst_memory_address = u32::from_reader(reader)?;
+        let user_position = u32::from_reader(reader)?;
+        let user_sz = u32::from_reader(reader)?;
+        skip_bytes::<4, _>(reader)?; // padding
+        Ok(Self {
+            debug_mon_off,
+            debug_load_addr,
+            dol_off,
+            fst_off,
+            fst_sz,
+            fst_max_sz,
+            fst_memory_address,
+            user_position,
+            user_sz,
+        })
+    }
 }
 
-pub(crate) const BUFFER_SIZE: usize = 0x8000;
+#[derive(Debug, PartialEq, Clone)]
+pub struct AppLoaderHeader {
+    pub date: String,
+    pub entry_point: u32,
+    pub size: u32,
+    pub trailer_size: u32,
+}
+
+impl FromReader for AppLoaderHeader {
+    type Args<'a> = ();
+
+    const STATIC_SIZE: usize
 = struct_size([
+        16,               // date
+        u32::STATIC_SIZE, // entry_point
+        u32::STATIC_SIZE, // size
+        u32::STATIC_SIZE, // trailer_size
+        4,                // padding
+    ]);
+
+    fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
+    where R: Read + ?Sized {
+        let date = from_c_str(&<[u8; 16]>::from_reader(reader)?)?;
+        let entry_point = u32::from_reader(reader)?;
+        let size = u32::from_reader(reader)?;
+        let trailer_size = u32::from_reader(reader)?;
+        skip_bytes::<4, _>(reader)?; // padding
+        Ok(Self { date, entry_point, size, trailer_size })
+    }
+}
+
+pub const DOL_MAX_TEXT_SECTIONS: usize = 7;
+pub const DOL_MAX_DATA_SECTIONS: usize = 11;
+
+#[derive(Debug, Clone)]
+pub struct DolHeader {
+    pub text_offs: [u32; DOL_MAX_TEXT_SECTIONS],
+    pub data_offs: [u32; DOL_MAX_DATA_SECTIONS],
+    pub text_addrs: [u32; DOL_MAX_TEXT_SECTIONS],
+    pub data_addrs: [u32; DOL_MAX_DATA_SECTIONS],
+    pub text_sizes: [u32; DOL_MAX_TEXT_SECTIONS],
+    pub data_sizes: [u32; DOL_MAX_DATA_SECTIONS],
+    pub bss_addr: u32,
+    pub bss_size: u32,
+    pub entry_point: u32,
+}
+
+impl FromReader for DolHeader {
+    type Args<'a> = ();
+
+    const STATIC_SIZE: usize = 0x100;
+
+    fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
+    where R: Read + ?Sized {
+        let result = Self {
+            text_offs: <_>::from_reader(reader)?,
+            data_offs: <_>::from_reader(reader)?,
+            text_addrs: <_>::from_reader(reader)?,
+            data_addrs: <_>::from_reader(reader)?,
+            text_sizes: <_>::from_reader(reader)?,
+            data_sizes: <_>::from_reader(reader)?,
+            bss_addr: <_>::from_reader(reader)?,
+            bss_size: <_>::from_reader(reader)?,
+            entry_point: <_>::from_reader(reader)?,
+        };
+        skip_bytes::<0x1C, _>(reader)?; // padding
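+        // Sanity check: 3 * (7 + 11) section arrays * 4 bytes + 3 trailing u32s
+        // = 228 bytes; the 0x1C padding above brings the total to STATIC_SIZE (0x100).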
+        Ok(result)
+    }
+}
+
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
+pub enum PartitionType {
+    Data,
+    Update,
+    Channel,
+}
+
+pub(crate) const SECTOR_SIZE: usize = 0x8000;
 
 /// Contains a disc's header & partition information.
 pub trait DiscBase: Send + Sync {
@@ -95,19 +239,35 @@ pub trait DiscBase: Send + Sync {
     ///
     /// Basic usage:
     /// ```no_run
-    /// use nod::disc::new_disc_base;
-    /// use nod::io::new_disc_io;
+    /// use nod::{
+    ///     disc::new_disc_base,
+    ///     io::{new_disc_io, DiscIOOptions},
+    /// };
     ///
-    /// let mut disc_io = new_disc_io("path/to/file".as_ref())?;
+    /// # fn main() -> nod::Result<()> {
+    /// let options = DiscIOOptions::default();
+    /// let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?;
     /// let disc_base = new_disc_base(disc_io.as_mut())?;
     /// let mut partition = disc_base.get_data_partition(disc_io.as_mut(), false)?;
-    /// # Ok::<(), nod::Error>(())
+    /// # Ok(())
+    /// # }
     /// ```
     fn get_data_partition<'a>(
         &self,
         disc_io: &'a mut dyn DiscIO,
         validate_hashes: bool,
     ) -> Result<Box<dyn PartReadStream + 'a>>;
+
+    /// Opens a new partition read stream for the first partition matching
+    /// the specified type.
+    ///
+    /// `validate_hashes`: Validate Wii disc hashes while reading (slow!)
+    fn get_partition<'a>(
+        &self,
+        disc_io: &'a mut dyn DiscIO,
+        part_type: PartitionType,
+        validate_hashes: bool,
+    ) -> Result<Box<dyn PartReadStream + 'a>>;
 }
 
 /// Creates a new [`DiscBase`] instance.
@@ -116,23 +276,31 @@ pub trait DiscBase: Send + Sync {
 ///
 /// Basic usage:
 /// ```no_run
-/// use nod::io::new_disc_io;
-/// use nod::disc::new_disc_base;
+/// use nod::{
+///     disc::new_disc_base,
+///     io::{new_disc_io, DiscIOOptions},
+/// };
 ///
-/// let mut disc_io = new_disc_io("path/to/file".as_ref())?;
+/// # fn main() -> nod::Result<()> {
+/// let options = DiscIOOptions::default();
+/// let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?;
 /// let disc_base = new_disc_base(disc_io.as_mut())?;
 /// disc_base.get_header();
-/// # Ok::<(), nod::Error>(())
+/// # Ok(())
+/// # }
 /// ```
 pub fn new_disc_base(disc_io: &mut dyn DiscIO) -> Result<Box<dyn DiscBase>> {
-    let mut stream = disc_io.begin_read_stream(0)?;
-    let header: Header = stream.read_be()?;
+    let mut stream = disc_io.begin_read_stream(0).context("Opening disc stream")?;
+    let header_bytes =
+        <[u8; Header::STATIC_SIZE]>::from_reader(&mut stream).context("Reading disc header")?;
+    let header =
+        Header::from_reader(&mut header_bytes.as_slice()).context("Parsing disc header")?;
     if header.wii_magic == 0x5D1C9EA3 {
-        Result::Ok(Box::from(DiscWii::new(stream.as_mut(), header)?))
+        Ok(Box::from(DiscWii::new(stream.as_mut(), header)?))
     } else if header.gcn_magic == 0xC2339F3D {
-        Result::Ok(Box::from(DiscGCN::new(header)?))
+        Ok(Box::from(DiscGCN::new(header)?))
     } else {
-        Result::Err(Error::DiscFormat("Invalid GC/Wii magic".to_string()))
+        Err(Error::DiscFormat(format!("Invalid GC/Wii magic: {:#010X}", header.wii_magic)))
     }
 }
 
 /// An open read stream for a disc partition.
 pub trait PartReadStream: ReadStream {
@@ -145,21 +313,27 @@ pub trait PartReadStream: ReadStream {
     ///
     /// Basic usage:
     /// ```no_run
-    /// use nod::disc::{new_disc_base, PartHeader};
-    /// use nod::fst::NodeType;
-    /// use nod::io::new_disc_io;
     /// use std::io::Read;
     ///
-    /// let mut disc_io = new_disc_io("path/to/file".as_ref())?;
-    /// let disc_base = new_disc_base(disc_io.as_mut())?;
-    /// let mut partition = disc_base.get_data_partition(disc_io.as_mut(), false)?;
-    /// let header = partition.read_header()?;
-    /// if let Some(NodeType::File(node)) = header.find_node("/MP3/Worlds.txt") {
-    ///     let mut s = String::new();
-    ///     partition.begin_file_stream(node)?.read_to_string(&mut s);
-    ///     println!("{}", s);
+    /// use nod::{
+    ///     disc::{new_disc_base, PartHeader},
+    ///     fst::NodeType,
+    ///     io::{new_disc_io, DiscIOOptions},
+    /// };
+    ///
+    /// fn main() -> nod::Result<()> {
+    ///     let options = DiscIOOptions::default();
+    ///     let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?;
+    ///     let disc_base = new_disc_base(disc_io.as_mut())?;
+    ///     let mut partition = disc_base.get_data_partition(disc_io.as_mut(), false)?;
+    ///     let header = partition.read_header()?;
+    ///     if let Some(NodeType::File(node)) = header.find_node("/MP3/Worlds.txt") {
+    ///         let mut s = String::new();
+    ///         partition.begin_file_stream(node)?.read_to_string(&mut s).expect("Failed to read file");
+    ///         println!("{}", s);
+    ///     }
+    ///     Ok(())
     /// }
-    /// # Ok::<(), nod::Error>(())
     /// ```
     fn begin_file_stream(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream>;
 
@@ -183,21 +357,53 @@ pub trait PartHeader: Debug + Send + Sync {
     ///
     /// Basic usage:
     /// ```no_run
-    /// use nod::disc::{new_disc_base, PartHeader};
-    /// use nod::fst::NodeType;
-    /// use nod::io::new_disc_io;
+    /// use nod::{
+    ///     disc::{new_disc_base, PartHeader},
+    ///     fst::NodeType,
+    ///     io::{new_disc_io, DiscIOOptions},
+    /// };
     ///
-    /// let mut disc_io = new_disc_io("path/to/file".as_ref())?;
-    /// let disc_base = new_disc_base(disc_io.as_mut())?;
-    /// let mut partition = disc_base.get_data_partition(disc_io.as_mut(), false)?;
-    /// let header = partition.read_header()?;
-    /// if let Some(NodeType::File(node)) = header.find_node("/MP1/Metroid1.pak") {
-    ///     println!("{}", node.name);
+    /// fn main() -> nod::Result<()> {
+    ///     let options = DiscIOOptions::default();
+    ///     let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?;
+    ///     let disc_base = new_disc_base(disc_io.as_mut())?;
+    ///     let mut partition = disc_base.get_data_partition(disc_io.as_mut(), false)?;
+    ///     let header = partition.read_header()?;
+    ///     if let Some(NodeType::File(node)) = header.find_node("/MP1/Metroid1.pak") {
+    ///         println!("{}", node.name);
+    ///     }
+    ///     if let Some(NodeType::Directory(node, children)) = header.find_node("/MP1") {
+    ///         println!("Number of files: {}", children.len());
+    ///     }
+    ///     Ok(())
     /// }
-    /// if let Some(NodeType::Directory(node, children)) = header.find_node("/MP1") {
-    ///     println!("Number of files: {}", children.len());
-    /// }
-    /// # Ok::<(), nod::Error>(())
     /// ```
     fn find_node(&self, path: &str) -> Option<&NodeType>;
+
+    /// Disc and partition header (boot.bin)
+    fn boot_bytes(&self) -> &[u8];
+
+    /// Debug and region information (bi2.bin)
+    fn bi2_bytes(&self) -> &[u8];
+
+    /// Apploader (apploader.bin)
+    fn apploader_bytes(&self) -> &[u8];
+
+    /// File system table (fst.bin)
+    fn fst_bytes(&self) -> &[u8];
+
+    /// Main binary (main.dol)
+    fn dol_bytes(&self) -> &[u8];
+
+    /// Disc header
+    fn disc_header(&self) -> &Header;
+
+    /// Partition header
+    fn partition_header(&self) -> &PartitionHeader;
+
+    /// Apploader header
+    fn apploader_header(&self) -> &AppLoaderHeader;
+
+    /// DOL header
+    fn dol_header(&self) -> &DolHeader;
 }
diff --git a/src/disc/wii.rs b/src/disc/wii.rs
index 8dd7114..7ed0542 100644
--- a/src/disc/wii.rs
+++ b/src/disc/wii.rs
@@ -3,24 +3,33 @@ use std::{
     io::{Read, Seek, SeekFrom},
 };
 
-use aes::{Aes128, Block, NewBlockCipher};
-use binrw::{BinRead, BinReaderExt};
-use block_modes::{block_padding::NoPadding, BlockMode, Cbc};
+use aes::{
+    cipher::{block_padding::NoPadding, BlockDecryptMut, KeyIvInit},
+    Aes128, Block,
+};
 use sha1::{digest, Digest, Sha1};
 
 use crate::{
     array_ref,
-    disc::{BI2Header, DiscBase, DiscIO, Header, PartHeader, PartReadStream, BUFFER_SIZE},
-    div_rem,
-    fst::{find_node, node_parser, Node, NodeKind, NodeType},
+    disc::{
+        AppLoaderHeader, DiscBase, DiscIO, DolHeader, Header, PartHeader, PartReadStream,
+        PartitionHeader, PartitionType, SECTOR_SIZE,
+    },
+    fst::{find_node, Node, NodeKind, NodeType},
     streams::{wrap_windowed, OwningWindowedReadStream, ReadStream, SharedWindowedReadStream},
-    Error, Result,
+    util::{
+        div_rem,
+        reader::{skip_bytes, struct_size, FromReader},
+    },
+    Error, Result, ResultContext,
 };
 
-type Aes128Cbc = Cbc<Aes128, NoPadding>;
+pub(crate) const HASHES_SIZE: usize = 0x400;
+pub(crate) const BLOCK_SIZE: usize = SECTOR_SIZE - HASHES_SIZE; // 0x7C00
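+// A raw Wii sector is 0x8000 bytes: a 0x400-byte hash area (the H0/H1/H2 tables)
+// followed by 0x7C00 bytes of encrypted user data.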
+
+/// AES-128-CBC decryptor
+type Aes128Cbc = cbc::Decryptor<Aes128>;
 
-const BLOCK_SIZE: usize = 0x7c00;
-const BUFFER_OFFSET: usize = BUFFER_SIZE - BLOCK_SIZE;
 #[rustfmt::skip]
 const COMMON_KEYS: [[u8; 16]; 2] = [
     /* Normal */
@@ -29,82 +38,186 @@ const COMMON_KEYS: [[u8; 16]; 2] = [
     [0x63, 0xb8, 0x2b, 0xb4, 0xf4, 0x61, 0x4e, 0x2e, 0x13, 0xf2, 0xfe, 0xfb, 0xba, 0x4c, 0x9b, 0x7e],
 ];
 
-#[derive(Debug, PartialEq, BinRead)]
-#[br(repr = u32)]
-enum WiiPartType {
-    Data,
-    Update,
-    Channel,
-}
-
-#[derive(Debug, PartialEq, BinRead)]
-#[br(repr = u32)]
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
 enum SigType {
-    Rsa4096 = 0x00010000,
-    Rsa2048 = 0x00010001,
-    EllipticalCurve = 0x00010002,
+    Rsa4096,
+    Rsa2048,
+    EllipticalCurve,
 }
 
+impl FromReader for SigType {
+    type Args<'a> = ();
+
+    const STATIC_SIZE: usize = u32::STATIC_SIZE;
+
+    fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
+    where R: Read + ?Sized {
+        match u32::from_reader(reader)? {
+            0x00010000 => Ok(SigType::Rsa4096),
+            0x00010001 => Ok(SigType::Rsa2048),
+            0x00010002 => Ok(SigType::EllipticalCurve),
+            _ => Err(io::Error::new(io::ErrorKind::InvalidData, "invalid signature type")),
+        }
+    }
+}
+
+impl SigType {
+    fn size(self) -> usize {
+        match self {
+            SigType::Rsa4096 => 512,
+            SigType::Rsa2048 => 256,
+            SigType::EllipticalCurve => 64,
+        }
+    }
+}
+
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
 enum KeyType {
-    Rsa4096 = 0x00000000,
-    Rsa2048 = 0x00000001,
+    Rsa4096,
+    Rsa2048,
 }
 
-#[derive(Debug, PartialEq, BinRead)]
+impl FromReader for KeyType {
+    type Args<'a> = ();
+
+    const STATIC_SIZE: usize = u32::STATIC_SIZE;
+
+    fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
+    where R: Read + ?Sized {
+        match u32::from_reader(reader)? {
+            0x00000000 => Ok(KeyType::Rsa4096),
+            0x00000001 => Ok(KeyType::Rsa2048),
+            _ => Err(io::Error::new(io::ErrorKind::InvalidData, "invalid key type")),
+        }
+    }
+}
+
+impl KeyType {
+    fn size(self) -> usize {
+        match self {
+            KeyType::Rsa4096 => 512,
+            KeyType::Rsa2048 => 256,
+        }
+    }
+}
+
+#[derive(Debug, PartialEq)]
 struct WiiPart {
-    #[br(map = | x: u32 | (x as u64) << 2)]
+    // #[br(map = |x: u32| (x as u64) << 2)]
     part_data_off: u64,
-    part_type: WiiPartType,
-    #[br(restore_position, args(part_data_off))]
+    part_type: PartitionType,
+    // #[br(restore_position, args(part_data_off))]
     part_header: WiiPartitionHeader,
 }
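+// Note: the Wii partition table stores byte offsets as u32 values shifted right
+// by 2; the commented-out binrw maps above restored the real offset via `<< 2`.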
 
-#[derive(Debug, PartialEq, BinRead)]
+#[derive(Debug, PartialEq)]
 struct WiiPartInfo {
-    #[br(seek_before = SeekFrom::Start(0x40000))]
+    // #[br(seek_before = SeekFrom::Start(0x40000))]
     part_count: u32,
-    #[br(map = | x: u32 | (x as u64) << 2)]
+    // #[br(map = |x: u32| (x as u64) << 2)]
     part_info_off: u64,
-    #[br(seek_before = SeekFrom::Start(part_info_off), count = part_count)]
+    // #[br(seek_before = SeekFrom::Start(part_info_off), count = part_count)]
     parts: Vec<WiiPart>,
 }
 
-#[derive(Debug, PartialEq, BinRead)]
+#[derive(Debug, PartialEq, Default)]
 struct TicketTimeLimit {
     enable_time_limit: u32,
     time_limit: u32,
 }
 
+impl FromReader for TicketTimeLimit {
+    type Args<'a> = ();
+
+    const STATIC_SIZE: usize = struct_size([
+        u32::STATIC_SIZE, // enable_time_limit
+        u32::STATIC_SIZE, // time_limit
+    ]);
+
+    fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
+    where R: Read + ?Sized {
+        let enable_time_limit = u32::from_reader(reader)?;
+        let time_limit = u32::from_reader(reader)?;
+        Ok(TicketTimeLimit { enable_time_limit, time_limit })
+    }
+}
+
-#[derive(Debug, PartialEq, BinRead)]
+#[derive(Debug, PartialEq)]
 struct Ticket {
     sig_type: SigType,
-    #[br(count = 256)]
-    sig: Vec<u8>,
-    #[br(pad_before = 60, count = 64)]
-    sig_issuer: Vec<u8>,
-    #[br(count = 60)]
-    ecdh: Vec<u8>,
-    #[br(pad_before = 3)]
+    sig: [u8; 256],
+    sig_issuer: [u8; 64],
+    ecdh: [u8; 60],
     enc_key: [u8; 16],
-    #[br(pad_before = 1)]
     ticket_id: [u8; 8],
     console_id: [u8; 4],
     title_id: [u8; 8],
-    #[br(pad_before = 2)]
     ticket_version: u16,
     permitted_titles_mask: u32,
     permit_mask: u32,
     title_export_allowed: u8,
     common_key_idx: u8,
-    #[br(pad_before = 48, count = 64)]
-    content_access_permissions: Vec<u8>,
-    #[br(pad_before = 2, count = 8)]
-    time_limits: Vec<TicketTimeLimit>,
+    content_access_permissions: [u8; 64],
+    time_limits: [TicketTimeLimit; 8],
 }
 
+impl FromReader for Ticket {
+    type Args<'a> = ();
+
+    const STATIC_SIZE: usize = 0x2A4;
+
+    fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
+    where R: Read + ?Sized {
+        let sig_type = SigType::from_reader(reader)?;
+        let sig = <[u8; 256]>::from_reader(reader)?;
+        skip_bytes::<0x3C, _>(reader)?;
+        let sig_issuer = <[u8; 64]>::from_reader(reader)?;
+        let ecdh = <[u8; 60]>::from_reader(reader)?;
+        skip_bytes::<3, _>(reader)?;
+        let enc_key = <[u8; 16]>::from_reader(reader)?;
+        skip_bytes::<1, _>(reader)?;
+        let ticket_id = <[u8; 8]>::from_reader(reader)?;
+        let console_id = <[u8; 4]>::from_reader(reader)?;
+        let title_id = <[u8; 8]>::from_reader(reader)?;
+        skip_bytes::<2, _>(reader)?;
+        let ticket_version = u16::from_reader(reader)?;
+        let permitted_titles_mask = u32::from_reader(reader)?;
+        let permit_mask = u32::from_reader(reader)?;
+        let title_export_allowed = u8::from_reader(reader)?;
+        let common_key_idx = u8::from_reader(reader)?;
+        skip_bytes::<48, _>(reader)?;
+        let content_access_permissions = <[u8; 64]>::from_reader(reader)?;
+        let time_limits = [
+            TicketTimeLimit::from_reader(reader)?,
+            TicketTimeLimit::from_reader(reader)?,
+            TicketTimeLimit::from_reader(reader)?,
+            TicketTimeLimit::from_reader(reader)?,
+            TicketTimeLimit::from_reader(reader)?,
+            TicketTimeLimit::from_reader(reader)?,
+            TicketTimeLimit::from_reader(reader)?,
+            TicketTimeLimit::from_reader(reader)?,
+        ];
+        Ok(Ticket {
+            sig_type,
+            sig,
+            sig_issuer,
+            ecdh,
+            enc_key,
+            ticket_id,
+            console_id,
+            title_id,
+            ticket_version,
+            permitted_titles_mask,
+            permit_mask,
+            title_export_allowed,
+            common_key_idx,
+            content_access_permissions,
+            time_limits,
+        })
+    }
+}
 
-#[derive(Debug, PartialEq, BinRead)]
+#[derive(Debug, PartialEq)]
 struct TmdContent {
     id: u32,
     index: u16,
@@ -113,78 +226,89 @@ struct TmdContent {
     hash: [u8; 20],
 }
 
-#[derive(Debug, PartialEq, BinRead)]
+#[derive(Debug, PartialEq)]
 struct Tmd {
     sig_type: SigType,
-    #[br(count = 256)]
+    // #[br(count = 256)]
     sig: Vec<u8>,
-    #[br(pad_before = 60, count = 64)]
+    // #[br(pad_before = 60, count = 64)]
     sig_issuer: Vec<u8>,
     version: u8,
     ca_crl_version: u8,
     signer_crl_version: u8,
-    #[br(pad_before = 1)]
+    // #[br(pad_before = 1)]
     ios_id_major: u32,
     ios_id_minor: u32,
     title_id_major: u32,
-    title_id_minor: [char; 4],
+    title_id_minor: [u8; 4],
     title_type: u32,
     group_id: u16,
-    #[br(pad_before = 62)]
+    // #[br(pad_before = 62)]
     access_flags: u32,
     title_version: u16,
     num_contents: u16,
-    #[br(pad_after = 2)]
+    // #[br(pad_after = 2)]
     boot_idx: u16,
-    #[br(count = num_contents)]
+    // #[br(count = num_contents)]
     contents: Vec<TmdContent>,
 }
 
-#[derive(Debug, PartialEq, BinRead)]
+#[derive(Debug, PartialEq)]
 struct Certificate {
     sig_type: SigType,
-    #[br(count = if sig_type == SigType::Rsa4096 { 512 } else if sig_type == SigType::Rsa2048 { 256 } else if sig_type == SigType::EllipticalCurve { 64 } else { 0 })]
+    // #[br(count = sig_size(sig_type))]
     sig: Vec<u8>,
-    #[br(pad_before = 60, count = 64)]
+    // #[br(pad_before = 60, count = 64)]
     issuer: Vec<u8>,
     key_type: KeyType,
-    #[br(count = 64)]
+    // #[br(count = 64)]
     subject: Vec<u8>,
-    #[br(count = if key_type == KeyType::Rsa4096 { 512 } else if key_type == KeyType::Rsa2048 { 256 } else { 0 })]
+    // #[br(count = key_size(key_type))]
     key: Vec<u8>,
     modulus: u32,
-    #[br(pad_after = 52)]
+    // #[br(pad_after = 52)]
     pub_exp: u32,
 }
 
-#[derive(Debug, PartialEq, BinRead)]
-#[br(import(partition_off: u64))]
+#[derive(Debug, PartialEq)]
+// #[br(import(partition_off: u64))]
 struct WiiPartitionHeader {
-    #[br(seek_before = SeekFrom::Start(partition_off))]
+    // #[br(seek_before = SeekFrom::Start(partition_off))]
     ticket: Ticket,
     tmd_size: u32,
-    #[br(map = | x: u32 | ((x as u64) << 2) + partition_off)]
+    // #[br(map = |x: u32| ((x as u64) << 2) + partition_off)]
     tmd_off: u64,
     cert_chain_size: u32,
-    #[br(map = | x: u32 | ((x as u64) << 2) + partition_off)]
+    // #[br(map = |x: u32| ((x as u64) << 2) + partition_off)]
     cert_chain_off: u64,
-    #[br(map = | x: u32 | ((x as u64) << 2) + partition_off)]
+    // #[br(map = |x: u32| ((x as u64) << 2) + partition_off)]
     global_hash_table_off: u64,
-    #[br(map = | x: u32 | ((x as u64) << 2) + partition_off)]
+    // #[br(map = |x: u32| ((x as u64) << 2) + partition_off)]
     data_off: u64,
-    #[br(map = | x: u32 | (x as u64) << 2)]
+    // #[br(map = |x: u32| (x as u64) << 2)]
     data_size: u64,
-    #[br(seek_before = SeekFrom::Start(tmd_off))]
+    // #[br(seek_before = SeekFrom::Start(tmd_off))]
     tmd: Tmd,
-    #[br(seek_before = SeekFrom::Start(cert_chain_off))]
+    // #[br(seek_before = SeekFrom::Start(cert_chain_off))]
     ca_cert: Certificate,
     tmd_cert: Certificate,
     ticket_cert: Certificate,
-    #[br(seek_before = SeekFrom::Start(global_hash_table_off), count = 0x18000)]
+    // #[br(seek_before = SeekFrom::Start(global_hash_table_off), count = 0x18000)]
     h3_data: Vec<u8>,
 }
 
+impl FromReader for WiiPartitionHeader {
+    type Args<'a> = u64;
+
+    const STATIC_SIZE: usize = Ticket::STATIC_SIZE;
+
+    fn from_reader_args<R>(reader: &mut R, args: Self::Args<'_>) -> io::Result<Self>
+    where R: Read + ?Sized {
+        todo!()
+    }
+}
+
 pub(crate) struct DiscWii {
     header: Header,
     part_info: WiiPartInfo,
@@ -192,9 +316,9 @@ pub(crate) struct DiscWii {
 }
 
 impl DiscWii {
     pub(crate) fn new(mut stream: &mut dyn ReadStream, header: Header) -> Result<DiscWii> {
-        let mut disc = DiscWii { header, part_info: stream.read_be()? };
+        let mut disc = DiscWii { header, part_info: todo!() }; // stream.read_be()?
         disc.decrypt_partition_keys()?;
-        Result::Ok(disc)
+        Ok(disc)
     }
 }
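+// Title keys ship encrypted with AES-128-CBC under one of the Wii common keys,
+// using the ticket's 8-byte title ID (zero-padded to 16 bytes) as the IV; the
+// hunk below decrypts them in place.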
 
@@ -204,13 +328,10 @@ impl DiscWii {
             let ticket = &mut part.part_header.ticket;
             let mut iv: [u8; 16] = [0; 16];
             iv[..8].copy_from_slice(&ticket.title_id);
-            Aes128Cbc::new(
-                Aes128::new(&COMMON_KEYS[ticket.common_key_idx as usize].into()),
-                &iv.into(),
-            )
-            .decrypt(&mut ticket.enc_key)?;
+            Aes128Cbc::new(&COMMON_KEYS[ticket.common_key_idx as usize].into(), &iv.into())
+                .decrypt_padded_mut::<NoPadding>(&mut ticket.enc_key)?;
         }
-        Result::Ok(())
+        Ok(())
     }
 }
 
 impl DiscBase for DiscWii {
@@ -226,48 +347,74 @@ impl DiscBase for DiscWii {
             .part_info
             .parts
             .iter()
-            .find(|v| v.part_type == WiiPartType::Data)
+            .find(|v| v.part_type == PartitionType::Data)
             .ok_or_else(|| Error::DiscFormat("Failed to locate data partition".to_string()))?;
         let data_off = part.part_header.data_off;
         let has_crypto = disc_io.has_wii_crypto();
+        let base = disc_io
+            .begin_read_stream(data_off)
+            .map_err(|e| Error::Io("Opening data partition stream".to_string(), e))?;
+        let stream = wrap_windowed(base, data_off, part.part_header.data_size)
+            .context("Wrapping data partition stream")?;
         let result = Box::new(WiiPartReadStream {
-            stream: wrap_windowed(
-                disc_io.begin_read_stream(data_off)?,
-                data_off,
-                part.part_header.data_size,
-            )?,
-            crypto: if has_crypto {
-                Aes128::new(&part.part_header.ticket.enc_key.into()).into()
-            } else {
-                Option::None
-            },
+            stream,
+            crypto: if has_crypto { Some(part.part_header.ticket.enc_key) } else { None },
             offset: 0,
-            cur_block: u64::MAX,
+            cur_block: u32::MAX,
             buf: [0; 0x8000],
             validate_hashes,
         });
-        Result::Ok(result)
+        Ok(result)
+    }
+
+    fn get_partition<'a>(
+        &self,
+        disc_io: &'a mut dyn DiscIO,
+        part_type: PartitionType,
+        validate_hashes: bool,
+    ) -> Result<Box<dyn PartReadStream + 'a>> {
+        let part =
+            self.part_info.parts.iter().find(|v| v.part_type == part_type).ok_or_else(|| {
+                Error::DiscFormat(format!("Failed to locate {:?} partition", part_type))
+            })?;
+        let data_off = part.part_header.data_off;
+        let has_crypto = disc_io.has_wii_crypto();
+        let base = disc_io
+            .begin_read_stream(data_off)
+            .with_context(|| format!("Opening {:?} partition stream", part_type))?;
+        let stream = wrap_windowed(base, data_off, part.part_header.data_size)
+            .with_context(|| format!("Wrapping {:?} partition stream", part_type))?;
+        let result = Box::new(WiiPartReadStream {
+            stream,
+            crypto: if has_crypto { Some(part.part_header.ticket.enc_key) } else { None },
+            offset: 0,
+            cur_block: u32::MAX,
+            buf: [0; 0x8000],
+            validate_hashes,
+        });
+        Ok(result)
     }
 }
 
 struct WiiPartReadStream<'a> {
     stream: OwningWindowedReadStream<'a>,
-    crypto: Option<Aes128>,
+    crypto: Option<[u8; 16]>,
     offset: u64,
-    cur_block: u64,
-    buf: [u8; BUFFER_SIZE],
+    cur_block: u32,
+    buf: [u8; SECTOR_SIZE],
     validate_hashes: bool,
 }
 
 impl<'a> PartReadStream for WiiPartReadStream<'a> {
     fn begin_file_stream(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream> {
         assert_eq!(node.kind, NodeKind::File);
-        io::Result::Ok(self.new_window((node.offset as u64) << 2, node.length as u64)?)
+        self.new_window((node.offset as u64) << 2, node.length as u64)
     }
 
     fn read_header(&mut self) -> Result<Box<dyn PartHeader>> {
-        self.seek(SeekFrom::Start(0))?;
-        Result::Ok(Box::from(self.read_be::<WiiPartition>()?))
+        self.seek(SeekFrom::Start(0)).context("Seeking to partition header")?;
+        todo!()
+        // Ok(Box::from(self.read_be::<WiiPartition>()?))
     }
 
     fn ideal_buffer_size(&self) -> usize { BLOCK_SIZE }
 }
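+// Each sector's 0x400-byte hash area holds 31 H0 hashes (one per 0x400-byte data
+// chunk), 8 H1 hashes (one per sector in the subgroup), and 8 H2 hashes (one per
+// subgroup in the group); decrypt_block below recomputes H0-H2 when validating.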
 
@@ -276,25 +423,24 @@ impl<'a> PartReadStream for WiiPartReadStream<'a> {
 #[inline(always)]
 fn as_digest(slice: &[u8; 20]) -> digest::Output<Sha1> { (*slice).into() }
 
-fn decrypt_block(part: &mut WiiPartReadStream, cluster: usize) -> io::Result<()> {
+fn decrypt_block(part: &mut WiiPartReadStream, cluster: u32) -> io::Result<()> {
     part.stream.read_exact(&mut part.buf)?;
-    if part.crypto.is_some() {
+    if let Some(key) = &part.crypto {
         // Fetch IV before decrypting header
-        let iv = Block::from(*array_ref![part.buf, 0x3d0, 16]);
+        let iv_bytes = array_ref![part.buf, 0x3d0, 16];
+        let iv = Block::from(*iv_bytes);
         // Don't need to decrypt header if we're not validating hashes
         if part.validate_hashes {
-            Aes128Cbc::new(part.crypto.as_ref().unwrap().clone(), &Block::from([0; 16]))
-                .decrypt(&mut part.buf[..BUFFER_OFFSET])
+            Aes128Cbc::new(key.into(), &Block::from([0; 16]))
+                .decrypt_padded_mut::<NoPadding>(&mut part.buf[..HASHES_SIZE])
                 .expect("Failed to decrypt header");
         }
-        Aes128Cbc::new(part.crypto.as_ref().unwrap().clone(), &iv)
-            .decrypt(&mut part.buf[BUFFER_OFFSET..])
+        Aes128Cbc::new(key.into(), &iv)
+            .decrypt_padded_mut::<NoPadding>(&mut part.buf[HASHES_SIZE..])
             .expect("Failed to decrypt block");
     }
-    if part.validate_hashes && part.crypto.is_some()
-    /* FIXME NFS validation? */
-    {
-        let (mut group, sub_group) = div_rem(cluster, 8);
+    if part.validate_hashes {
+        let (mut group, sub_group) = div_rem(cluster as usize, 8);
         group %= 8;
         // H0 hashes
         for i in 0..31 {
@@ -303,12 +449,7 @@ fn decrypt_block(part: &mut WiiPartReadStream, cluster: u32) -> io::Result<()> {
             let expected = as_digest(array_ref![part.buf, i * 20, 20]);
             let output = hash.finalize();
             if output != expected {
-                panic!(
-                    "Invalid hash! (block {:?}) {:?}\n\texpected {:?}",
-                    i,
-                    output.as_slice(),
-                    expected
-                );
+                panic!("Invalid H0 hash! (block {:?}) {:x}\n\texpected {:x}", i, output, expected);
             }
         }
         // H1 hash
@@ -319,10 +460,8 @@ fn decrypt_block(part: &mut WiiPartReadStream, cluster: u32) -> io::Result<()> {
             let output = hash.finalize();
             if output != expected {
                 panic!(
-                    "Invalid hash! (subgroup {:?}) {:?}\n\texpected {:?}",
-                    sub_group,
-                    output.as_slice(),
-                    expected
+                    "Invalid H1 hash! (subgroup {:?}) {:x}\n\texpected {:x}",
+                    sub_group, output, expected
                 );
             }
         }
         // H2 hash
@@ -334,27 +473,28 @@ fn decrypt_block(part: &mut WiiPartReadStream, cluster: u32) -> io::Result<()> {
             let output = hash.finalize();
             if output != expected {
                 panic!(
-                    "Invalid hash! (group {:?}) {:?}\n\texpected {:?}",
-                    group,
-                    output.as_slice(),
-                    expected
+                    "Invalid H2 hash! (group {:?}) {:x}\n\texpected {:x}",
+                    group, output, expected
                 );
             }
         }
     }
-    io::Result::Ok(())
+    Ok(())
 }
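+// The Read/Seek impls below expose only the decrypted payload: logical offsets
+// advance in BLOCK_SIZE (0x7C00) units while the underlying stream is consumed
+// one raw SECTOR_SIZE (0x8000) sector at a time (see to_block_size).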
(group {:?}) {:x}\n\texpected {:x}", + group, output, expected ); } } } - io::Result::Ok(()) + Ok(()) } impl<'a> Read for WiiPartReadStream<'a> { fn read(&mut self, buf: &mut [u8]) -> io::Result { - let (mut block, mut block_offset) = div_rem(self.offset as usize, BLOCK_SIZE); + let (block, block_offset) = div_rem(self.offset, BLOCK_SIZE as u64); + let mut block = block as u32; + let mut block_offset = block_offset as usize; + let mut rem = buf.len(); let mut read: usize = 0; while rem > 0 { - if block != self.cur_block as usize { + if block != self.cur_block { decrypt_block(self, block)?; - self.cur_block = block as u64; + self.cur_block = block; } let mut cache_size = rem; @@ -363,7 +503,7 @@ impl<'a> Read for WiiPartReadStream<'a> { } buf[read..read + cache_size].copy_from_slice( - &self.buf[BUFFER_OFFSET + block_offset..BUFFER_OFFSET + block_offset + cache_size], + &self.buf[HASHES_SIZE + block_offset..HASHES_SIZE + block_offset + cache_size], ); read += cache_size; rem -= cache_size; @@ -372,13 +512,13 @@ impl<'a> Read for WiiPartReadStream<'a> { } self.offset += buf.len() as u64; - io::Result::Ok(buf.len()) + Ok(buf.len()) } } #[inline(always)] fn to_block_size(v: u64) -> u64 { - (v / BUFFER_SIZE as u64) * BLOCK_SIZE as u64 + (v % BUFFER_SIZE as u64) + (v / SECTOR_SIZE as u64) * BLOCK_SIZE as u64 + (v % SECTOR_SIZE as u64) } impl<'a> Seek for WiiPartReadStream<'a> { @@ -388,31 +528,33 @@ impl<'a> Seek for WiiPartReadStream<'a> { SeekFrom::End(v) => (self.stable_stream_len()? as i64 + v) as u64, SeekFrom::Current(v) => (self.offset as i64 + v) as u64, }; - let block = self.offset / BLOCK_SIZE as u64; + let block = (self.offset / BLOCK_SIZE as u64) as u32; if block != self.cur_block { - self.stream.seek(SeekFrom::Start(block * BUFFER_SIZE as u64))?; - self.cur_block = u64::MAX; + self.stream.seek(SeekFrom::Start(block as u64 * SECTOR_SIZE as u64))?; + self.cur_block = u32::MAX; } - io::Result::Ok(self.offset) + Ok(self.offset) } - fn stream_position(&mut self) -> io::Result { io::Result::Ok(self.offset) } + fn stream_position(&mut self) -> io::Result { Ok(self.offset) } } impl<'a> ReadStream for WiiPartReadStream<'a> { fn stable_stream_len(&mut self) -> io::Result { - io::Result::Ok(to_block_size(self.stream.stable_stream_len()?)) + Ok(to_block_size(self.stream.stable_stream_len()?)) } fn as_dyn(&mut self) -> &mut dyn ReadStream { self } } -#[derive(Clone, Debug, PartialEq, BinRead)] +#[derive(Clone, Debug, PartialEq)] pub(crate) struct WiiPartition { header: Header, - bi2_header: BI2Header, - #[br(seek_before = SeekFrom::Start((header.fst_off as u64) << 2))] - #[br(parse_with = node_parser)] + // #[br(seek_before = SeekFrom::Start(0x400))] + part_header: PartitionHeader, + // bi2_header: BI2Header, + // #[br(seek_before = SeekFrom::Start((part_header.fst_off as u64) << 2))] + // #[br(parse_with = node_parser)] root_node: NodeType, } @@ -420,4 +562,22 @@ impl PartHeader for WiiPartition { fn root_node(&self) -> &NodeType { &self.root_node } fn find_node(&self, path: &str) -> Option<&NodeType> { find_node(&self.root_node, path) } + + fn boot_bytes(&self) -> &[u8] { todo!() } + + fn bi2_bytes(&self) -> &[u8] { todo!() } + + fn apploader_bytes(&self) -> &[u8] { todo!() } + + fn fst_bytes(&self) -> &[u8] { todo!() } + + fn dol_bytes(&self) -> &[u8] { todo!() } + + fn disc_header(&self) -> &Header { todo!() } + + fn partition_header(&self) -> &PartitionHeader { todo!() } + + fn apploader_header(&self) -> &AppLoaderHeader { todo!() } + + fn dol_header(&self) -> &DolHeader { todo!() } } diff 
--git a/src/fst.rs b/src/fst.rs index c5ce0f3..b4c9654 100644 --- a/src/fst.rs +++ b/src/fst.rs @@ -1,10 +1,18 @@ //! Disc file system types -use std::io::{Read, Seek, SeekFrom}; +use std::{ + ffi::CString, + io, + io::{Read, Seek, SeekFrom}, +}; -use binrw::{binread, BinReaderExt, BinResult, NullString, ReadOptions}; use encoding_rs::SHIFT_JIS; +use crate::{ + util::reader::{struct_size, FromReader, DYNAMIC_SIZE, U24}, + Result, ResultContext, +}; + /// File system node kind. #[derive(Clone, Debug, PartialEq)] pub enum NodeKind { @@ -14,17 +22,30 @@ pub enum NodeKind { Directory, } +impl FromReader for NodeKind { + type Args<'a> = (); + + const STATIC_SIZE: usize = 1; + + fn from_reader_args(_reader: &mut R, _args: Self::Args<'_>) -> io::Result + where R: Read + ?Sized { + match u8::from_reader(_reader)? { + 0 => Ok(NodeKind::File), + 1 => Ok(NodeKind::Directory), + _ => Err(io::Error::new(io::ErrorKind::InvalidData, "invalid node kind")), + } + } +} + /// An individual file system node. -#[binread] #[derive(Clone, Debug, PartialEq)] pub struct Node { - #[br(temp)] - type_and_name_offset: u32, - /// File system node type. - #[br(calc = if (type_and_name_offset >> 24) != 0 { NodeKind::Directory } else { NodeKind::File })] pub kind: NodeKind, + /// Offset in the string table to the filename. + pub name_offset: u32, + /// For files, this is the partition offset of the file data. (Wii: >> 2) /// /// For directories, this is the children start offset in the FST. @@ -37,11 +58,28 @@ pub struct Node { /// Number of child files and directories recursively is `length - offset`. pub length: u32, - #[br(calc = type_and_name_offset & 0xffffff)] - name_offset: u32, - #[br(ignore)] /// The node name. - pub name: Box, + pub name: String, +} + +impl FromReader for Node { + type Args<'a> = (); + + const STATIC_SIZE: usize = struct_size([ + NodeKind::STATIC_SIZE, // type + U24::STATIC_SIZE, // name_offset + u32::STATIC_SIZE, // offset + u32::STATIC_SIZE, // length + ]); + + fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result + where R: Read + ?Sized { + let kind = NodeKind::from_reader(reader)?; + let name_offset = U24::from_reader(reader)?.0; + let offset = u32::from_reader(reader)?; + let length = u32::from_reader(reader)?; + Ok(Node { kind, offset, length, name_offset, name: Default::default() }) + } } /// Contains a file system node, and if a directory, its children. 
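///
/// A minimal sketch of parsing one node with the `FromReader` impl above
/// (hypothetical 12-byte entry; assumes big-endian fields, as used throughout
/// the GameCube/Wii FST):
///
/// ```ignore
/// let buf: [u8; 12] = [
///     0x01, // kind: directory
///     0x00, 0x00, 0x10, // name_offset: 0x10 into the string table
///     0x00, 0x00, 0x00, 0x00, // offset: children start at FST index 0
///     0x00, 0x00, 0x00, 0x05, // length: 5 entries total, recursively
/// ];
/// let node = Node::from_reader(&mut &buf[..]).unwrap();
/// assert_eq!(node.kind, NodeKind::Directory);
/// ```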
@@ -53,74 +91,77 @@ pub enum NodeType { Directory(Node, Vec), } -fn read_node(reader: &mut R, ro: &ReadOptions, i: &mut u32) -> BinResult { - let node = reader.read_type::(ro.endian())?; - *i += 1; - BinResult::Ok(if node.kind == NodeKind::Directory { - let mut children: Vec = Vec::new(); - children.reserve((node.length - *i) as usize); - while *i < node.length { - children.push(read_node(reader, ro, i)?); - } - NodeType::Directory(node, children) - } else { - NodeType::File(node) - }) +impl FromReader for NodeType { + type Args<'a> = &'a mut u32; + + const STATIC_SIZE: usize = DYNAMIC_SIZE; + + fn from_reader_args(reader: &mut R, idx: &mut u32) -> io::Result + where R: Read + ?Sized { + let node = Node::from_reader(reader)?; + *idx += 1; + Ok(if node.kind == NodeKind::Directory { + let mut children = Vec::with_capacity((node.length - *idx) as usize); + while *idx < node.length { + children.push(NodeType::from_reader_args(reader, idx)?); + } + NodeType::Directory(node, children) + } else { + NodeType::File(node) + }) + } } -fn read_node_name( +fn read_node_name( reader: &mut R, - ro: &ReadOptions, - base: u64, + string_base: u64, node: &mut NodeType, root: bool, -) -> BinResult<()> { - let mut decode_name = |v: &mut Node| -> BinResult<()> { +) -> io::Result<()> +where + R: Read + Seek + ?Sized, +{ + let mut decode_name = |v: &mut Node| -> io::Result<()> { if !root { - let offset = base + v.name_offset as u64; + let offset = string_base + v.name_offset as u64; reader.seek(SeekFrom::Start(offset))?; - let null_string = reader.read_type::(ro.endian())?; - let (res, _, errors) = SHIFT_JIS.decode(&*null_string.0); + + let c_string = CString::from_reader(reader)?; + let (decoded, _, errors) = SHIFT_JIS.decode(c_string.as_bytes()); if errors { - return BinResult::Err(binrw::Error::Custom { - pos: offset, - err: Box::new("Failed to decode node name"), - }); + return Err(io::Error::new(io::ErrorKind::InvalidData, "invalid shift-jis")); } - v.name = res.into(); + v.name = decoded.into_owned(); } - BinResult::Ok(()) + Ok(()) }; match node { - NodeType::File(v) => { - decode_name(v)?; + NodeType::File(inner) => { + decode_name(inner)?; } - NodeType::Directory(v, c) => { - decode_name(v)?; - for x in c { - read_node_name(reader, ro, base, x, false)?; + NodeType::Directory(inner, children) => { + decode_name(inner)?; + for child in children { + read_node_name(reader, string_base, child, false)?; } } } - BinResult::Ok(()) + Ok(()) } -pub(crate) fn node_parser( - reader: &mut R, - ro: &ReadOptions, - _: (), -) -> BinResult { - let mut node = read_node(reader, ro, &mut 0)?; - let base = reader.stream_position()?; - read_node_name(reader, ro, base, &mut node, true)?; - BinResult::Ok(node) +pub(crate) fn read_fst(reader: &mut R) -> Result +where R: Read + Seek + ?Sized { + let mut node = NodeType::from_reader_args(reader, &mut 0).context("Parsing FST nodes")?; + let string_base = reader.stream_position().context("Reading FST end position")?; + read_node_name(reader, string_base, &mut node, true).context("Reading FST node names")?; + Ok(node) } fn matches_name(node: &NodeType, name: &str) -> bool { match node { - NodeType::File(v) => v.name.as_ref().eq_ignore_ascii_case(name), + NodeType::File(v) => v.name.as_str().eq_ignore_ascii_case(name), NodeType::Directory(v, _) => { - v.name.is_empty() /* root */ || v.name.as_ref().eq_ignore_ascii_case(name) + v.name.is_empty() /* root */ || v.name.as_str().eq_ignore_ascii_case(name) } } } @@ -132,7 +173,7 @@ pub(crate) fn find_node<'a>(mut node: &'a NodeType, path: 
&str) -> Option<&'a No
        if matches_name(node, current.unwrap()) {
            match node {
                NodeType::File(_) => {
-                    return if split.next().is_none() { Option::Some(node) } else { Option::None };
+                    return if split.next().is_none() { Some(node) } else { None };
                }
                NodeType::Directory(v, c) => {
                    // Find child
@@ -140,11 +181,7 @@ pub(crate) fn find_node<'a>(mut node: &'a NodeType, path: &str) -> Option<&'a No
                        current = split.next();
                    }
                    if current.is_none() || current.unwrap().is_empty() {
-                        return if split.next().is_none() {
-                            Option::Some(node)
-                        } else {
-                            Option::None
-                        };
+                        return if split.next().is_none() { Some(node) } else { None };
                    }
                    for x in c {
                        if matches_name(x, current.unwrap()) {
@@ -158,5 +195,5 @@ pub(crate) fn find_node<'a>(mut node: &'a NodeType, path: &str) -> Option<&'a No
            break;
        }
    }
-    Option::None
+    None
}
diff --git a/src/io/iso.rs b/src/io/iso.rs
index 742d480..116fbf8 100644
--- a/src/io/iso.rs
+++ b/src/io/iso.rs
@@ -13,7 +13,7 @@ pub(crate) struct DiscIOISO {
impl DiscIOISO {
    pub(crate) fn new(filename: &Path) -> Result<DiscIOISO> {
-        Result::Ok(DiscIOISO { filename: filename.to_owned() })
+        Ok(DiscIOISO { filename: filename.to_owned() })
    }
}

@@ -21,21 +21,25 @@ impl DiscIO for DiscIOISO {
    fn begin_read_stream(&mut self, offset: u64) -> io::Result<Box<dyn ReadStream>> {
        let mut file = File::open(&*self.filename)?;
        file.seek(SeekFrom::Start(offset))?;
-        io::Result::Ok(Box::from(file))
+        Ok(Box::from(file))
    }
}

-pub(crate) struct DiscIOISOStream<T: ReadStream + Sized> {
+pub(crate) struct DiscIOISOStream<T>
+where T: ReadStream + Sized
+{
    pub(crate) stream: T,
}

-impl<T: ReadStream + Sized> DiscIOISOStream<T> {
-    pub(crate) fn new(stream: T) -> Result<DiscIOISOStream<T>> {
-        Result::Ok(DiscIOISOStream { stream })
-    }
+impl<T> DiscIOISOStream<T>
+where T: ReadStream + Sized
+{
+    pub(crate) fn new(stream: T) -> Result<DiscIOISOStream<T>> { Ok(DiscIOISOStream { stream }) }
}

-impl<T: ReadStream + Sized + Send + Sync> DiscIO for DiscIOISOStream<T> {
+impl<T> DiscIO for DiscIOISOStream<T>
+where T: ReadStream + Sized + Send + Sync
+{
    fn begin_read_stream<'a>(&'a mut self, offset: u64) -> io::Result<Box<dyn ReadStream + 'a>> {
        let size = self.stream.stable_stream_len()?;
        let mut stream = self.stream.new_window(0, size)?;
diff --git a/src/io/mod.rs b/src/io/mod.rs
index 9cd5617..0dc71c6 100644
--- a/src/io/mod.rs
+++ b/src/io/mod.rs
@@ -6,6 +6,7 @@ use crate::{
    io::{
        iso::{DiscIOISO, DiscIOISOStream},
        nfs::DiscIONFS,
+        wia::DiscIOWIA,
    },
    streams::{ByteReadStream, ReadStream},
    Error, Result,
@@ -13,6 +14,13 @@ use crate::{

pub(crate) mod iso;
pub(crate) mod nfs;
+pub(crate) mod wia;
+
+#[derive(Default, Debug, Clone)]
+pub struct DiscIOOptions {
+    /// Rebuild hashes for the disc image.
+    pub rebuild_hashes: bool,
+}

/// Abstraction over supported disc file types.
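///
/// Implementations are selected by [new_disc_io] from the file extension:
/// `.iso`, `.nfs` (Wii U VC), and `.wia` / `.rvz`.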
pub trait DiscIO: Send + Sync {
@@ -30,43 +38,40 @@
///
/// Basic usage:
/// ```no_run
-/// use nod::io::new_disc_io;
-///
-/// let mut disc_io = new_disc_io("path/to/file".as_ref())?;
-/// # Ok::<(), nod::Error>(())
+/// use nod::io::{new_disc_io, DiscIOOptions};
+///
+/// # fn main() -> nod::Result<()> {
+/// let options = DiscIOOptions::default();
+/// let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?;
+/// # Ok(())
+/// # }
/// ```
-pub fn new_disc_io(filename: &Path) -> Result<Box<dyn DiscIO>> {
+pub fn new_disc_io(filename: &Path, options: &DiscIOOptions) -> Result<Box<dyn DiscIO>> {
    let path_result = fs::canonicalize(filename);
    if let Err(err) = path_result {
-        return Result::Err(Error::Io(
-            format!("Failed to open {}", filename.to_string_lossy()),
-            err,
-        ));
+        return Err(Error::Io(format!("Failed to open {}", filename.display()), err));
    }
    let path = path_result.as_ref().unwrap();
    let meta = fs::metadata(path);
    if let Err(err) = meta {
-        return Result::Err(Error::Io(
-            format!("Failed to open {}", filename.to_string_lossy()),
-            err,
-        ));
+        return Err(Error::Io(format!("Failed to open {}", filename.display()), err));
    }
    if !meta.unwrap().is_file() {
-        return Result::Err(Error::DiscFormat(format!(
-            "Input is not a file: {}",
-            filename.to_string_lossy()
-        )));
+        return Err(Error::DiscFormat(format!("Input is not a file: {}", filename.display())));
    }
    if has_extension(path, "iso") {
-        Result::Ok(Box::from(DiscIOISO::new(path)?))
+        Ok(Box::from(DiscIOISO::new(path)?))
    } else if has_extension(path, "nfs") {
-        if matches!(path.parent(), Some(parent) if parent.is_dir()) {
-            Result::Ok(Box::from(DiscIONFS::new(path.parent().unwrap())?))
-        } else {
-            Result::Err(Error::DiscFormat("Failed to locate NFS parent directory".to_string()))
+        match path.parent() {
+            Some(parent) if parent.is_dir() => {
+                Ok(Box::from(DiscIONFS::new(path.parent().unwrap())?))
+            }
+            _ => Err(Error::DiscFormat("Failed to locate NFS parent directory".to_string())),
        }
+    } else if has_extension(path, "wia") || has_extension(path, "rvz") {
+        Ok(Box::from(DiscIOWIA::new(path, options)?))
    } else {
-        Result::Err(Error::DiscFormat("Unknown file type".to_string()))
+        Err(Error::DiscFormat("Unknown file type".to_string()))
    }
}

@@ -78,9 +83,11 @@ pub fn new_disc_io(filename: &Path) -> Result<Box<dyn DiscIO>> {
/// ```no_run
/// use nod::io::new_disc_io_from_buf;
///
-/// # #[allow(non_upper_case_globals)] const buf: [u8; 0] = [];
-/// let mut disc_io = new_disc_io_from_buf(&buf)?;
-/// # Ok::<(), nod::Error>(())
+/// # fn main() -> nod::Result<()> {
+/// # #[allow(non_upper_case_globals)] const buf: &[u8] = &[0u8; 0];
+/// let mut disc_io = new_disc_io_from_buf(buf)?;
+/// # Ok(())
+/// # }
/// ```
pub fn new_disc_io_from_buf(buf: &[u8]) -> Result<Box<dyn DiscIO + '_>> {
    new_disc_io_from_stream(ByteReadStream { bytes: buf, position: 0 })
@@ -92,11 +99,15 @@ pub fn new_disc_io_from_buf(buf: &[u8]) -> Result<Box<dyn DiscIO + '_>> {
///
/// Basic usage:
/// ```no_run
-/// use nod::io::new_disc_io_from_buf;
+/// use nod::io::new_disc_io_from_stream;
+/// use nod::streams::ByteReadStream;
///
-/// # #[allow(non_upper_case_globals)] const buf: [u8; 0] = [];
-/// let mut disc_io = new_disc_io_from_buf(&buf)?;
-/// # Ok::<(), nod::Error>(())
+/// # fn main() -> nod::Result<()> {
+/// # #[allow(non_upper_case_globals)] const buf: &[u8] = &[0u8; 0];
+/// let stream = ByteReadStream { bytes: buf, position: 0 };
+/// let mut disc_io = new_disc_io_from_stream(stream)?;
+/// # Ok(())
+/// # }
/// ```
pub fn new_disc_io_from_stream<'a, T: 'a + ReadStream + Sized + Send + Sync>(
stream: T, @@ -107,11 +118,8 @@ pub fn new_disc_io_from_stream<'a, T: 'a + ReadStream + Sized + Send + Sync>( /// Helper function for checking a file extension. #[inline(always)] pub fn has_extension(filename: &Path, extension: &str) -> bool { - if let Some(ext) = filename.extension() { - // TODO use with Rust 1.53+ - // ext.eq_ignore_ascii_case(extension) - ext.to_str().unwrap_or("").eq_ignore_ascii_case(extension) - } else { - false + match filename.extension() { + Some(ext) => ext.eq_ignore_ascii_case(extension), + None => false, } } diff --git a/src/io/nfs.rs b/src/io/nfs.rs index 3095f07..7700f53 100644 --- a/src/io/nfs.rs +++ b/src/io/nfs.rs @@ -1,36 +1,87 @@ use std::{ fs::File, io, - io::{Read, Seek, SeekFrom}, + io::{BufReader, Read, Seek, SeekFrom}, path::{Component, Path, PathBuf}, }; -use aes::{Aes128, NewBlockCipher}; -use binrw::{binread, BinRead, BinReaderExt}; -use block_modes::{block_padding::NoPadding, BlockMode, Cbc}; +use aes::{ + cipher::{block_padding::NoPadding, BlockDecryptMut, KeyIvInit}, + Aes128, +}; -use crate::{disc::BUFFER_SIZE, io::DiscIO, streams::ReadStream, Error, Result}; +use crate::{ + disc::SECTOR_SIZE, + io::DiscIO, + streams::ReadStream, + util::reader::{read_vec, struct_size, FromReader}, + Error, Result, ResultContext, +}; -type Aes128Cbc = Cbc; +type Aes128Cbc = cbc::Decryptor; -#[derive(Clone, Debug, PartialEq, BinRead)] +#[derive(Clone, Debug, PartialEq)] pub(crate) struct LBARange { pub(crate) start_block: u32, pub(crate) num_blocks: u32, } -#[binread] +impl FromReader for LBARange { + type Args<'a> = (); + + const STATIC_SIZE: usize = struct_size([ + u32::STATIC_SIZE, // start_block + u32::STATIC_SIZE, // num_blocks + ]); + + fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result + where R: Read + ?Sized { + Ok(LBARange { + start_block: u32::from_reader(reader)?, + num_blocks: u32::from_reader(reader)?, + }) + } +} + +type MagicBytes = [u8; 4]; + #[derive(Clone, Debug, PartialEq)] -#[br(magic = b"EGGS", assert(end_magic == * b"SGGE"))] pub(crate) struct NFSHeader { pub(crate) version: u32, pub(crate) unk1: u32, pub(crate) unk2: u32, - pub(crate) lba_range_count: u32, - #[br(count = 61)] pub(crate) lba_ranges: Vec, - #[br(temp)] - pub(crate) end_magic: [u8; 4], +} + +impl FromReader for NFSHeader { + type Args<'a> = (); + + const STATIC_SIZE: usize = struct_size([ + MagicBytes::STATIC_SIZE, // magic + u32::STATIC_SIZE, // version + u32::STATIC_SIZE, // unk1 + u32::STATIC_SIZE, // unk2 + u32::STATIC_SIZE, // lba_range_count + LBARange::STATIC_SIZE * 61, // lba_ranges + MagicBytes::STATIC_SIZE, // end_magic + ]); + + fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result + where R: Read + ?Sized { + if MagicBytes::from_reader(reader)? != *b"EGGS" { + return Err(io::Error::new(io::ErrorKind::InvalidData, "Invalid NFS magic")); + } + let version = u32::from_reader(reader)?; + let unk1 = u32::from_reader(reader)?; + let unk2 = u32::from_reader(reader)?; + let lba_range_count = u32::from_reader(reader)?; + let mut lba_ranges = read_vec(reader, 61)?; + lba_ranges.truncate(lba_range_count as usize); + if MagicBytes::from_reader(reader)? 
!= *b"SGGE" { + return Err(io::Error::new(io::ErrorKind::InvalidData, "Invalid NFS end magic")); + } + Ok(NFSHeader { version, unk1, unk2, lba_ranges }) + } } #[derive(Clone, Copy, Debug, PartialEq)] @@ -49,11 +100,8 @@ impl Default for Fbo { impl NFSHeader { pub(crate) fn calculate_num_files(&self) -> u32 { - let total_block_count = self - .lba_ranges - .iter() - .take(self.lba_range_count as usize) - .fold(0u32, |acc, range| acc + range.num_blocks); + let total_block_count = + self.lba_ranges.iter().fold(0u32, |acc, range| acc + range.num_blocks); (((total_block_count as u64) * 0x8000u64 + (0x200u64 + 0xF9FFFFFu64)) / 0xFA00000u64) as u32 } @@ -62,7 +110,7 @@ impl NFSHeader { let block_off = (offset % 0x8000) as u32; let mut block = u32::MAX; let mut physical_block = 0u32; - for range in self.lba_ranges.iter().take(self.lba_range_count as usize) { + for range in self.lba_ranges.iter() { if block_div >= range.start_block && block_div - range.start_block < range.num_blocks { block = physical_block + (block_div - range.start_block); break; @@ -85,17 +133,16 @@ pub(crate) struct DiscIONFS { impl DiscIONFS { pub(crate) fn new(directory: &Path) -> Result { - let mut disc_io = - DiscIONFS { directory: directory.to_owned(), key: [0; 16], header: Option::None }; + let mut disc_io = DiscIONFS { directory: directory.to_owned(), key: [0; 16], header: None }; disc_io.validate_files()?; - Result::Ok(disc_io) + Ok(disc_io) } } pub(crate) struct NFSReadStream<'a> { disc_io: &'a DiscIONFS, file: Option, - crypto: Aes128, + crypto: [u8; 16], // Physical address - all UINT32_MAX indicates logical zero block phys_addr: Fbo, // Logical address @@ -104,18 +151,21 @@ pub(crate) struct NFSReadStream<'a> { // Block is typically one ahead of the presently decrypted block. 
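    // cur_file / cur_block select the hif_*.nfs file and the sector held in `buf`.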
cur_file: u32, cur_block: u32, - buf: [u8; BUFFER_SIZE], + buf: [u8; SECTOR_SIZE], } impl<'a> NFSReadStream<'a> { fn set_cur_file(&mut self, cur_file: u32) -> Result<()> { if cur_file >= self.disc_io.header.as_ref().unwrap().calculate_num_files() { - return Result::Err(Error::DiscFormat("Out of bounds NFS file access".to_string())); + return Err(Error::DiscFormat(format!("Out of bounds NFS file access: {}", cur_file))); } self.cur_file = cur_file; self.cur_block = u32::MAX; - self.file = Option::from(File::open(self.disc_io.get_nfs(cur_file)?)?); - Result::Ok(()) + let path = self.disc_io.get_nfs(cur_file)?; + self.file = Option::from( + File::open(&path).with_context(|| format!("Opening file {}", path.display()))?, + ); + Ok(()) } fn set_cur_block(&mut self, cur_block: u32) -> io::Result<()> { @@ -123,22 +173,22 @@ impl<'a> NFSReadStream<'a> { self.file .as_ref() .unwrap() - .seek(SeekFrom::Start(self.cur_block as u64 * BUFFER_SIZE as u64 + 0x200u64))?; - io::Result::Ok(()) + .seek(SeekFrom::Start(self.cur_block as u64 * SECTOR_SIZE as u64 + 0x200u64))?; + Ok(()) } fn set_phys_addr(&mut self, phys_addr: Fbo) -> Result<()> { // If we're just changing the offset, nothing else needs to be done if self.phys_addr.file == phys_addr.file && self.phys_addr.block == phys_addr.block { self.phys_addr.offset = phys_addr.offset; - return Result::Ok(()); + return Ok(()); } self.phys_addr = phys_addr; // Set logical zero block if phys_addr.file == u32::MAX { self.buf.fill(0u8); - return Result::Ok(()); + return Ok(()); } // Make necessary file and block current with system @@ -146,17 +196,30 @@ impl<'a> NFSReadStream<'a> { self.set_cur_file(phys_addr.file)?; } if phys_addr.block != self.cur_block { - self.set_cur_block(phys_addr.block)?; + self.set_cur_block(phys_addr.block) + .with_context(|| format!("Seeking to NFS block {}", phys_addr.block))?; } // Read block, handling 0x200 overlap case if phys_addr.block == 7999 { - self.file.as_ref().unwrap().read_exact(&mut self.buf[..BUFFER_SIZE - 0x200])?; + self.file + .as_ref() + .unwrap() + .read_exact(&mut self.buf[..SECTOR_SIZE - 0x200]) + .context("Reading NFS block 7999 part 1")?; self.set_cur_file(self.cur_file + 1)?; - self.file.as_ref().unwrap().read_exact(&mut self.buf[BUFFER_SIZE - 0x200..])?; + self.file + .as_ref() + .unwrap() + .read_exact(&mut self.buf[SECTOR_SIZE - 0x200..]) + .context("Reading NFS block 7999 part 2")?; self.cur_block = 0; } else { - self.file.as_ref().unwrap().read_exact(&mut self.buf)?; + self.file + .as_ref() + .unwrap() + .read_exact(&mut self.buf) + .with_context(|| format!("Reading NFS block {}", phys_addr.block))?; self.cur_block += 1; } @@ -169,9 +232,10 @@ impl<'a> NFSReadStream<'a> { ((phys_addr.l_block >> 16) & 0xFF) as u8, ((phys_addr.l_block >> 24) & 0xFF) as u8, ]; - Aes128Cbc::new(self.crypto.clone(), &iv.into()).decrypt(&mut self.buf)?; + Aes128Cbc::new(self.crypto.as_ref().into(), &iv.into()) + .decrypt_padded_mut::(&mut self.buf)?; - Result::Ok(()) + Ok(()) } fn set_logical_addr(&mut self, addr: u64) -> Result<()> { @@ -187,20 +251,20 @@ impl<'a> Read for NFSReadStream<'a> { let mut read_size = rem; let block_offset: usize = if self.phys_addr.offset == u32::MAX { 0 } else { self.phys_addr.offset as usize }; - if read_size + block_offset > BUFFER_SIZE { - read_size = BUFFER_SIZE - block_offset + if read_size + block_offset > SECTOR_SIZE { + read_size = SECTOR_SIZE - block_offset } buf[read..read + read_size] .copy_from_slice(&self.buf[block_offset..block_offset + read_size]); read += read_size; rem -= 
read_size; self.offset += read_size as u64; - self.set_logical_addr(self.offset).map_err(|v| match v { - Error::Io(_, v) => v, + self.set_logical_addr(self.offset).map_err(|e| match e { + Error::Io(s, e) => io::Error::new(e.kind(), s), _ => io::Error::from(io::ErrorKind::Other), })?; } - io::Result::Ok(read) + Ok(read) } } @@ -215,10 +279,10 @@ impl<'a> Seek for NFSReadStream<'a> { Error::Io(_, v) => v, _ => io::Error::from(io::ErrorKind::Other), })?; - io::Result::Ok(self.offset) + Ok(self.offset) } - fn stream_position(&mut self) -> io::Result { io::Result::Ok(self.offset) } + fn stream_position(&mut self) -> io::Result { Ok(self.offset) } } impl<'a> ReadStream for NFSReadStream<'a> { @@ -229,15 +293,15 @@ impl<'a> ReadStream for NFSReadStream<'a> { impl DiscIO for DiscIONFS { fn begin_read_stream(&mut self, offset: u64) -> io::Result> { - io::Result::Ok(Box::from(NFSReadStream { + Ok(Box::from(NFSReadStream { disc_io: self, - file: Option::None, - crypto: Aes128::new(&self.key.into()), + file: None, + crypto: self.key, phys_addr: Fbo::default(), offset, cur_file: u32::MAX, cur_block: u32::MAX, - buf: [0; BUFFER_SIZE], + buf: [0; SECTOR_SIZE], })) } @@ -245,7 +309,8 @@ impl DiscIO for DiscIONFS { } impl DiscIONFS { - fn get_path>(&self, path: P) -> PathBuf { + fn get_path
<P: AsRef<Path>>(&self, path: P) -> PathBuf {
+    fn get_path<P>
(&self, path: P) -> PathBuf + where P: AsRef { let mut buf = self.directory.clone(); for component in path.as_ref().components() { match component { @@ -261,9 +326,9 @@ impl DiscIONFS { fn get_nfs(&self, num: u32) -> Result { let path = self.get_path(format!("hif_{:06}.nfs", num)); if path.exists() { - Result::Ok(path) + Ok(path) } else { - Result::Err(Error::DiscFormat(format!("Failed to locate {}", path.to_string_lossy()))) + Err(Error::DiscFormat(format!("Failed to locate {}", path.display()))) } } @@ -278,31 +343,32 @@ impl DiscIONFS { key_path = secondary_key_path.canonicalize(); } if key_path.is_err() { - return Result::Err(Error::DiscFormat(format!( + return Err(Error::DiscFormat(format!( "Failed to locate {} or {}", - primary_key_path.to_string_lossy(), - secondary_key_path.to_string_lossy() + primary_key_path.display(), + secondary_key_path.display() ))); } let resolved_path = key_path.unwrap(); File::open(resolved_path.as_path()) - .map_err(|v| { - Error::Io(format!("Failed to open {}", resolved_path.to_string_lossy()), v) - })? + .map_err(|v| Error::Io(format!("Failed to open {}", resolved_path.display()), v))? .read(&mut self.key) - .map_err(|v| { - Error::Io(format!("Failed to read {}", resolved_path.to_string_lossy()), v) - })?; + .map_err(|v| Error::Io(format!("Failed to read {}", resolved_path.display()), v))?; } { // Load header from first file - let header: NFSHeader = File::open(self.get_nfs(0)?)?.read_be()?; + let path = self.get_nfs(0)?; + let mut file = BufReader::new( + File::open(&path).with_context(|| format!("Opening file {}", path.display()))?, + ); + let header = NFSHeader::from_reader(&mut file) + .with_context(|| format!("Reading NFS header from file {}", path.display()))?; // Ensure remaining files exist for i in 1..header.calculate_num_files() { self.get_nfs(i)?; } - self.header = Option::from(header) + self.header = Option::from(header); } - Result::Ok(()) + Ok(()) } } diff --git a/src/io/wia.rs b/src/io/wia.rs new file mode 100644 index 0000000..9ee36df --- /dev/null +++ b/src/io/wia.rs @@ -0,0 +1,1541 @@ +use std::{ + cmp::min, + fs::File, + io, + io::{BufReader, Read, Seek, SeekFrom, Write}, + path::{Path, PathBuf}, +}; + +use aes::{ + cipher::{block_padding::NoPadding, BlockEncryptMut, KeyIvInit}, + Aes128, Block, +}; +use sha1::{Digest, Sha1}; + +use crate::{ + array_ref, array_ref_mut, + disc::{ + wii::{BLOCK_SIZE, HASHES_SIZE}, + SECTOR_SIZE, + }, + io::{DiscIO, DiscIOOptions}, + streams::ReadStream, + util::{ + lfg::LaggedFibonacci, + reader::{ + read_bytes, read_vec, struct_size, write_vec, FromReader, ToWriter, DYNAMIC_SIZE, + }, + take_seek::TakeSeekExt, + }, + Error, Result, ResultContext, +}; + +/// SHA-1 hash bytes +type HashBytes = [u8; 20]; + +/// AES key bytes +type KeyBytes = [u8; 16]; + +/// Magic bytes +type MagicBytes = [u8; 4]; + +/// AES-128-CBC encryptor +type Aes128Cbc = cbc::Encryptor; + +#[derive(Clone, Debug, Eq, PartialEq)] +pub(crate) enum WIARVZMagic { + Wia, + Rvz, +} + +impl FromReader for WIARVZMagic { + type Args<'a> = (); + + const STATIC_SIZE: usize = MagicBytes::STATIC_SIZE; + + fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result + where R: Read + ?Sized { + match &MagicBytes::from_reader(reader)? 
{ + b"WIA\x01" => Ok(Self::Wia), + b"RVZ\x01" => Ok(Self::Rvz), + _ => Err(io::Error::new(io::ErrorKind::InvalidData, "Invalid WIA/RVZ magic")), + } + } +} + +impl ToWriter for WIARVZMagic { + fn to_writer(&self, writer: &mut W) -> io::Result<()> + where W: Write + ?Sized { + match self { + Self::Wia => b"WIA\x01".to_writer(writer), + Self::Rvz => b"RVZ\x01".to_writer(writer), + } + } + + fn write_size(&self) -> usize { Self::STATIC_SIZE } +} + +/// This struct is stored at offset 0x0 and is 0x48 bytes long. The wit source code says its format +/// will never be changed. +#[derive(Clone, Debug)] +pub(crate) struct WIAFileHeader { + pub(crate) magic: WIARVZMagic, + /// The WIA format version. + /// + /// A short note from the wit source code about how version numbers are encoded: + /// + /// ```c + /// //----------------------------------------------------- + /// // Format of version number: AABBCCDD = A.BB | A.BB.CC + /// // If D != 0x00 && D != 0xff => append: 'beta' D + /// //----------------------------------------------------- + /// ``` + pub(crate) version: u32, + /// If the reading program supports the version of WIA indicated here, it can read the file. + /// + /// [version](Self::version) can be higher than `version_compatible`. + pub(crate) version_compatible: u32, + /// The size of the [WIADisc] struct. + pub(crate) disc_size: u32, + /// The SHA-1 hash of the [WIADisc] struct. + /// + /// The number of bytes to hash is determined by [disc_size](Self::disc_size). + pub(crate) disc_hash: HashBytes, + /// The original size of the ISO. + pub(crate) iso_file_size: u64, + /// The size of this file. + pub(crate) wia_file_size: u64, + /// The SHA-1 hash of this struct, up to but not including `file_head_hash` itself. + pub(crate) file_head_hash: HashBytes, +} + +impl FromReader for WIAFileHeader { + type Args<'a> = (); + + const STATIC_SIZE: usize = struct_size([ + WIARVZMagic::STATIC_SIZE, // magic + u32::STATIC_SIZE, // version + u32::STATIC_SIZE, // version_compatible + u32::STATIC_SIZE, // disc_size + HashBytes::STATIC_SIZE, // disc_hash + u64::STATIC_SIZE, // iso_file_size + u64::STATIC_SIZE, // wia_file_size + HashBytes::STATIC_SIZE, // file_head_hash + ]); + + fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result + where R: Read + ?Sized { + Ok(Self { + magic: <_>::from_reader(reader)?, + version: <_>::from_reader(reader)?, + version_compatible: <_>::from_reader(reader)?, + disc_size: <_>::from_reader(reader)?, + disc_hash: <_>::from_reader(reader)?, + iso_file_size: <_>::from_reader(reader)?, + wia_file_size: <_>::from_reader(reader)?, + file_head_hash: <_>::from_reader(reader)?, + }) + } +} + +impl ToWriter for WIAFileHeader { + fn to_writer(&self, writer: &mut W) -> io::Result<()> + where W: Write + ?Sized { + let mut buf = [0u8; Self::STATIC_SIZE - HashBytes::STATIC_SIZE]; + let mut out = buf.as_mut(); + self.magic.to_writer(&mut out)?; + self.version.to_writer(&mut out)?; + self.version_compatible.to_writer(&mut out)?; + self.disc_size.to_writer(&mut out)?; + self.disc_hash.to_writer(&mut out)?; + self.iso_file_size.to_writer(&mut out)?; + self.wia_file_size.to_writer(&mut out)?; + buf.to_writer(writer)?; + // Calculate and write the hash + hash_bytes(&buf).to_writer(writer)?; + Ok(()) + } + + fn write_size(&self) -> usize { Self::STATIC_SIZE } +} + +/// Disc type +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub(crate) enum DiscType { + /// GameCube disc + GameCube = 1, + /// Wii disc + Wii = 2, +} + +impl FromReader for DiscType { + type Args<'a> = (); + 
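+    // Serialized as a big-endian u32: 1 = GameCube, 2 = Wii.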
+ const STATIC_SIZE: usize = u32::STATIC_SIZE; + + fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result + where R: Read + ?Sized { + match u32::from_reader(reader)? { + 1 => Ok(Self::GameCube), + 2 => Ok(Self::Wii), + _ => Err(io::Error::new(io::ErrorKind::InvalidData, "Invalid disc type")), + } + } +} + +impl ToWriter for DiscType { + fn to_writer(&self, writer: &mut W) -> io::Result<()> + where W: Write + ?Sized { + match self { + Self::GameCube => 1u32.to_writer(writer), + Self::Wii => 2u32.to_writer(writer), + } + } + + fn write_size(&self) -> usize { Self::STATIC_SIZE } +} + +/// Compression type +#[non_exhaustive] +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub(crate) enum Compression { + /// No compression. + None = 0, + /// (WIA only) See [WIASegment] + Purge = 1, + /// BZIP2 compression + Bzip2 = 2, + /// LZMA compression + Lzma = 3, + /// LZMA2 compression + Lzma2 = 4, + /// (RVZ only) Zstandard compression + Zstandard = 5, +} + +impl FromReader for Compression { + type Args<'a> = (); + + const STATIC_SIZE: usize = u32::STATIC_SIZE; + + fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result + where R: Read + ?Sized { + match u32::from_reader(reader)? { + 0 => Ok(Self::None), + 1 => Ok(Self::Purge), + 2 => Ok(Self::Bzip2), + 3 => Ok(Self::Lzma), + 4 => Ok(Self::Lzma2), + 5 => Ok(Self::Zstandard), + _ => Err(io::Error::new(io::ErrorKind::InvalidData, "Invalid compression type")), + } + } +} + +impl ToWriter for Compression { + fn to_writer(&self, writer: &mut W) -> io::Result<()> + where W: Write + ?Sized { + match self { + Self::None => 0u32.to_writer(writer), + Self::Purge => 1u32.to_writer(writer), + Self::Bzip2 => 2u32.to_writer(writer), + Self::Lzma => 3u32.to_writer(writer), + Self::Lzma2 => 4u32.to_writer(writer), + Self::Zstandard => 5u32.to_writer(writer), + } + } + + fn write_size(&self) -> usize { Self::STATIC_SIZE } +} + +const DISC_HEAD_SIZE: usize = 0x80; + +/// This struct is stored at offset 0x48, immediately after [WIAFileHeader]. +#[derive(Clone, Debug)] +pub(crate) struct WIADisc { + /// The disc type. + pub(crate) disc_type: DiscType, + /// The compression type. + pub(crate) compression: Compression, + /// The compression level used by the compressor. + /// + /// The possible values are compressor-specific. + /// + /// RVZ only: + /// > This is signed (instead of unsigned) to support negative compression levels in + /// [Zstandard](Compression::Zstandard) (RVZ only). + pub(crate) compression_level: i32, + /// The size of the chunks that data is divided into. + /// + /// WIA only: + /// > Must be a multiple of 2 MiB. + /// + /// RVZ only: + /// > Chunk sizes smaller than 2 MiB are supported. The following applies when using a chunk size + /// smaller than 2 MiB: + /// > - The chunk size must be at least 32 KiB and must be a power of two. (Just like with WIA, + /// sizes larger than 2 MiB do not have to be a power of two, they just have to be an integer + /// multiple of 2 MiB.) + /// > - For Wii partition data, each chunk contains one [WIAExceptionList] which contains + /// exceptions for that chunk (and no other chunks). Offset 0 refers to the first hash of the + /// current chunk, not the first hash of the full 2 MiB of data. + pub(crate) chunk_size: u32, + /// The first 0x80 bytes of the disc image. + pub(crate) disc_head: [u8; DISC_HEAD_SIZE], + /// The number of [WIAPartition] structs. + pub(crate) num_partitions: u32, + /// The size of one [WIAPartition] struct. 
+ /// + /// If this is smaller than the size of [WIAPartition], fill the missing bytes with 0x00. + pub(crate) partition_type_size: u32, + /// The offset in the file where the [WIAPartition] structs are stored (uncompressed). + pub(crate) partition_offset: u64, + /// The SHA-1 hash of the [WIAPartition] structs. + /// + /// The number of bytes to hash is determined by `num_partitions * partition_type_size`. + pub(crate) partition_hash: HashBytes, + /// The number of [WIARawData] structs. + pub(crate) num_raw_data: u32, + /// The offset in the file where the [WIARawData] structs are stored (compressed). + pub(crate) raw_data_offset: u64, + /// The total compressed size of the [WIARawData] structs. + pub(crate) raw_data_size: u32, + /// The number of [WIAGroup] structs. + pub(crate) num_groups: u32, + /// The offset in the file where the [WIAGroup] structs are stored (compressed). + pub(crate) group_offset: u64, + /// The total compressed size of the [WIAGroup] structs. + pub(crate) group_size: u32, + /// The number of used bytes in the [compr_data](Self::compr_data) array. + pub(crate) compr_data_len: u8, + /// Compressor specific data. + /// + /// If the compression method is [None](Compression::None), [Purge](Compression::Purge), + /// [Bzip2](Compression::Bzip2), or [Zstandard](Compression::Zstandard) (RVZ only), + /// [compr_data_len](Self::compr_data_len) is 0. If the compression method is + /// [Lzma](Compression::Lzma) or [Lzma2](Compression::Lzma2), the compressor specific data is + /// stored in the format used by the 7-Zip SDK. It needs to be converted if you are using e.g. + /// liblzma. + /// + /// For [Lzma](Compression::Lzma), the data is 5 bytes long. The first byte encodes the `lc`, + /// `pb`, and `lp` parameters, and the four other bytes encode the dictionary size in little + /// endian. 
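+    ///
+    /// A minimal sketch of decoding those 5 bytes, assuming the standard LZMA
+    /// property encoding `props = (pb * 5 + lp) * 9 + lc`:
+    ///
+    /// ```ignore
+    /// let props = compr_data[0] as u32;
+    /// let lc = props % 9;
+    /// let lp = (props / 9) % 5;
+    /// let pb = props / 45;
+    /// let dict_size = u32::from_le_bytes(compr_data[1..5].try_into().unwrap());
+    /// ```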
+ pub(crate) compr_data: [u8; 7], +} + +impl FromReader for WIADisc { + type Args<'a> = (); + + const STATIC_SIZE: usize = struct_size([ + DiscType::STATIC_SIZE, // disc_type + Compression::STATIC_SIZE, // compression + i32::STATIC_SIZE, // compression_level + u32::STATIC_SIZE, // chunk_size + DISC_HEAD_SIZE, // disc_head + u32::STATIC_SIZE, // num_partitions + u32::STATIC_SIZE, // partition_type_size + u64::STATIC_SIZE, // partition_offset + HashBytes::STATIC_SIZE, // partition_hash + u32::STATIC_SIZE, // num_raw_data + u64::STATIC_SIZE, // raw_data_offset + u32::STATIC_SIZE, // raw_data_size + u32::STATIC_SIZE, // num_groups + u64::STATIC_SIZE, // group_offset + u32::STATIC_SIZE, // group_size + u8::STATIC_SIZE, // compr_data_len + <[u8; 7]>::STATIC_SIZE, // compr_data + ]); + + fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result + where R: Read + ?Sized { + Ok(Self { + disc_type: <_>::from_reader(reader)?, + compression: <_>::from_reader(reader)?, + compression_level: <_>::from_reader(reader)?, + chunk_size: <_>::from_reader(reader)?, + disc_head: <_>::from_reader(reader)?, + num_partitions: <_>::from_reader(reader)?, + partition_type_size: <_>::from_reader(reader)?, + partition_offset: <_>::from_reader(reader)?, + partition_hash: <_>::from_reader(reader)?, + num_raw_data: <_>::from_reader(reader)?, + raw_data_offset: <_>::from_reader(reader)?, + raw_data_size: <_>::from_reader(reader)?, + num_groups: <_>::from_reader(reader)?, + group_offset: <_>::from_reader(reader)?, + group_size: <_>::from_reader(reader)?, + compr_data_len: <_>::from_reader(reader)?, + compr_data: <_>::from_reader(reader)?, + }) + } +} + +impl ToWriter for WIADisc { + fn to_writer(&self, writer: &mut W) -> io::Result<()> + where W: Write + ?Sized { + self.disc_type.to_writer(writer)?; + self.compression.to_writer(writer)?; + self.compression_level.to_writer(writer)?; + self.chunk_size.to_writer(writer)?; + self.disc_head.to_writer(writer)?; + self.num_partitions.to_writer(writer)?; + self.partition_type_size.to_writer(writer)?; + self.partition_offset.to_writer(writer)?; + self.partition_hash.to_writer(writer)?; + self.num_raw_data.to_writer(writer)?; + self.raw_data_offset.to_writer(writer)?; + self.raw_data_size.to_writer(writer)?; + self.num_groups.to_writer(writer)?; + self.group_offset.to_writer(writer)?; + self.group_size.to_writer(writer)?; + self.compr_data_len.to_writer(writer)?; + self.compr_data.to_writer(writer)?; + Ok(()) + } + + fn write_size(&self) -> usize { Self::STATIC_SIZE } +} + +#[derive(Clone, Debug)] +pub(crate) struct WIAPartitionData { + /// The sector on the disc at which this data starts. + /// One sector is 32 KiB (or 31 KiB excluding hashes). + pub(crate) first_sector: u32, + /// The number of sectors on the disc covered by this struct. + /// One sector is 32 KiB (or 31 KiB excluding hashes). + pub(crate) num_sectors: u32, + /// The index of the first [WIAGroup] struct that points to the data covered by this struct. + /// The other [WIAGroup] indices follow sequentially. + pub(crate) group_index: u32, + /// The number of [WIAGroup] structs used for this data. 
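+    ///
+    /// (Typically `num_sectors * 0x8000` divided by the chunk size, rounded up.)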
+ pub(crate) num_groups: u32, +} + +impl FromReader for WIAPartitionData { + type Args<'a> = (); + + const STATIC_SIZE: usize = struct_size([ + u32::STATIC_SIZE, // first_sector + u32::STATIC_SIZE, // num_sectors + u32::STATIC_SIZE, // group_index + u32::STATIC_SIZE, // num_groups + ]); + + fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result + where R: Read + ?Sized { + Ok(Self { + first_sector: <_>::from_reader(reader)?, + num_sectors: <_>::from_reader(reader)?, + group_index: <_>::from_reader(reader)?, + num_groups: <_>::from_reader(reader)?, + }) + } +} + +impl ToWriter for WIAPartitionData { + fn to_writer(&self, writer: &mut W) -> io::Result<()> + where W: Write + ?Sized { + self.first_sector.to_writer(writer)?; + self.num_sectors.to_writer(writer)?; + self.group_index.to_writer(writer)?; + self.num_groups.to_writer(writer)?; + Ok(()) + } + + fn write_size(&self) -> usize { Self::STATIC_SIZE } +} + +/// This struct is used for keeping track of Wii partition data that on the actual disc is encrypted +/// and hashed. This does not include the unencrypted area at the beginning of partitions that +/// contains the ticket, TMD, certificate chain, and H3 table. So for a typical game partition, +/// `pd[0].first_sector * 0x8000` would be 0x0F820000, not 0x0F800000. +/// +/// Wii partition data is stored decrypted and with hashes removed. For each 0x8000 bytes on the +/// disc, 0x7C00 bytes are stored in the WIA file (prior to compression). If the hashes are desired, +/// the reading program must first recalculate the hashes as done when creating a Wii disc image +/// from scratch (see ), and must then apply the hash exceptions +/// which are stored along with the data (see the [WIAExceptionList] section). +#[derive(Clone, Debug)] +pub(crate) struct WIAPartition { + /// The title key for this partition (128-bit AES), which can be used for re-encrypting the + /// partition data. + /// + /// This key can be used directly, without decrypting it using the Wii common key. + pub(crate) partition_key: KeyBytes, + /// To quote the wit source code: `segment 0 is small and defined for management data (boot .. + /// fst). segment 1 takes the remaining data.` + /// + /// The point at which wit splits the two segments is the FST end offset rounded up to the next + /// 2 MiB. Giving the first segment a size which is not a multiple of 2 MiB is likely a bad idea + /// (unless the second segment has a size of 0). + pub(crate) partition_data: [WIAPartitionData; 2], +} + +impl FromReader for WIAPartition { + type Args<'a> = (); + + const STATIC_SIZE: usize = struct_size([ + KeyBytes::STATIC_SIZE, // partition_key + WIAPartitionData::STATIC_SIZE * 2, // partition_data + ]); + + fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result + where R: Read + ?Sized { + Ok(Self { + partition_key: <_>::from_reader(reader)?, + partition_data: [<_>::from_reader(reader)?, <_>::from_reader(reader)?], + }) + } +} + +impl ToWriter for WIAPartition { + fn to_writer(&self, writer: &mut W) -> io::Result<()> + where W: Write + ?Sized { + self.partition_key.to_writer(writer)?; + self.partition_data[0].to_writer(writer)?; + self.partition_data[1].to_writer(writer)?; + Ok(()) + } + + fn write_size(&self) -> usize { Self::STATIC_SIZE } +} + +/// This struct is used for keeping track of disc data that is not stored as [WIAPartition]. +/// The data is stored as is (other than compression being applied). 
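+///
+/// As a worked example of the rounding rule described in the next paragraph: an
+/// entry with `raw_data_offset` 0x80 and `raw_data_size` 0x4FF80 is read as
+/// offset 0x0 and size 0x50000, leaving the end offset unchanged.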
+/// +/// The first [WIARawData] has `raw_data_offset` set to 0x80 and `raw_data_size` set to 0x4FF80, +/// but despite this, it actually contains 0x50000 bytes of data. (However, the first 0x80 bytes +/// should be read from [WIADisc] instead.) This should be handled by rounding the offset down to +/// the previous multiple of 0x8000 (and adding the equivalent amount to the size so that the end +/// offset stays the same), not by special casing the first [WIARawData]. +#[derive(Clone, Debug)] +pub(crate) struct WIARawData { + /// The offset on the disc at which this data starts. + pub(crate) raw_data_offset: u64, + /// The number of bytes on the disc covered by this struct. + pub(crate) raw_data_size: u64, + /// The index of the first [WIAGroup] struct that points to the data covered by this struct. + /// The other [WIAGroup] indices follow sequentially. + pub(crate) group_index: u32, + /// The number of [WIAGroup] structs used for this data. + pub(crate) num_groups: u32, +} + +impl FromReader for WIARawData { + type Args<'a> = (); + + const STATIC_SIZE: usize = struct_size([ + u64::STATIC_SIZE, // raw_data_offset + u64::STATIC_SIZE, // raw_data_size + u32::STATIC_SIZE, // group_index + u32::STATIC_SIZE, // num_groups + ]); + + fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result + where R: Read + ?Sized { + Ok(Self { + raw_data_offset: <_>::from_reader(reader)?, + raw_data_size: <_>::from_reader(reader)?, + group_index: <_>::from_reader(reader)?, + num_groups: <_>::from_reader(reader)?, + }) + } +} + +impl ToWriter for WIARawData { + fn to_writer(&self, writer: &mut W) -> io::Result<()> + where W: Write + ?Sized { + self.raw_data_offset.to_writer(writer)?; + self.raw_data_size.to_writer(writer)?; + self.group_index.to_writer(writer)?; + self.num_groups.to_writer(writer)?; + Ok(()) + } + + fn write_size(&self) -> usize { Self::STATIC_SIZE } +} + +/// This struct points directly to the actual disc data, stored compressed. +/// +/// The data is interpreted differently depending on whether the [WIAGroup] is referenced by a +/// [WIAPartitionData] or a [WIARawData] (see the [WIAPartition] section for details). +/// +/// A [WIAGroup] normally contains chunk_size bytes of decompressed data +/// (or `chunk_size / 0x8000 * 0x7C00` for Wii partition data when not counting hashes), not +/// counting any [WIAExceptionList] structs. However, the last [WIAGroup] of a [WIAPartitionData] +/// or [WIARawData] contains less data than that if `num_sectors * 0x8000` (for [WIAPartitionData]) +/// or `raw_data_size` (for [WIARawData]) is not evenly divisible by `chunk_size`. +#[derive(Clone, Debug)] +pub(crate) struct WIAGroup { + /// The offset in the file where the compressed data is stored. + /// + /// Stored as a `u32`, divided by 4. + pub(crate) data_offset: u32, + /// The size of the compressed data, including any [WIAExceptionList] structs. 0 is a special + /// case meaning that every byte of the decompressed data is 0x00 and the [WIAExceptionList] + /// structs (if there are supposed to be any) contain 0 exceptions. + pub(crate) data_size: u32, +} + +impl FromReader for WIAGroup { + type Args<'a> = (); + + const STATIC_SIZE: usize = struct_size([ + u32::STATIC_SIZE, // data_offset + u32::STATIC_SIZE, // data_size + ]); + + fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result + where R: Read + ?Sized { + Ok(Self { data_offset: <_>::from_reader(reader)?, data_size: <_>::from_reader(reader)? 
}) + } +} + +impl ToWriter for WIAGroup { + fn to_writer(&self, writer: &mut W) -> io::Result<()> + where W: Write + ?Sized { + self.data_offset.to_writer(writer)?; + self.data_size.to_writer(writer)?; + Ok(()) + } + + fn write_size(&self) -> usize { Self::STATIC_SIZE } +} + +/// Compared to [WIAGroup], [RVZGroup] changes the meaning of the most significant bit of +/// [data_size](Self::data_size) and adds one additional attribute. +#[derive(Clone, Debug)] +pub(crate) struct RVZGroup { + /// The offset in the file where the compressed data is stored, divided by 4. + pub(crate) data_offset: u32, + /// The most significant bit is 1 if the data is compressed using the compression method + /// indicated in [WIADisc], and 0 if it is not compressed. The lower 31 bits are the size of + /// the compressed data, including any [WIAExceptionList] structs. The lower 31 bits being 0 is + /// a special case meaning that every byte of the decompressed and unpacked data is 0x00 and + /// the [WIAExceptionList] structs (if there are supposed to be any) contain 0 exceptions. + pub(crate) data_size: u32, + /// The size after decompressing but before decoding the RVZ packing. + /// If this is 0, RVZ packing is not used for this group. + pub(crate) rvz_packed_size: u32, + /// Extracted from the most significant bit of [data_size](Self::data_size). + pub(crate) is_compressed: bool, +} + +impl FromReader for RVZGroup { + type Args<'a> = (); + + const STATIC_SIZE: usize = struct_size([ + u32::STATIC_SIZE, // data_offset + u32::STATIC_SIZE, // data_size + u32::STATIC_SIZE, // rvz_packed_size + ]); + + fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result + where R: Read + ?Sized { + let data_offset = u32::from_reader(reader)?; + let size_and_flag = u32::from_reader(reader)?; + let rvz_packed_size = u32::from_reader(reader)?; + Ok(Self { + data_offset, + data_size: size_and_flag & 0x7FFFFFFF, + rvz_packed_size, + is_compressed: size_and_flag & 0x80000000 != 0, + }) + } +} + +impl ToWriter for RVZGroup { + fn to_writer(&self, writer: &mut W) -> io::Result<()> + where W: Write + ?Sized { + self.data_offset.to_writer(writer)?; + (self.data_size | (self.is_compressed as u32) << 31).to_writer(writer)?; + self.rvz_packed_size.to_writer(writer)?; + Ok(()) + } + + fn write_size(&self) -> usize { Self::STATIC_SIZE } +} + +impl From for RVZGroup { + fn from(value: WIAGroup) -> Self { + Self { + data_offset: value.data_offset, + data_size: value.data_size, + rvz_packed_size: 0, + is_compressed: true, + } + } +} + +/// This struct represents a 20-byte difference between the recalculated hash data and the original +/// hash data. (See also [WIAExceptionList]) +/// +/// When recalculating hashes for a [WIAGroup] with a size which is not evenly divisible by 2 MiB +/// (with the size of the hashes included), the missing bytes should be treated as zeroes for the +/// purpose of hashing. (wit's writing code seems to act as if the reading code does not assume that +/// these missing bytes are zero, but both wit's and Dolphin's reading code treat them as zero. +/// Dolphin's writing code assumes that the reading code treats them as zero.) +/// +/// wit's writing code only outputs [WIAException] structs for mismatches in the actual hash +/// data, not in the padding data (which normally only contains zeroes). Dolphin's writing code +/// outputs [WIAException] structs for both hash data and padding data. 
When Dolphin needs to +/// write [WIAException] structs for a padding area which is 32 bytes long, it writes one which +/// covers the first 20 bytes of the padding area and one which covers the last 20 bytes of the +/// padding area, generating 12 bytes of overlap between the [WIAException] structs. +#[derive(Clone, Debug)] +pub(crate) struct WIAException { + /// The offset among the hashes. The offsets 0x0000-0x0400 here map to the offsets 0x0000-0x0400 + /// in the full 2 MiB of data, the offsets 0x0400-0x0800 here map to the offsets 0x8000-0x8400 + /// in the full 2 MiB of data, and so on. + /// + /// The offsets start over at 0 for each new [WIAExceptionList]. + pub(crate) offset: u16, + /// The hash that the automatically generated hash at the given offset needs to be replaced + /// with. + /// + /// The replacement should happen after calculating all hashes for the current 2 MiB of data + /// but before encrypting the hashes. + pub(crate) hash: HashBytes, +} + +impl FromReader for WIAException { + type Args<'a> = (); + + const STATIC_SIZE: usize = struct_size([ + u16::STATIC_SIZE, // offset + HashBytes::STATIC_SIZE, // hash + ]); + + fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result + where R: Read + ?Sized { + Ok(Self { offset: <_>::from_reader(reader)?, hash: <_>::from_reader(reader)? }) + } +} + +impl ToWriter for WIAException { + fn to_writer(&self, writer: &mut W) -> io::Result<()> + where W: Write + ?Sized { + self.offset.to_writer(writer)?; + self.hash.to_writer(writer)?; + Ok(()) + } + + fn write_size(&self) -> usize { Self::STATIC_SIZE } +} + +/// Each [WIAGroup] of Wii partition data contains one or more [WIAExceptionList] structs before +/// the actual data, one for each 2 MiB of data in the [WIAGroup]. The number of [WIAExceptionList] +/// structs per [WIAGroup] is always `chunk_size / 0x200000`, even for a [WIAGroup] which contains +/// less data than normal due to it being at the end of a partition. +/// +/// For memory management reasons, programs which read WIA files might place a limit on how many +/// exceptions there can be in a [WIAExceptionList]. Dolphin's reading code has a limit of +/// `52 × 64 = 3328` (unless the compression method is [None](Compression::None) or +/// [Purge](Compression::Purge), in which case there is no limit), which is enough to cover all +/// hashes and all padding. wit's reading code seems to be written as if `47 × 64 = 3008` is the +/// maximum it needs to be able to handle, which is enough to cover all hashes but not any padding. +/// However, because wit allocates more memory than needed, it seems to be possible to exceed 3008 +/// by some amount without problems. It should be safe for writing code to assume that reading code +/// can handle at least 3328 exceptions per [WIAExceptionList]. +/// +/// Somewhat ironically, there are exceptions to how [WIAExceptionList] structs are handled: +/// +/// For the compression method [Purge](Compression::Purge), the [WIAExceptionList] structs are +/// stored uncompressed (in other words, before the first [WIASegment]). For +/// [Bzip2](Compression::Bzip2), [Lzma](Compression::Lzma) and [Lzma2](Compression::Lzma2), they are +/// compressed along with the rest of the data. +/// +/// For the compression methods [None](Compression::None) and [Purge](Compression::Purge), if the +/// end offset of the last [WIAExceptionList] is not evenly divisible by 4, padding is inserted +/// after it so that the data afterwards will start at a 4 byte boundary. 
This padding is not +/// inserted for the other compression methods. +#[derive(Clone, Debug)] +pub(crate) struct WIAExceptionList { + /// Each [WIAException] describes one difference between the hashes obtained by hashing the + /// partition data and the original hashes. + pub(crate) exceptions: Vec, +} + +impl FromReader for WIAExceptionList { + type Args<'a> = (); + + const STATIC_SIZE: usize = DYNAMIC_SIZE; + + fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result + where R: Read + ?Sized { + let num_exceptions = u16::from_reader(reader)?; + let exceptions = read_vec(reader, num_exceptions as usize)?; + Ok(Self { exceptions }) + } +} + +impl ToWriter for WIAExceptionList { + fn to_writer(&self, writer: &mut W) -> io::Result<()> + where W: Write + ?Sized { + (self.exceptions.len() as u16).to_writer(writer)?; + write_vec(writer, &self.exceptions)?; + Ok(()) + } + + fn write_size(&self) -> usize { + u16::STATIC_SIZE + self.exceptions.len() * WIAException::STATIC_SIZE + } +} + +/// This struct is used by the simple compression method [Purge](Compression::Purge), which stores +/// runs of zeroes efficiently and stores other data as is. +/// +/// Each [Purge](Compression::Purge) chunk contains zero or more [WIASegment] structs stored in +/// order of ascending offset, followed by a SHA-1 hash (0x14 bytes) of the [WIAExceptionList] +/// structs (if any) and the [WIASegment] structs. Bytes in the decompressed data that are not +/// covered by any [WIASegment] struct are set to 0x00. +#[derive(Clone, Debug)] +pub(crate) struct WIASegment { + /// The offset of data within the decompressed data. + /// + /// Any [WIAExceptionList] structs are not counted as part of the decompressed data. + pub(crate) offset: u32, + /// The data. + pub(crate) data: Vec, +} + +impl FromReader for WIASegment { + type Args<'a> = (); + + const STATIC_SIZE: usize = DYNAMIC_SIZE; + + fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result + where R: Read + ?Sized { + let offset = u32::from_reader(reader)?; + let size = u32::from_reader(reader)?; + let data = read_bytes(reader, size as usize)?; + Ok(Self { offset, data }) + } +} + +impl ToWriter for WIASegment { + fn to_writer(&self, writer: &mut W) -> io::Result<()> + where W: Write + ?Sized { + self.offset.to_writer(writer)?; + (self.data.len() as u32).to_writer(writer)?; + self.data.to_writer(writer)?; + Ok(()) + } + + fn write_size(&self) -> usize { u32::STATIC_SIZE * 2 + self.data.len() } +} + +pub(crate) enum Decompressor { + None, + // Purge, + #[cfg(feature = "compress-bzip2")] + Bzip2, + // Lzma, + // Lzma2, + #[cfg(feature = "compress-zstd")] + Zstandard, +} + +impl Decompressor { + pub(crate) fn new(disc: &WIADisc) -> Result { + match disc.compression { + Compression::None => Ok(Self::None), + // Compression::Purge => Ok(Self::Purge), + #[cfg(feature = "compress-bzip2")] + Compression::Bzip2 => Ok(Self::Bzip2), + // Compression::Lzma => Ok(Self::Lzma), + // Compression::Lzma2 => Ok(Self::Lzma2), + #[cfg(feature = "compress-zstd")] + Compression::Zstandard => Ok(Self::Zstandard), + comp => Err(Error::DiscFormat(format!("Unsupported WIA/RVZ compression: {:?}", comp))), + } + } + + pub(crate) fn wrap<'a, R>(&mut self, reader: R) -> Result> + where R: Read + 'a { + Ok(match self { + Decompressor::None => Box::new(reader), + #[cfg(feature = "compress-bzip2")] + Decompressor::Bzip2 => Box::new(bzip2::read::BzDecoder::new(reader)), + #[cfg(feature = "compress-zstd")] + Decompressor::Zstandard => { + 
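+                // zstd's streaming Decoder consumes the compressed group bytes
+                // directly; construction can fail, hence the context() below.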
+/// This struct is used by the simple compression method [Purge](Compression::Purge), which stores
+/// runs of zeroes efficiently and stores other data as is.
+///
+/// Each [Purge](Compression::Purge) chunk contains zero or more [WIASegment] structs stored in
+/// order of ascending offset, followed by a SHA-1 hash (0x14 bytes) of the [WIAExceptionList]
+/// structs (if any) and the [WIASegment] structs. Bytes in the decompressed data that are not
+/// covered by any [WIASegment] struct are set to 0x00.
+#[derive(Clone, Debug)]
+pub(crate) struct WIASegment {
+    /// The offset of data within the decompressed data.
+    ///
+    /// Any [WIAExceptionList] structs are not counted as part of the decompressed data.
+    pub(crate) offset: u32,
+    /// The data.
+    pub(crate) data: Vec<u8>,
+}
+
+impl FromReader for WIASegment {
+    type Args<'a> = ();
+
+    const STATIC_SIZE: usize = DYNAMIC_SIZE;
+
+    fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
+    where R: Read + ?Sized {
+        let offset = u32::from_reader(reader)?;
+        let size = u32::from_reader(reader)?;
+        let data = read_bytes(reader, size as usize)?;
+        Ok(Self { offset, data })
+    }
+}
+
+impl ToWriter for WIASegment {
+    fn to_writer<W>(&self, writer: &mut W) -> io::Result<()>
+    where W: Write + ?Sized {
+        self.offset.to_writer(writer)?;
+        (self.data.len() as u32).to_writer(writer)?;
+        self.data.to_writer(writer)?;
+        Ok(())
+    }
+
+    fn write_size(&self) -> usize { u32::STATIC_SIZE * 2 + self.data.len() }
+}
+
+pub(crate) enum Decompressor {
+    None,
+    // Purge,
+    #[cfg(feature = "compress-bzip2")]
+    Bzip2,
+    // Lzma,
+    // Lzma2,
+    #[cfg(feature = "compress-zstd")]
+    Zstandard,
+}
+
+impl Decompressor {
+    pub(crate) fn new(disc: &WIADisc) -> Result<Self> {
+        match disc.compression {
+            Compression::None => Ok(Self::None),
+            // Compression::Purge => Ok(Self::Purge),
+            #[cfg(feature = "compress-bzip2")]
+            Compression::Bzip2 => Ok(Self::Bzip2),
+            // Compression::Lzma => Ok(Self::Lzma),
+            // Compression::Lzma2 => Ok(Self::Lzma2),
+            #[cfg(feature = "compress-zstd")]
+            Compression::Zstandard => Ok(Self::Zstandard),
+            comp => Err(Error::DiscFormat(format!("Unsupported WIA/RVZ compression: {:?}", comp))),
+        }
+    }
+
+    pub(crate) fn wrap<'a, R>(&mut self, reader: R) -> Result<Box<dyn Read + 'a>>
+    where R: Read + 'a {
+        Ok(match self {
+            Decompressor::None => Box::new(reader),
+            #[cfg(feature = "compress-bzip2")]
+            Decompressor::Bzip2 => Box::new(bzip2::read::BzDecoder::new(reader)),
+            #[cfg(feature = "compress-zstd")]
+            Decompressor::Zstandard => {
+                Box::new(zstd::stream::Decoder::new(reader).context("Creating zstd decoder")?)
+            }
+        })
+    }
+}
+
+/// In a sector, following the 0x400 byte block of hashes, each 0x400 bytes of decrypted data is
+/// hashed, yielding 31 H0 hashes.
+/// Then, 8 sectors are aggregated into a subgroup, and the 31 H0 hashes for each sector are hashed,
+/// yielding 8 H1 hashes.
+/// Then, 8 subgroups are aggregated into a group, and the 8 H1 hashes for each subgroup are hashed,
+/// yielding 8 H2 hashes.
+/// Finally, the 8 H2 hashes for each group are hashed, yielding 1 H3 hash.
+/// The H3 hashes for each group are stored in the partition's H3 table.
+pub(crate) struct HashTable {
+    /// SHA-1 hash of the 31 H0 hashes for each sector.
+    pub(crate) h1_hashes: Vec<HashBytes>,
+    /// SHA-1 hash of the 8 H1 hashes for each subgroup.
+    pub(crate) h2_hashes: Vec<HashBytes>,
+    /// SHA-1 hash of the 8 H2 hashes for each group.
+    pub(crate) h3_hashes: Vec<HashBytes>,
+}
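+
+// Illustrative sketch, not from the upstream source: the hash tree sizes
+// implied by the doc comment above. A group covers 8 × 8 = 64 sectors, so a
+// partition with `n` sectors needs `n` H1 hashes, `n / 8` H2 hashes and
+// `n / 64` H3 hashes, after rounding the sector count up to a whole group.
+#[cfg(test)]
+mod hash_table_size_tests {
+    #[test]
+    fn table_sizes() {
+        let part_sectors: usize = 143_432; // hypothetical partition size
+        let num_sectors = part_sectors.next_multiple_of(64);
+        let num_subgroups = num_sectors / 8;
+        let num_groups = num_subgroups / 8;
+        assert_eq!(num_sectors, 143_488);
+        assert_eq!(num_subgroups, 17_936);
+        assert_eq!(num_groups, 2_242);
+    }
+}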
+pub(crate) struct DiscIOWIA {
+    pub(crate) header: WIAFileHeader,
+    pub(crate) disc: WIADisc,
+    pub(crate) partitions: Vec<WIAPartition>,
+    pub(crate) raw_data: Vec<WIARawData>,
+    pub(crate) groups: Vec<RVZGroup>,
+    pub(crate) filename: PathBuf,
+    pub(crate) encrypt: bool,
+    pub(crate) hash_tables: Vec<HashTable>,
+}
+
+/// Wraps a buffer, reading zeros for any extra bytes.
+struct SizedRead<'a> {
+    buf: &'a [u8],
+    pos: usize,
+}
+
+impl<'a> SizedRead<'a> {
+    fn new(buf: &'a [u8]) -> Self { Self { buf, pos: 0 } }
+}
+
+impl Read for SizedRead<'_> {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        let written = if self.pos < self.buf.len() {
+            let to_read = min(buf.len(), self.buf.len() - self.pos);
+            buf[..to_read].copy_from_slice(&self.buf[self.pos..self.pos + to_read]);
+            to_read
+        } else {
+            0
+        };
+        buf[written..].fill(0);
+        self.pos += buf.len();
+        Ok(buf.len())
+    }
+}
+
+impl Seek for SizedRead<'_> {
+    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+        match pos {
+            SeekFrom::Start(pos) => self.pos = pos as usize,
+            SeekFrom::Current(pos) => self.pos = (self.pos as i64 + pos) as usize,
+            SeekFrom::End(_) => unimplemented!(),
+        }
+        Ok(self.pos as u64)
+    }
+
+    fn stream_position(&mut self) -> io::Result<u64> { Ok(self.pos as u64) }
+}
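+
+// Illustrative sketch, not from the upstream source: `SizedRead` zero-extends
+// past the end of its buffer, which lets a fixed-size struct be parsed from a
+// shorter on-disk header (the WIA disc struct may be stored with fewer bytes
+// than the current struct size in older file versions).
+#[cfg(test)]
+mod sized_read_tests {
+    use super::SizedRead;
+    use std::io::Read;
+
+    #[test]
+    fn zero_extends() {
+        let mut reader = SizedRead::new(&[1, 2, 3]);
+        let mut buf = [0xFFu8; 6];
+        reader.read_exact(&mut buf).unwrap();
+        assert_eq!(buf, [1, 2, 3, 0, 0, 0]);
+    }
+}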
+ // println!("Raw data: {:?}", raw_data); + }; + + // Load group headers + let mut groups = Vec::with_capacity(disc.num_groups as usize); + { + file.seek(SeekFrom::Start(disc.group_offset)) + .context("Seeking to WIA/RVZ group headers")?; + let mut reader = decompressor.wrap((&mut file).take(disc.group_size as u64))?; + let bytes = read_bytes( + &mut reader, + disc.num_groups as usize + * if is_rvz { RVZGroup::STATIC_SIZE } else { WIAGroup::STATIC_SIZE }, + ) + .context("Reading WIA/RVZ group headers")?; + let mut slice = bytes.as_slice(); + for i in 0..disc.num_groups { + if is_rvz { + groups.push( + RVZGroup::from_reader(&mut slice) + .with_context(|| format!("Parsing RVZ group header {}", i))?, + ); + } else { + groups.push( + WIAGroup::from_reader(&mut slice) + .with_context(|| format!("Parsing WIA group header {}", i))? + .into(), + ); + } + } + // println!("Groups: {:?}", groups); + } + + let mut disc_io = Self { + header, + disc, + partitions, + raw_data, + groups, + filename: filename.to_owned(), + encrypt: options.rebuild_hashes, + hash_tables: vec![], + }; + if options.rebuild_hashes { + disc_io.rebuild_hashes()?; + } + Ok(disc_io) + } + + fn group_for_offset(&self, offset: u64) -> Option { + if let Some((p_idx, pd)) = self.partitions.iter().enumerate().find_map(|(p_idx, p)| { + p.partition_data + .iter() + .find(|pd| { + let start = pd.first_sector as u64 * SECTOR_SIZE as u64; + let end = start + pd.num_sectors as u64 * SECTOR_SIZE as u64; + offset >= start && offset < end + }) + .map(|pd| (p_idx, pd)) + }) { + let start = pd.first_sector as u64 * SECTOR_SIZE as u64; + let group_index = (offset - start) / self.disc.chunk_size as u64; + if group_index >= pd.num_groups as u64 { + return None; + } + let disc_offset = start + group_index * self.disc.chunk_size as u64; + let chunk_size = (self.disc.chunk_size as u64 * BLOCK_SIZE as u64) / SECTOR_SIZE as u64; + let partition_offset = group_index * chunk_size; + let partition_end = pd.num_sectors as u64 * BLOCK_SIZE as u64; + self.groups.get(pd.group_index as usize + group_index as usize).map(|g| GroupResult { + disc_offset, + partition_offset, + group: g.clone(), + partition_index: Some(p_idx), + chunk_size: chunk_size as u32, + partition_end, + }) + } else if let Some(d) = self.raw_data.iter().find(|d| { + let start = d.raw_data_offset & !0x7FFF; + let end = d.raw_data_offset + d.raw_data_size; + offset >= start && offset < end + }) { + let start = d.raw_data_offset & !0x7FFF; + let end = d.raw_data_offset + d.raw_data_size; + let group_index = (offset - start) / self.disc.chunk_size as u64; + if group_index >= d.num_groups as u64 { + return None; + } + let disc_offset = start + group_index * self.disc.chunk_size as u64; + self.groups.get(d.group_index as usize + group_index as usize).map(|g| GroupResult { + disc_offset, + partition_offset: disc_offset, + group: g.clone(), + partition_index: None, + chunk_size: self.disc.chunk_size, + partition_end: end, + }) + } else { + None + } + } + + pub(crate) fn rebuild_hashes(&mut self) -> Result<()> { + const NUM_H0_HASHES: usize = BLOCK_SIZE / HASHES_SIZE; + const H0_HASHES_SIZE: usize = HashBytes::STATIC_SIZE * NUM_H0_HASHES; + + // Precompute hashes for zeroed sectors. 
+        let zero_h0_hash = hash_bytes(&[0u8; HASHES_SIZE]);
+        let mut zero_h1_hash = Sha1::new();
+        for _ in 0..NUM_H0_HASHES {
+            zero_h1_hash.update(zero_h0_hash);
+        }
+        let zero_h1_hash: HashBytes = zero_h1_hash.finalize().into();
+
+        let mut hash_tables = Vec::with_capacity(self.partitions.len());
+        let mut stream =
+            WIAReadStream::new(self, 0, false).context("Creating WIA/RVZ read stream")?;
+        for part in &self.partitions {
+            let first_sector = part.partition_data[0].first_sector;
+            if first_sector + part.partition_data[0].num_sectors
+                != part.partition_data[1].first_sector
+            {
+                return Err(Error::DiscFormat(format!(
+                    "Partition data is not contiguous: {}..{} != {}",
+                    first_sector,
+                    first_sector + part.partition_data[0].num_sectors,
+                    part.partition_data[1].first_sector
+                )));
+            }
+            let part_sectors =
+                part.partition_data[0].num_sectors + part.partition_data[1].num_sectors;
+
+            let num_sectors = part_sectors.next_multiple_of(64) as usize;
+            let num_subgroups = num_sectors / 8;
+            let num_groups = num_subgroups / 8;
+            println!(
+                "Rebuilding hashes: {} sectors, {} subgroups, {} groups",
+                num_sectors, num_subgroups, num_groups
+            );
+
+            let mut hash_table = HashTable {
+                h1_hashes: vec![HashBytes::default(); num_sectors],
+                h2_hashes: vec![HashBytes::default(); num_subgroups],
+                h3_hashes: vec![HashBytes::default(); num_groups],
+            };
+            let mut h0_buf = [0u8; H0_HASHES_SIZE];
+            for h3_index in 0..num_groups {
+                let mut h3_hasher = Sha1::new();
+                for h2_index in h3_index * 8..h3_index * 8 + 8 {
+                    let mut h2_hasher = Sha1::new();
+                    for h1_index in h2_index * 8..h2_index * 8 + 8 {
+                        let h1_hash = if h1_index >= part_sectors as usize {
+                            zero_h1_hash
+                        } else {
+                            let sector = first_sector + h1_index as u32;
+                            stream
+                                .seek(SeekFrom::Start(sector as u64 * SECTOR_SIZE as u64))
+                                .with_context(|| format!("Seeking to sector {}", sector))?;
+                            stream
+                                .read_exact(&mut h0_buf)
+                                .with_context(|| format!("Reading sector {}", sector))?;
+                            hash_bytes(&h0_buf)
+                        };
+                        hash_table.h1_hashes[h1_index] = h1_hash;
+                        h2_hasher.update(h1_hash);
+                    }
+                    let h2_hash = h2_hasher.finalize().into();
+                    hash_table.h2_hashes[h2_index] = h2_hash;
+                    h3_hasher.update(h2_hash);
+                }
+                hash_table.h3_hashes[h3_index] = h3_hasher.finalize().into();
+            }
+
+            hash_tables.push(hash_table);
+        }
+        self.hash_tables = hash_tables;
+        Ok(())
+    }
+}
+
+impl DiscIO for DiscIOWIA {
+    fn begin_read_stream(&mut self, offset: u64) -> io::Result<Box<dyn ReadStream + '_>> {
+        Ok(Box::new(WIAReadStream::new(self, offset, self.encrypt)?))
+    }
+
+    fn has_wii_crypto(&self) -> bool { self.encrypt && self.disc.disc_type == DiscType::Wii }
+}
+
+pub(crate) struct WIAReadStream<'a> {
+    /// The disc IO.
+    disc_io: &'a DiscIOWIA,
+    /// The currently open file handle.
+    file: BufReader<File>,
+    /// The data read offset.
+    offset: u64,
+    /// The data offset of the current group.
+    group_offset: u64,
+    /// The current group data.
+    group_data: Vec<u8>,
+    /// Exception lists for the current group.
+    exception_lists: Vec<WIAExceptionList>,
+    /// The decompressor.
+    decompressor: Decompressor,
+    /// Whether to re-encrypt Wii partition data.
+    encrypt: bool,
+}
+fn read_exception_lists<R>(
+    reader: &mut R,
+    partition_index: Option<usize>,
+    chunk_size: u32,
+) -> io::Result<Vec<WIAExceptionList>>
+where
+    R: Read + ?Sized,
+{
+    if partition_index.is_none() {
+        return Ok(vec![]);
+    }
+
+    let num_exception_list = (chunk_size as usize).div_ceil(0x200000);
+    // println!("Num exception list: {:?}", num_exception_list);
+    let exception_lists = read_vec::<WIAExceptionList, _>(reader, num_exception_list)?;
+    for list in &exception_lists {
+        if !list.exceptions.is_empty() {
+            println!("Exception list: {:?}", list);
+        }
+    }
+    Ok(exception_lists)
+}
+
+impl<'a> WIAReadStream<'a> {
+    pub(crate) fn new(disc_io: &'a DiscIOWIA, offset: u64, encrypt: bool) -> io::Result<Self> {
+        let result = match disc_io.group_for_offset(offset) {
+            Some(v) => v,
+            None => return Err(io::Error::from(io::ErrorKind::InvalidInput)),
+        };
+        let file = BufReader::new(File::open(&disc_io.filename)?);
+        let decompressor = Decompressor::new(&disc_io.disc)
+            .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
+        let mut stream = Self {
+            disc_io,
+            file,
+            offset,
+            group_offset: result.disc_offset,
+            group_data: Vec::new(),
+            exception_lists: vec![],
+            decompressor,
+            encrypt,
+        };
+        stream.read_group(result)?; // Initialize group data
+        Ok(stream)
+    }
+
+    /// If the current group does not contain the current offset, load the new group.
+    /// Returns false if the offset is not in the disc.
+    fn check_group(&mut self) -> io::Result<bool> {
+        if self.offset < self.group_offset
+            || self.offset >= self.group_offset + self.group_data.len() as u64
+        {
+            let Some(result) = self.disc_io.group_for_offset(self.offset) else {
+                return Ok(false);
+            };
+            if result.disc_offset == self.group_offset {
+                return Err(io::Error::new(
+                    io::ErrorKind::InvalidData,
+                    "Group offset did not change",
+                ));
+            }
+            self.group_offset = result.disc_offset;
+            self.read_group(result)?;
+        }
+        Ok(true)
+    }
+
+    /// Reads new group data into the buffer, handling decompression and RVZ packing.
+    fn read_group(&mut self, result: GroupResult) -> io::Result<()> {
+        // Special case for all-zero data
+        if result.group.data_size == 0 {
+            self.exception_lists.clear();
+            let size = min(result.chunk_size as u64, result.partition_end - result.partition_offset)
+                as usize;
+            self.group_data = vec![0u8; size];
+            self.recalculate_hashes(result)?;
+            return Ok(());
+        }
+
+        self.group_data = Vec::with_capacity(result.chunk_size as usize);
+        let group_data_start = result.group.data_offset as u64 * 4;
+        self.file.seek(SeekFrom::Start(group_data_start))?;
+
+        let mut reader = (&mut self.file).take_seek(result.group.data_size as u64);
+        let uncompressed_exception_lists =
+            matches!(self.disc_io.disc.compression, Compression::None | Compression::Purge)
+                || !result.group.is_compressed;
+        if uncompressed_exception_lists {
+            self.exception_lists = read_exception_lists(
+                &mut reader,
+                result.partition_index,
+                self.disc_io.disc.chunk_size, // result.chunk_size?
+            )?;
+            // Align to 4
+            let rem = reader.stream_position()? % 4;
+            if rem != 0 {
+                reader.seek(SeekFrom::Current((4 - rem) as i64))?;
+            }
+        }
+        let mut reader: Box<dyn Read + '_> =
+            if result.group.is_compressed && self.disc_io.disc.compression != Compression::None {
+                self.decompressor
+                    .wrap(reader)
+                    .map_err(|v| io::Error::new(io::ErrorKind::InvalidData, v))?
+            } else {
+                Box::new(reader)
+            };
+        if !uncompressed_exception_lists {
+            self.exception_lists = read_exception_lists(
+                reader.as_mut(),
+                result.partition_index,
+                self.disc_io.disc.chunk_size, // result.chunk_size?
+            )?;
+        }
+
+        if result.group.rvz_packed_size > 0 {
+            // Decode RVZ packed data
+            let mut lfg = LaggedFibonacci::default();
+            loop {
+                let mut size_bytes = [0u8; 4];
+                let read = reader.read(&mut size_bytes)?;
+                if read == 0 {
+                    break;
+                } else if read < 4 {
+                    return Err(io::Error::new(
+                        io::ErrorKind::UnexpectedEof,
+                        "Failed to read RVZ packed size",
+                    ));
+                }
+                let size = u32::from_be_bytes(size_bytes);
+                let cur_data_len = self.group_data.len();
+                if size & 0x80000000 != 0 {
+                    // Junk data
+                    let size = size & 0x7FFFFFFF;
+                    lfg.init_with_reader(reader.as_mut())?;
+                    lfg.skip(
+                        ((result.partition_offset + cur_data_len as u64) % SECTOR_SIZE as u64)
+                            as usize,
+                    );
+                    self.group_data.resize(cur_data_len + size as usize, 0);
+                    lfg.fill(&mut self.group_data[cur_data_len..]);
+                } else {
+                    // Real data
+                    self.group_data.resize(cur_data_len + size as usize, 0);
+                    reader.read_exact(&mut self.group_data[cur_data_len..])?;
+                }
+            }
+        } else {
+            // Read and decompress data
+            reader.read_to_end(&mut self.group_data)?;
+        }
+
+        drop(reader);
+        self.recalculate_hashes(result)?;
+        Ok(())
+    }
+
+    fn recalculate_hashes(&mut self, result: GroupResult) -> io::Result<()> {
+        let Some(partition_index) = result.partition_index else {
+            // Data not inside of a Wii partition
+            return Ok(());
+        };
+        let hash_table = self.disc_io.hash_tables.get(partition_index);
+
+        if self.group_data.len() % BLOCK_SIZE != 0 {
+            return Err(io::Error::new(
+                io::ErrorKind::InvalidData,
+                format!("Invalid group data size: {:#X}", self.group_data.len()),
+            ));
+        }
+
+        // WIA/RVZ excludes the hash data for each sector, instead storing all data contiguously.
+        // We need to add space for the hash data, and then recalculate the hashes.
+        let num_sectors = self.group_data.len() / BLOCK_SIZE;
+        let mut out = vec![0u8; num_sectors * SECTOR_SIZE];
+        for i in 0..num_sectors {
+            let data = array_ref![self.group_data, i * BLOCK_SIZE, BLOCK_SIZE];
+            let out = array_ref_mut![out, i * SECTOR_SIZE, SECTOR_SIZE];
+
+            // Rebuild H0 hashes
+            for n in 0..31 {
+                let hash = hash_bytes(array_ref![data, n * 0x400, 0x400]);
+                array_ref_mut![out, n * 20, 20].copy_from_slice(&hash);
+            }
+
+            // Rebuild H1 and H2 hashes if available
+            let mut data_copied = false;
+            if let Some(hash_table) = hash_table {
+                let partition = &self.disc_io.partitions[partition_index];
+                let part_sector = (result.disc_offset / SECTOR_SIZE as u64) as usize + i
+                    - partition.partition_data[0].first_sector as usize;
+                let h1_start = part_sector & !7;
+                for i in 0..8 {
+                    array_ref_mut![out, 0x280 + i * 20, 20]
+                        .copy_from_slice(&hash_table.h1_hashes[h1_start + i]);
+                }
+                let h2_start = (h1_start / 8) & !7;
+                for i in 0..8 {
+                    array_ref_mut![out, 0x340 + i * 20, 20]
+                        .copy_from_slice(&hash_table.h2_hashes[h2_start + i]);
+                }
+
+                // if result.disc_offset == 0x9150000 {
+                //     println!("Validating hashes for sector {}: {:X?}", part_sector, result);
+                //     // Print H0 hashes
+                //     for i in 0..31 {
+                //         println!("H0 hash {} {:x}", i, as_digest(array_ref![out, i * 20, 20]));
+                //     }
+                //     // Print H1 hashes
+                //     for i in 0..8 {
+                //         println!(
+                //             "H1 hash {} {:x}",
+                //             i,
+                //             as_digest(array_ref![out, 0x280 + i * 20, 20])
+                //         );
+                //     }
+                //     // Print H2 hashes
+                //     for i in 0..8 {
+                //         println!(
+                //             "H2 hash {} {:x}",
+                //             i,
+                //             as_digest(array_ref![out, 0x340 + i * 20, 20])
+                //         );
+                //     }
+                // }
+
+                if self.encrypt {
+                    // Re-encrypt hashes and data
+                    let key = (&partition.partition_key).into();
+                    Aes128Cbc::new(key, &Block::from([0u8; 16]))
+                        .encrypt_padded_mut::<NoPadding>(&mut out[..HASHES_SIZE], HASHES_SIZE)
+                        .expect("Failed to encrypt hashes");
+                    Aes128Cbc::new(key, &Block::from(*array_ref![out, 0x3d0, 16]))
+                        .encrypt_padded_b2b_mut::<NoPadding>(data, &mut out[HASHES_SIZE..])
+                        .expect("Failed to encrypt data");
+                    data_copied = true;
+                }
+            }
+
+            if !data_copied {
+                // Copy decrypted data
+                array_ref_mut![out, 0x400, BLOCK_SIZE].copy_from_slice(data);
+            }
+        }
+
+        self.group_data = out;
+        Ok(())
+    }
+}
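+
+// Illustrative sketch, not from the upstream source: the RVZ packed size words
+// decoded in `read_group` above. The high bit selects junk data (PRNG filler
+// seeded by the bytes that follow the size word); the low 31 bits are the run
+// length.
+#[cfg(test)]
+mod rvz_pack_tests {
+    /// Hypothetical helper mirroring the flag check in `read_group`.
+    fn decode_packed_size(word: u32) -> (bool, u32) {
+        (word & 0x80000000 != 0, word & 0x7FFFFFFF)
+    }
+
+    #[test]
+    fn decodes_flag_and_length() {
+        assert_eq!(decode_packed_size(0x80000020), (true, 0x20)); // junk run
+        assert_eq!(decode_packed_size(0x00001000), (false, 0x1000)); // literal data
+    }
+}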
+impl<'a> Read for WIAReadStream<'a> {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        let mut rem = buf.len();
+        let mut read: usize = 0;
+
+        // Special case: First 0x80 bytes are stored in the disc header
+        if self.offset < DISC_HEAD_SIZE as u64 {
+            let to_read = min(rem, DISC_HEAD_SIZE - self.offset as usize);
+            buf[read..read + to_read].copy_from_slice(
+                &self.disc_io.disc.disc_head[self.offset as usize..self.offset as usize + to_read],
+            );
+            rem -= to_read;
+            read += to_read;
+            self.offset += to_read as u64;
+        }
+
+        // Decompress groups and read data
+        while rem > 0 {
+            if !self.check_group()? {
+                break;
+            }
+            let group_offset = (self.offset - self.group_offset) as usize;
+            let to_read = min(rem, self.group_data.len() - group_offset);
+            buf[read..read + to_read]
+                .copy_from_slice(&self.group_data[group_offset..group_offset + to_read]);
+            rem -= to_read;
+            read += to_read;
+            self.offset += to_read as u64;
+        }
+        Ok(read)
+    }
+}
+
+impl<'a> Seek for WIAReadStream<'a> {
+    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+        self.offset = match pos {
+            SeekFrom::Start(v) => v,
+            SeekFrom::End(v) => (self.stable_stream_len()? as i64 + v) as u64,
+            SeekFrom::Current(v) => (self.offset as i64 + v) as u64,
+        };
+        self.check_group()?;
+        Ok(self.offset)
+    }
+
+    fn stream_position(&mut self) -> io::Result<u64> { Ok(self.offset) }
+}
+
+impl<'a> ReadStream for WIAReadStream<'a> {
+    fn stable_stream_len(&mut self) -> io::Result<u64> { Ok(self.disc_io.header.iso_file_size) }
+
+    fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
+}
diff --git a/src/lib.rs b/src/lib.rs
index 93822f7..c005e26 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,71 +1,93 @@
-#![warn(missing_docs)]
-#![warn(rustdoc::missing_doc_code_examples)]
+#![warn(missing_docs, rustdoc::missing_doc_code_examples)]
 //! Library for traversing & reading GameCube and Wii disc images.
 //!
 //! Based on the C++ library [nod](https://github.com/AxioDL/nod),
 //! but does not currently support authoring.
 //!
 //! Currently supported file formats:
-//! - ISO
+//! - ISO (GCM)
+//! - WIA / RVZ
+//! - WBFS
 //! - NFS (Wii U VC files, e.g. `hif_000000.nfs`)
 //!
 //! # Examples
 //!
 //! Opening a disc image and reading a file:
 //! ```no_run
-//! use nod::disc::{new_disc_base, PartHeader};
-//! use nod::fst::NodeType;
-//! use nod::io::new_disc_io;
 //! use std::io::Read;
 //!
-//! let mut disc_io = new_disc_io("path/to/file".as_ref())?;
-//! let disc_base = new_disc_base(disc_io.as_mut())?;
-//! let mut partition = disc_base.get_data_partition(disc_io.as_mut(), false)?;
-//! let header = partition.read_header()?;
-//! if let Some(NodeType::File(node)) = header.find_node("/MP3/Worlds.txt") {
-//!     let mut s = String::new();
-//!     partition.begin_file_stream(node)?.read_to_string(&mut s);
-//!     println!("{}", s);
+//! use nod::{
+//!     disc::{new_disc_base, PartHeader},
+//!     fst::NodeType,
+//!     io::{new_disc_io, DiscIOOptions},
+//! };
+//!
+//! fn main() -> nod::Result<()> {
+//!     let options = DiscIOOptions::default();
+//!     let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?;
+//!     let disc_base = new_disc_base(disc_io.as_mut())?;
+//!     let mut partition = disc_base.get_data_partition(disc_io.as_mut(), false)?;
+//!     let header = partition.read_header()?;
+//!     if let Some(NodeType::File(node)) = header.find_node("/MP3/Worlds.txt") {
+//!         let mut s = String::new();
+//!         partition.begin_file_stream(node)?.read_to_string(&mut s).expect("Failed to read file");
+//!         println!("{}", s);
+//!     }
+//!     Ok(())
 //! }
-//! # Ok::<(), nod::Error>(())
 //! ```
 
-use thiserror::Error;
-
 pub mod disc;
 pub mod fst;
 pub mod io;
 pub mod streams;
+pub mod util;
 
 /// Error types for nod.
-#[derive(Error, Debug)]
+#[derive(thiserror::Error, Debug)]
 pub enum Error {
-    /// An error during binary format parsing.
-    #[error("binary format")]
-    BinaryFormat(#[from] binrw::Error),
-    /// An error during Wii disc decryption.
-    #[error("encryption")]
-    Encryption(#[from] block_modes::BlockModeError),
-    /// A general I/O error.
-    #[error("io error: `{0}`")]
-    Io(String, #[source] std::io::Error),
     /// An error for disc format related issues.
-    #[error("disc format error: `{0}`")]
+    #[error("disc format error: {0}")]
     DiscFormat(String),
+    /// A general I/O error.
+    #[error("I/O error: {0}")]
+    Io(String, #[source] std::io::Error),
 }
 
-/// Helper result type for [`enum@Error`].
+/// Helper result type for [`Error`].
 pub type Result<T, E = Error> = core::result::Result<T, E>;
 
-impl From<std::io::Error> for Error {
-    fn from(v: std::io::Error) -> Self { Error::Io("I/O error".to_string(), v) }
+impl From<aes::cipher::block_padding::UnpadError> for Error {
+    fn from(_: aes::cipher::block_padding::UnpadError) -> Self { unreachable!() }
 }
 
-#[inline(always)]
-pub(crate) fn div_rem<T: std::ops::Div<Output = T> + std::ops::Rem<Output = T> + Copy>(
-    x: T,
-    y: T,
-) -> (T, T) {
-    let quot = x / y;
-    let rem = x % y;
-    (quot, rem)
+impl From<base16ct::Error> for Error {
+    fn from(_: base16ct::Error) -> Self { unreachable!() }
+}
+
+/// Adds context to an [`Error`].
+pub trait ErrorContext {
+    /// Wraps this error with a context message.
+    fn context(self, context: impl Into<String>) -> Error;
+}
+
+impl ErrorContext for std::io::Error {
+    fn context(self, context: impl Into<String>) -> Error { Error::Io(context.into(), self) }
+}
+
+/// Adds context to the error value of a [`Result`].
+pub trait ResultContext<T> {
+    /// Wraps the error value with a context message.
+    fn context(self, context: impl Into<String>) -> Result<T>;
+
+    /// Wraps the error value with a lazily-evaluated context message.
+    fn with_context<F>(self, f: F) -> Result<T>
+    where F: FnOnce() -> String;
+}
+
+impl<T, E> ResultContext<T> for Result<T, E>
+where E: ErrorContext
+{
+    fn context(self, context: impl Into<String>) -> Result<T> {
+        self.map_err(|e| e.context(context))
+    }
+
+    fn with_context<F>(self, f: F) -> Result<T>
+    where F: FnOnce() -> String {
+        self.map_err(|e| e.context(f()))
+    }
 }
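+
+// Illustrative sketch, not from the upstream source: how the context helpers
+// above are meant to be used at call sites.
+#[cfg(test)]
+mod context_tests {
+    use super::{Result, ResultContext};
+
+    #[test]
+    fn wraps_io_errors() {
+        let err: std::io::Result<()> =
+            Err(std::io::Error::new(std::io::ErrorKind::NotFound, "missing"));
+        let result: Result<()> = err.context("Opening hypothetical file");
+        assert!(
+            matches!(result, Err(super::Error::Io(msg, _)) if msg == "Opening hypothetical file")
+        );
+    }
+}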
diff --git a/src/streams.rs b/src/streams.rs
index 7618892..ca036a7 100644
--- a/src/streams.rs
+++ b/src/streams.rs
@@ -43,11 +43,7 @@ pub trait ReadStream: Read + Seek {
     /// Seeks underlying stream immediately.
     fn new_window(&mut self, offset: u64, size: u64) -> io::Result<SharedWindowedReadStream> {
         self.seek(SeekFrom::Start(offset))?;
-        io::Result::Ok(SharedWindowedReadStream {
-            base: self.as_dyn(),
-            begin: offset,
-            end: offset + size,
-        })
+        Ok(SharedWindowedReadStream { base: self.as_dyn(), begin: offset, end: offset + size })
     }
 
     /// Retrieves a type-erased reference to the stream.
@@ -91,7 +87,7 @@ pub fn wrap_windowed<'a>(
     size: u64,
 ) -> io::Result<OwningWindowedReadStream<'a>> {
     base.seek(SeekFrom::Start(offset))?;
-    io::Result::Ok(OwningWindowedReadStream { base, begin: offset, end: offset + size })
+    Ok(OwningWindowedReadStream { base, begin: offset, end: offset + size })
 }
 
 /// A non-owning window into an existing [`ReadStream`].
@@ -110,7 +106,7 @@ impl<'a> SharedWindowedReadStream<'a> {
         self.base.seek(SeekFrom::Start(begin))?;
         self.begin = begin;
         self.end = end;
-        io::Result::Ok(())
+        Ok(())
     }
 }
 
@@ -137,9 +133,9 @@ fn windowed_seek(stream: &mut dyn WindowedReadStream, pos: SeekFrom) -> io::Resu
         SeekFrom::Current(_) => pos,
     })?;
     if result < begin || result > end {
-        io::Result::Err(io::Error::from(io::ErrorKind::UnexpectedEof))
+        Err(io::Error::from(io::ErrorKind::UnexpectedEof))
     } else {
-        io::Result::Ok(result - begin)
+        Ok(result - begin)
     }
 }
 
@@ -151,12 +147,12 @@ impl<'a> Seek for OwningWindowedReadStream<'a> {
     fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> { windowed_seek(self, pos) }
 
     fn stream_position(&mut self) -> io::Result<u64> {
-        Result::Ok(self.base.stream_position()? - self.begin)
+        Ok(self.base.stream_position()? - self.begin)
     }
 }
 
 impl<'a> ReadStream for OwningWindowedReadStream<'a> {
-    fn stable_stream_len(&mut self) -> io::Result<u64> { Result::Ok(self.end - self.begin) }
+    fn stable_stream_len(&mut self) -> io::Result<u64> { Ok(self.end - self.begin) }
 
     fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
 }
@@ -175,12 +171,12 @@ impl<'a> Seek for SharedWindowedReadStream<'a> {
     fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> { windowed_seek(self, pos) }
 
     fn stream_position(&mut self) -> io::Result<u64> {
-        Result::Ok(self.base.stream_position()? - self.begin)
+        Ok(self.base.stream_position()? - self.begin)
     }
 }
 
 impl<'a> ReadStream for SharedWindowedReadStream<'a> {
-    fn stable_stream_len(&mut self) -> io::Result<u64> { Result::Ok(self.end - self.begin) }
+    fn stable_stream_len(&mut self) -> io::Result<u64> { Ok(self.end - self.begin) }
 
     fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
 }
diff --git a/src/util/lfg.rs b/src/util/lfg.rs
new file mode 100644
index 0000000..df38ebf
--- /dev/null
+++ b/src/util/lfg.rs
@@ -0,0 +1,77 @@
+use std::{cmp::min, io, io::Read};
+
+pub(crate) const LFG_K: usize = 521;
+pub(crate) const LFG_J: usize = 32;
+pub(crate) const SEED_SIZE: usize = 17;
+
+/// Lagged Fibonacci generator for Wii partition junk data.
+/// <https://github.com/dolphin-emu/dolphin/blob/master/docs/WiaAndRvz.md#prng-algorithm>
+pub(crate) struct LaggedFibonacci {
+    buffer: [u32; LFG_K],
+    position: usize,
+}
+
+impl Default for LaggedFibonacci {
+    fn default() -> Self { Self { buffer: [0u32; LFG_K], position: 0 } }
+}
+
+impl LaggedFibonacci {
+    fn init(&mut self) {
+        for i in SEED_SIZE..LFG_K {
+            self.buffer[i] =
+                (self.buffer[i - 17] << 23) ^ (self.buffer[i - 16] >> 9) ^ self.buffer[i - 1];
+        }
+        for x in self.buffer.iter_mut() {
+            *x = ((*x & 0xFF00FFFF) | (*x >> 2 & 0x00FF0000)).swap_bytes();
+        }
+        for _ in 0..4 {
+            self.forward();
+        }
+    }
+
+    pub(crate) fn init_with_reader<R>(&mut self, reader: &mut R) -> io::Result<()>
+    where R: Read + ?Sized {
+        reader.read_exact(bytemuck::cast_slice_mut(&mut self.buffer[..SEED_SIZE]))?;
+        for x in self.buffer[..SEED_SIZE].iter_mut() {
+            *x = u32::from_be(*x);
+        }
+        self.position = 0;
+        self.init();
+        Ok(())
+    }
+
+    pub(crate) fn forward(&mut self) {
+        for i in 0..LFG_J {
+            self.buffer[i] ^= self.buffer[i + LFG_K - LFG_J];
+        }
+        for i in LFG_J..LFG_K {
+            self.buffer[i] ^= self.buffer[i - LFG_J];
+        }
+    }
+
+    pub(crate) fn skip(&mut self, n: usize) {
+        self.position += n;
+        while self.position >= LFG_K * 4 {
+            self.forward();
+            self.position -= LFG_K * 4;
+        }
+    }
+
+    #[inline]
+    fn bytes(&self) -> &[u8; LFG_K * 4] {
+        unsafe { &*(self.buffer.as_ptr() as *const [u8; LFG_K * 4]) }
+    }
+
+    pub(crate) fn fill(&mut self, mut buf: &mut [u8]) {
+        while !buf.is_empty() {
+            let len = min(buf.len(), LFG_K * 4 - self.position);
+            buf[..len].copy_from_slice(&self.bytes()[self.position..self.position + len]);
+            self.position += len;
+            buf = &mut buf[len..];
+            if self.position == LFG_K * 4 {
+                self.forward();
+                self.position = 0;
+            }
+        }
+    }
+}
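+
+// Illustrative sketch, not from the upstream source: seeding the generator
+// from a byte stream and producing junk data, as `read_group` does for RVZ
+// packed junk runs. The seed bytes here are arbitrary.
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn fills_deterministically() {
+        let seed = [0x42u8; SEED_SIZE * 4]; // 17 big-endian u32 seed words
+        let mut lfg = LaggedFibonacci::default();
+        lfg.init_with_reader(&mut seed.as_slice()).unwrap();
+        let mut a = [0u8; 32];
+        lfg.fill(&mut a);
+        // Re-seeding with the same bytes yields the same output.
+        let mut lfg2 = LaggedFibonacci::default();
+        lfg2.init_with_reader(&mut seed.as_slice()).unwrap();
+        let mut b = [0u8; 32];
+        lfg2.fill(&mut b);
+        assert_eq!(a, b);
+    }
+}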
diff --git a/src/util/mod.rs b/src/util/mod.rs
new file mode 100644
index 0000000..ab1b635
--- /dev/null
+++ b/src/util/mod.rs
@@ -0,0 +1,13 @@
+use std::ops::{Div, Rem};
+
+pub(crate) mod lfg;
+pub(crate) mod reader;
+pub(crate) mod take_seek;
+
+#[inline(always)]
+pub(crate) fn div_rem<T>(x: T, y: T) -> (T, T)
+where T: Div<Output = T> + Rem<Output = T> + Copy {
+    let quot = x / y;
+    let rem = x % y;
+    (quot, rem)
+}
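+
+// Illustrative sketch, not from the upstream source: `div_rem` returns the
+// quotient and remainder in one call, e.g. for splitting a disc offset into a
+// sector index and an offset within the sector.
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn splits_offsets() {
+        let (sector, within) = super::div_rem(0x1_8042u64, 0x8000u64);
+        assert_eq!((sector, within), (3, 0x42));
+    }
+}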
impl_from_reader { + ($($t:ty),*) => { + $( + impl FromReader for $t { + type Args<'a> = (); + + const STATIC_SIZE: usize = std::mem::size_of::(); + + fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result where R: Read + ?Sized{ + let mut buf = [0u8; Self::STATIC_SIZE]; + reader.read_exact(&mut buf)?; + Ok(Self::from_be_bytes(buf)) + } + } + )* + }; +} + +impl_from_reader!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128); + +#[repr(transparent)] +pub struct U24(pub u32); + +impl FromReader for U24 { + type Args<'a> = (); + + const STATIC_SIZE: usize = 3; + + fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result + where R: Read + ?Sized { + let mut buf = [0u8; 4]; + reader.read_exact(&mut buf[1..])?; + Ok(U24(u32::from_be_bytes(buf))) + } +} + +impl FromReader for [u8; N] { + type Args<'a> = (); + + const STATIC_SIZE: usize = N; + + fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result + where R: Read + ?Sized { + let mut buf = [0u8; N]; + reader.read_exact(&mut buf)?; + Ok(buf) + } +} + +impl FromReader for [u32; N] { + type Args<'a> = (); + + const STATIC_SIZE: usize = N * u32::STATIC_SIZE; + + fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result + where R: Read + ?Sized { + let mut buf = [0u32; N]; + reader.read_exact(unsafe { + std::slice::from_raw_parts_mut(buf.as_mut_ptr() as *mut u8, Self::STATIC_SIZE) + })?; + for x in buf.iter_mut() { + *x = u32::from_be(*x); + } + Ok(buf) + } +} + +impl FromReader for CString { + type Args<'a> = (); + + const STATIC_SIZE: usize = DYNAMIC_SIZE; + + fn from_reader_args(reader: &mut R, _args: Self::Args<'_>) -> io::Result + where R: Read + ?Sized { + let mut buf = Vec::new(); + loop { + let mut byte = [0u8; 1]; + reader.read_exact(&mut byte)?; + buf.push(byte[0]); + if byte[0] == 0 { + break; + } + } + Ok(unsafe { CString::from_vec_with_nul_unchecked(buf) }) + } +} + +pub(crate) fn read_bytes(reader: &mut R, count: usize) -> io::Result> +where R: Read + ?Sized { + let mut buf = vec![0u8; count]; + reader.read_exact(&mut buf)?; + Ok(buf) +} + +pub(crate) fn read_vec<'a, T, R>(reader: &mut R, count: usize) -> io::Result> +where + T: FromReader, + R: Read + ?Sized, + ::Args<'a>: Default, +{ + let mut vec = Vec::with_capacity(count); + if T::STATIC_SIZE != DYNAMIC_SIZE { + // Read the entire buffer at once + let buf = read_bytes(reader, T::STATIC_SIZE * count)?; + let mut slice = buf.as_slice(); + for _ in 0..count { + vec.push(T::from_reader(&mut slice)?); + } + } else { + for _ in 0..count { + vec.push(T::from_reader(reader)?); + } + } + Ok(vec) +} + +pub(crate) trait ToWriter: Sized { + fn to_writer(&self, writer: &mut W) -> io::Result<()> + where W: Write + ?Sized; + + fn to_bytes(&self) -> io::Result> { + let mut buf = vec![0u8; self.write_size()]; + self.to_writer(&mut buf.as_mut_slice())?; + Ok(buf) + } + + fn write_size(&self) -> usize; +} + +macro_rules! 
impl_to_writer { + ($($t:ty),*) => { + $( + impl ToWriter for $t { + fn to_writer(&self, writer: &mut W) -> io::Result<()> + where W: Write + ?Sized { + writer.write_all(&self.to_be_bytes()) + } + + fn to_bytes(&self) -> io::Result> { + Ok(self.to_be_bytes().to_vec()) + } + + fn write_size(&self) -> usize { + std::mem::size_of::() + } + } + )* + }; +} + +impl_to_writer!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128); + +impl ToWriter for U24 { + fn to_writer(&self, writer: &mut W) -> io::Result<()> + where W: Write + ?Sized { + writer.write_all(&self.0.to_be_bytes()[1..]) + } + + fn write_size(&self) -> usize { 3 } +} + +impl ToWriter for [u8; N] { + fn to_writer(&self, writer: &mut W) -> io::Result<()> + where W: Write + ?Sized { + writer.write_all(self) + } + + fn write_size(&self) -> usize { N } +} + +impl ToWriter for &[u8] { + fn to_writer(&self, writer: &mut W) -> io::Result<()> + where W: Write + ?Sized { + writer.write_all(self) + } + + fn write_size(&self) -> usize { self.len() } +} + +impl ToWriter for Vec { + fn to_writer(&self, writer: &mut W) -> io::Result<()> + where W: Write + ?Sized { + writer.write_all(self) + } + + fn write_size(&self) -> usize { self.len() } +} + +pub(crate) fn write_vec(writer: &mut W, vec: &[T]) -> io::Result<()> +where + T: ToWriter, + W: Write + ?Sized, +{ + for item in vec { + item.to_writer(writer)?; + } + Ok(()) +} diff --git a/src/util/take_seek.rs b/src/util/take_seek.rs new file mode 100644 index 0000000..1f3710c --- /dev/null +++ b/src/util/take_seek.rs @@ -0,0 +1,127 @@ +// From https://github.com/jam1garner/binrw/blob/e96a1320287ec83d1f471525ffa380800ec9e124/binrw/src/io/take_seek.rs +// MIT License +// +// Copyright (c) jam1garner and other contributors +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. +#![allow(dead_code)] +//! Types for seekable reader adapters which limit the number of bytes read from +//! the underlying reader. + +use std::io::{Read, Result, Seek, SeekFrom}; + +/// Read adapter which limits the bytes read from an underlying reader, with +/// seek support. +/// +/// This struct is generally created by importing the [`TakeSeekExt`] extension +/// and calling [`take_seek`] on a reader. +/// +/// [`take_seek`]: TakeSeekExt::take_seek +#[derive(Debug)] +pub struct TakeSeek { + inner: T, + pos: u64, + end: u64, +} + +impl TakeSeek { + /// Gets a reference to the underlying reader. + pub fn get_ref(&self) -> &T { &self.inner } + + /// Gets a mutable reference to the underlying reader. 
diff --git a/src/util/take_seek.rs b/src/util/take_seek.rs
new file mode 100644
index 0000000..1f3710c
--- /dev/null
+++ b/src/util/take_seek.rs
@@ -0,0 +1,127 @@
+// From https://github.com/jam1garner/binrw/blob/e96a1320287ec83d1f471525ffa380800ec9e124/binrw/src/io/take_seek.rs
+// MIT License
+//
+// Copyright (c) jam1garner and other contributors
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+#![allow(dead_code)]
+//! Types for seekable reader adapters which limit the number of bytes read from
+//! the underlying reader.
+
+use std::io::{Read, Result, Seek, SeekFrom};
+
+/// Read adapter which limits the bytes read from an underlying reader, with
+/// seek support.
+///
+/// This struct is generally created by importing the [`TakeSeekExt`] extension
+/// and calling [`take_seek`] on a reader.
+///
+/// [`take_seek`]: TakeSeekExt::take_seek
+#[derive(Debug)]
+pub struct TakeSeek<T> {
+    inner: T,
+    pos: u64,
+    end: u64,
+}
+
+impl<T> TakeSeek<T> {
+    /// Gets a reference to the underlying reader.
+    pub fn get_ref(&self) -> &T { &self.inner }
+
+    /// Gets a mutable reference to the underlying reader.
+    ///
+    /// Care should be taken to avoid modifying the internal I/O state of the
+    /// underlying reader as doing so may corrupt the internal limit of this
+    /// `TakeSeek`.
+    pub fn get_mut(&mut self) -> &mut T { &mut self.inner }
+
+    /// Consumes this wrapper, returning the wrapped value.
+    pub fn into_inner(self) -> T { self.inner }
+
+    /// Returns the number of bytes that can be read before this instance will
+    /// return EOF.
+    ///
+    /// # Note
+    ///
+    /// This instance may reach EOF after reading fewer bytes than indicated by
+    /// this method if the underlying [`Read`] instance reaches EOF.
+    pub fn limit(&self) -> u64 { self.end.saturating_sub(self.pos) }
+}
+
+impl<T: Seek> TakeSeek<T> {
+    /// Sets the number of bytes that can be read before this instance will
+    /// return EOF. This is the same as constructing a new `TakeSeek` instance,
+    /// so the amount of bytes read and the previous limit value don’t matter
+    /// when calling this method.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the inner stream returns an error from `stream_position`.
+    pub fn set_limit(&mut self, limit: u64) {
+        let pos = self.inner.stream_position().expect("cannot get position for `set_limit`");
+        self.pos = pos;
+        self.end = pos + limit;
+    }
+}
+
+impl<T: Read> Read for TakeSeek<T> {
+    fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
+        let limit = self.limit();
+
+        // Don't call into inner reader at all at EOF because it may still block
+        if limit == 0 {
+            return Ok(0);
+        }
+
+        // Lint: It is impossible for this cast to truncate because the value
+        // being cast is the minimum of two values, and one of the value types
+        // is already `usize`.
+        #[allow(clippy::cast_possible_truncation)]
+        let max = (buf.len() as u64).min(limit) as usize;
+        let n = self.inner.read(&mut buf[0..max])?;
+        self.pos += n as u64;
+        Ok(n)
+    }
+}
+
+impl<T: Seek> Seek for TakeSeek<T> {
+    fn seek(&mut self, pos: SeekFrom) -> Result<u64> {
+        self.pos = self.inner.seek(pos)?;
+        Ok(self.pos)
+    }
+
+    fn stream_position(&mut self) -> Result<u64> { Ok(self.pos) }
+}
+
+/// An extension trait that implements `take_seek()` for compatible streams.
+pub trait TakeSeekExt {
+    /// Creates an adapter which will read at most `limit` bytes from the
+    /// wrapped stream.
+    fn take_seek(self, limit: u64) -> TakeSeek<Self>
+    where Self: Sized;
+}
+
+impl<T: Read + Seek> TakeSeekExt for T {
+    fn take_seek(mut self, limit: u64) -> TakeSeek<Self>
+    where Self: Sized {
+        let pos = self.stream_position().expect("cannot get position for `take_seek`");
+
+        TakeSeek { inner: self, pos, end: pos + limit }
+    }
+}
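+
+// Illustrative sketch, not from the upstream source: limiting reads with
+// `take_seek`, as `read_group` does when reading a single group's data.
+#[cfg(test)]
+mod tests {
+    use super::TakeSeekExt;
+    use std::io::{Cursor, Read};
+
+    #[test]
+    fn limits_reads() {
+        let cursor = Cursor::new(vec![0u8; 16]);
+        let mut reader = cursor.take_seek(4);
+        let mut buf = Vec::new();
+        reader.read_to_end(&mut buf).unwrap();
+        assert_eq!(buf.len(), 4); // only 4 of the 16 bytes are visible
+    }
+}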