WIP WIA/RVZ & more

This commit is contained in:
Luke Street 2024-02-02 16:17:35 -07:00
parent 97c726c209
commit 4f794f06cb
19 changed files with 3507 additions and 590 deletions

View File

@ -2,59 +2,174 @@ name: Build
on: [ push, pull_request ] on: [ push, pull_request ]
env:
BUILD_PROFILE: release-lto
CARGO_BIN_NAME: nodtool
CARGO_TARGET_DIR: target
jobs: jobs:
check: check:
name: Check name: Check
runs-on: ubuntu-latest runs-on: ubuntu-latest
strategy: strategy:
matrix: matrix:
toolchain: [ stable, 1.51.0, nightly ] toolchain: [ stable, 1.56.0, nightly ]
env: env:
RUSTFLAGS: -D warnings RUSTFLAGS: -D warnings
steps: steps:
- uses: actions/checkout@v2 - name: Checkout
- uses: actions-rs/toolchain@v1 uses: actions/checkout@v3
- name: Setup Rust toolchain
uses: dtolnay/rust-toolchain@master
with: with:
profile: minimal
toolchain: ${{ matrix.toolchain }} toolchain: ${{ matrix.toolchain }}
override: true
components: rustfmt, clippy components: rustfmt, clippy
- name: Cargo check
run: cargo check --all-features --all-targets
- name: Cargo clippy
run: cargo clippy --all-features --all-targets
fmt:
name: Format
runs-on: ubuntu-latest
env:
RUSTFLAGS: -D warnings
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Setup Rust toolchain
# We use nightly options in rustfmt.toml
uses: dtolnay/rust-toolchain@nightly
with:
components: rustfmt
- name: Cargo fmt
run: cargo fmt --all --check
deny:
name: Deny
runs-on: ubuntu-latest
strategy:
matrix:
checks:
- advisories
- bans licenses sources
# Prevent new advisories from failing CI
continue-on-error: ${{ matrix.checks == 'advisories' }}
steps:
- uses: actions/checkout@v3
- uses: EmbarkStudios/cargo-deny-action@v1 - uses: EmbarkStudios/cargo-deny-action@v1
- uses: actions-rs/cargo@v1
with: with:
command: check command: check ${{ matrix.checks }}
args: --all-features
- uses: actions-rs/cargo@v1 test:
with: name: Test
command: clippy strategy:
args: --all-features matrix:
platform: [ ubuntu-latest, windows-latest, macos-latest ]
fail-fast: false
runs-on: ${{ matrix.platform }}
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Setup Rust toolchain
uses: dtolnay/rust-toolchain@stable
- name: Cargo test
run: cargo test --release --all-features
build: build:
name: Build name: Build
strategy: strategy:
matrix: matrix:
platform: [ ubuntu-latest, macos-latest, windows-latest ] include:
toolchain: [ stable, 1.51.0, nightly ] - platform: ubuntu-latest
target: x86_64-unknown-linux-musl
name: linux-x86_64
build: zigbuild
features: asm
- platform: ubuntu-latest
target: i686-unknown-linux-musl
name: linux-i686
build: zigbuild
features: asm
- platform: ubuntu-latest
target: aarch64-unknown-linux-musl
name: linux-aarch64
build: zigbuild
features: nightly
- platform: ubuntu-latest
target: armv7-unknown-linux-musleabi
name: linux-armv7l
build: zigbuild
features: default
- platform: windows-latest
target: x86_64-pc-windows-msvc
name: windows-x86_64
build: build
features: asm
- platform: windows-latest
target: aarch64-pc-windows-msvc
name: windows-arm64
build: build
features: nightly
- platform: macos-latest
target: x86_64-apple-darwin
name: macos-x86_64
build: build
features: asm
- platform: macos-latest
target: aarch64-apple-darwin
name: macos-arm64
build: build
features: nightly
fail-fast: false fail-fast: false
runs-on: ${{ matrix.platform }} runs-on: ${{ matrix.platform }}
steps: steps:
- uses: actions/checkout@v2 - name: Checkout
- uses: actions-rs/toolchain@v1 uses: actions/checkout@v3
- name: Install dependencies
if: matrix.packages != ''
run: |
sudo apt-get -y update
sudo apt-get -y install ${{ matrix.packages }}
- name: Install cargo-zigbuild
if: matrix.build == 'zigbuild'
run: pip install ziglang==0.11.0 cargo-zigbuild==0.18.3
- name: Setup Rust toolchain
uses: dtolnay/rust-toolchain@nightly
with: with:
profile: minimal targets: ${{ matrix.target }}
toolchain: ${{ matrix.toolchain }} - name: Cargo build
override: true run: cargo ${{ matrix.build }} --profile ${{ env.BUILD_PROFILE }} --target ${{ matrix.target }} --bin ${{ env.CARGO_BIN_NAME }} --features ${{ matrix.features }}
- uses: actions-rs/cargo@v1 - name: Upload artifacts
uses: actions/upload-artifact@v3
with: with:
command: test name: ${{ matrix.name }}
args: --release --all-features
- uses: actions-rs/cargo@v1
with:
command: build
args: --release --all-features
- uses: actions/upload-artifact@v2
with:
name: ${{ matrix.platform }}-${{ matrix.toolchain }}
path: | path: |
target/release/nodtool ${{ env.CARGO_TARGET_DIR }}/${{ env.BUILD_PROFILE }}/${{ env.CARGO_BIN_NAME }}
target/release/nodtool.exe ${{ env.CARGO_TARGET_DIR }}/${{ env.BUILD_PROFILE }}/${{ env.CARGO_BIN_NAME }}.exe
${{ env.CARGO_TARGET_DIR }}/${{ matrix.target }}/${{ env.BUILD_PROFILE }}/${{ env.CARGO_BIN_NAME }}
${{ env.CARGO_TARGET_DIR }}/${{ matrix.target }}/${{ env.BUILD_PROFILE }}/${{ env.CARGO_BIN_NAME }}.exe
if-no-files-found: error
release:
name: Release
if: startsWith(github.ref, 'refs/tags/')
runs-on: ubuntu-latest
needs: [ build ]
steps:
- name: Download artifacts
uses: actions/download-artifact@v3
with:
path: artifacts
- name: Rename artifacts
working-directory: artifacts
run: |
mkdir ../out
for i in */*/$BUILD_PROFILE/$CARGO_BIN_NAME*; do
mv "$i" "../out/$(sed -E "s/([^/]+)\/[^/]+\/$BUILD_PROFILE\/($CARGO_BIN_NAME)/\2-\1/" <<< "$i")"
done
ls -R ../out
- name: Release
uses: softprops/action-gh-release@v1
with:
files: out/*

View File

@ -1,7 +1,8 @@
[package] [package]
name = "nod" name = "nod"
version = "0.1.2" version = "0.2.0"
edition = "2018" edition = "2021"
rust-version = "1.56.0"
authors = ["Luke Street <luke@street.dev>"] authors = ["Luke Street <luke@street.dev>"]
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
repository = "https://github.com/encounter/nod-rs" repository = "https://github.com/encounter/nod-rs"
@ -17,16 +18,33 @@ categories = ["command-line-utilities", "parser-implementations"]
name = "nodtool" name = "nodtool"
path = "src/bin.rs" path = "src/bin.rs"
[profile.release] [profile.release-lto]
lto = true inherits = "release"
lto = "thin"
strip = "debuginfo"
[features]
default = ["compress-bzip2", "compress-zstd"] #, "compress-lzma"
asm = ["md-5/asm", "sha1/asm"]
compress-bzip2 = ["bzip2"]
compress-zstd = ["zstd"]
#compress-lzma = ["xz2"]
nightly = ["crc32fast/nightly"]
[dependencies] [dependencies]
aes = "0.7.5" aes = "0.8.3"
anyhow = "1.0.53" argh = "0.1.12"
binrw = "0.8.4" argh_derive = "0.1.12"
block-modes = "0.8.1" base16ct = "0.2.0"
clap = "2.34.0" binrw = "0.13.3"
encoding_rs = "0.8.30" bytemuck = "1.14.1"
bzip2 = { version = "0.4.4", optional = true }
cbc = "0.1.2"
crc32fast = "1.3.2"
encoding_rs = "0.8.33"
file-size = "1.0.3" file-size = "1.0.3"
sha-1 = "0.10.0" md-5 = "0.10.6"
thiserror = "1.0.30" sha1 = "0.10.6"
thiserror = "1.0.56"
xz2 = { version = "0.1.7", optional = true }
zstd = { version = "0.13.0", optional = true }

View File

@ -14,7 +14,9 @@ Based on the C++ library [nod](https://github.com/AxioDL/nod),
but does not currently support authoring. but does not currently support authoring.
Currently supported file formats: Currently supported file formats:
- ISO - ISO (GCM)
- WIA / RVZ
- WBFS
- NFS (Wii U VC files, e.g. `hif_000000.nfs`) - NFS (Wii U VC files, e.g. `hif_000000.nfs`)
### CLI tool ### CLI tool
@ -34,20 +36,28 @@ nodtool extract /path/to/game/content/hif_000000.nfs [outdir]
### Library example ### Library example
Opening a disc image and reading a file: Opening a disc image and reading a file:
```rust ```rust
use nod::disc::{new_disc_base, PartHeader};
use nod::fst::NodeType;
use nod::io::new_disc_io;
use std::io::Read; use std::io::Read;
let mut disc_io = new_disc_io("path/to/file".as_ref())?; use nod::{
let disc_base = new_disc_base(disc_io.as_mut())?; disc::{new_disc_base, PartHeader},
let mut partition = disc_base.get_data_partition(disc_io.as_mut())?; fst::NodeType,
let header = partition.read_header()?; io::{new_disc_io, DiscIOOptions},
if let Some(NodeType::File(node)) = header.find_node("/MP3/Worlds.txt") { };
fn main() -> nod::Result<()> {
let options = DiscIOOptions::default();
let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?;
let disc_base = new_disc_base(disc_io.as_mut())?;
let mut partition = disc_base.get_data_partition(disc_io.as_mut(), false)?;
let header = partition.read_header()?;
if let Some(NodeType::File(node)) = header.find_node("/MP3/Worlds.txt") {
let mut s = String::new(); let mut s = String::new();
partition.begin_file_stream(node)?.read_to_string(&mut s); partition.begin_file_stream(node)?.read_to_string(&mut s).expect("Failed to read file");
println!(s); println!("{}", s);
}
Ok(())
} }
``` ```

View File

@ -6,3 +6,4 @@ reorder_impl_items = true
use_field_init_shorthand = true use_field_init_shorthand = true
use_small_heuristics = "Max" use_small_heuristics = "Max"
where_single_line = true where_single_line = true
format_code_in_doc_comments = true

View File

@ -1,74 +1,205 @@
use std::{ use std::{
env, fs, io, error::Error,
io::BufWriter, fs,
fs::File,
io,
io::{BufWriter, Write},
path::{Path, PathBuf}, path::{Path, PathBuf},
}; };
use clap::{clap_app, AppSettings}; use argh_derive::FromArgs;
use nod::{ use nod::{
disc::{new_disc_base, PartReadStream}, disc::{new_disc_base, PartHeader, PartReadStream, PartitionType},
fst::NodeType, fst::NodeType,
io::{has_extension, new_disc_io}, io::{has_extension, new_disc_io, DiscIOOptions},
Result, Result, ResultContext,
}; };
use sha1::Digest;
fn main() -> Result<()> { #[derive(FromArgs, Debug)]
let matches = clap_app!(nodtool => /// Tool for reading GameCube and Wii disc images.
(settings: &[ struct TopLevel {
AppSettings::SubcommandRequiredElseHelp, #[argh(subcommand)]
AppSettings::GlobalVersion, command: SubCommand,
AppSettings::DeriveDisplayOrder, }
AppSettings::VersionlessSubcommands,
])
(global_settings: &[
AppSettings::ColoredHelp,
AppSettings::UnifiedHelpMessage,
])
(version: env!("CARGO_PKG_VERSION"))
(author: "Luke Street <luke@street.dev>")
(about: "Tool for reading GameCube and Wii disc images.")
(long_about: "Tool for reading GameCube and Wii disc images.
Based on <https://github.com/AxioDL/nod>, original authors: #[derive(FromArgs, Debug)]
Jack Andersen (jackoalan) #[argh(subcommand)]
Phillip Stephens (Antidote)") enum SubCommand {
(@subcommand extract => Extract(ExtractArgs),
(about: "Extract GameCube & Wii disc images") Convert(ConvertArgs),
(@arg FILE: +required "Path to disc image (ISO or NFS)") Verify(VerifyArgs),
(@arg DIR: "Output directory (optional)") }
(@arg quiet: -q "Quiet output")
(@arg validate: -h "Validate disc hashes (Wii only)") #[derive(FromArgs, Debug)]
/// Extract a disc image.
#[argh(subcommand, name = "extract")]
struct ExtractArgs {
#[argh(positional)]
/// path to disc image (ISO or NFS)
file: PathBuf,
#[argh(positional)]
/// output directory (optional)
out: Option<PathBuf>,
#[argh(switch, short = 'q')]
/// quiet output
quiet: bool,
#[argh(switch, short = 'h')]
/// validate disc hashes (Wii only)
validate: bool,
}
#[derive(FromArgs, Debug)]
/// Extract a disc image.
#[argh(subcommand, name = "convert")]
struct ConvertArgs {
#[argh(positional)]
/// path to disc image
file: PathBuf,
#[argh(positional)]
/// output ISO file
out: PathBuf,
}
#[derive(FromArgs, Debug)]
/// Verifies a disc image.
#[argh(subcommand, name = "verify")]
struct VerifyArgs {
#[argh(positional)]
/// path to disc image
file: PathBuf,
}
fn main() {
let args: TopLevel = argh::from_env();
let result = match args.command {
SubCommand::Convert(c_args) => convert(c_args),
SubCommand::Extract(c_args) => extract(c_args),
SubCommand::Verify(c_args) => verify(c_args),
};
if let Err(e) = result {
eprintln!("Failed: {}", e);
if let Some(source) = e.source() {
eprintln!("Caused by: {}", source);
}
std::process::exit(1);
}
}
fn convert(args: ConvertArgs) -> Result<()> { convert_and_verify(&args.file, Some(&args.out)) }
fn verify(args: VerifyArgs) -> Result<()> { convert_and_verify(&args.file, None) }
fn convert_and_verify(in_file: &Path, out_file: Option<&Path>) -> Result<()> {
println!("Loading {}", in_file.display());
let mut disc_io = new_disc_io(in_file, &DiscIOOptions { rebuild_hashes: true })?;
let disc_base = new_disc_base(disc_io.as_mut())?;
let header = disc_base.get_header();
println!(
"\nGame ID: {}{}{}{}{}{}",
header.game_id[0] as char,
header.game_id[1] as char,
header.game_id[2] as char,
header.game_id[3] as char,
header.game_id[4] as char,
header.game_id[5] as char
);
println!("Game title: {}", header.game_title);
println!("Disc num: {}", header.disc_num);
println!("Disc version: {}", header.disc_version);
let mut stream = disc_io.begin_read_stream(0).context("Creating disc read stream")?;
let mut crc = crc32fast::Hasher::new();
let mut md5 = md5::Md5::new();
let mut sha1 = sha1::Sha1::new();
let mut file = if let Some(out_file) = out_file {
Some(
File::create(out_file)
.with_context(|| format!("Creating file {}", out_file.display()))?,
) )
) } else {
.get_matches(); None
if let Some(matches) = matches.subcommand_matches("extract") { };
let file: PathBuf = PathBuf::from(matches.value_of("FILE").unwrap());
const BUFFER_SIZE: usize = 1015808; // LCM(0x8000, 0x7C00)
let mut buf = vec![0u8; BUFFER_SIZE];
let mut total_read = 0u64;
loop {
let read = stream.read(&mut buf).with_context(|| {
format!("Reading {} bytes at disc offset {}", BUFFER_SIZE, total_read)
})?;
if read == 0 {
break;
}
let slice = &buf[..read];
crc.update(slice);
md5.update(slice);
sha1.update(slice);
if let Some(file) = &mut file {
file.write_all(slice).with_context(|| {
format!("Writing {} bytes at offset {}", slice.len(), total_read)
})?;
}
total_read += read as u64;
}
println!();
println!("CRC32: {:08x}", crc.finalize());
println!("MD5: {:032x}", md5.finalize());
println!("SHA-1: {:040x}", sha1.finalize());
if let (Some(path), Some(file)) = (out_file, &mut file) {
file.flush().context("Flushing output file")?;
println!("Wrote {} to {}", file_size::fit_4(total_read), path.display());
}
Ok(())
}
fn extract(args: ExtractArgs) -> Result<()> {
let output_dir: PathBuf; let output_dir: PathBuf;
if let Some(dir) = matches.value_of("DIR") { if let Some(dir) = args.out {
output_dir = PathBuf::from(dir); output_dir = dir;
} else if has_extension(file.as_path(), "nfs") { } else if has_extension(&args.file, "nfs") {
// Special logic to extract from content/hif_*.nfs to extracted/.. // Special logic to extract from content/hif_*.nfs to extracted/..
if let Some(parent) = file.parent() { if let Some(parent) = args.file.parent() {
output_dir = parent.with_file_name("extracted"); output_dir = parent.with_file_name("extracted");
} else { } else {
output_dir = file.with_extension(""); output_dir = args.file.with_extension("");
} }
} else { } else {
output_dir = file.with_extension(""); output_dir = args.file.with_extension("");
} }
let mut disc_io = new_disc_io(file.as_path())?; let mut disc_io = new_disc_io(&args.file, &DiscIOOptions { rebuild_hashes: args.validate })?;
let disc_base = new_disc_base(disc_io.as_mut())?; let disc_base = new_disc_base(disc_io.as_mut())?;
let mut partition = let mut partition =
disc_base.get_data_partition(disc_io.as_mut(), matches.is_present("validate"))?; disc_base.get_partition(disc_io.as_mut(), PartitionType::Data, args.validate)?;
let header = partition.read_header()?; let header = partition.read_header()?;
extract_node( extract_sys_files(header.as_ref(), &output_dir.join("sys"), args.quiet)?;
header.root_node(), extract_node(header.root_node(), partition.as_mut(), &output_dir.join("files"), args.quiet)?;
partition.as_mut(), Ok(())
output_dir.as_path(), }
matches.is_present("quiet"),
)?; fn extract_sys_files(header: &dyn PartHeader, out_dir: &Path, quiet: bool) -> Result<()> {
fs::create_dir_all(out_dir)
.with_context(|| format!("Creating output directory {}", out_dir.display()))?;
extract_file(header.boot_bytes(), &out_dir.join("boot.bin"), quiet)?;
extract_file(header.bi2_bytes(), &out_dir.join("bi2.bin"), quiet)?;
extract_file(header.apploader_bytes(), &out_dir.join("apploader.img"), quiet)?;
extract_file(header.fst_bytes(), &out_dir.join("fst.bin"), quiet)?;
extract_file(header.dol_bytes(), &out_dir.join("main.dol"), quiet)?;
Ok(())
}
fn extract_file(bytes: &[u8], out_path: &Path, quiet: bool) -> Result<()> {
if !quiet {
println!(
"Extracting {} (size: {})",
out_path.display(),
file_size::fit_4(bytes.len() as u64)
);
} }
Result::Ok(()) fs::write(out_path, bytes).with_context(|| format!("Writing file {}", out_path.display()))?;
Ok(())
} }
fn extract_node( fn extract_node(
@ -76,37 +207,49 @@ fn extract_node(
partition: &mut dyn PartReadStream, partition: &mut dyn PartReadStream,
base_path: &Path, base_path: &Path,
quiet: bool, quiet: bool,
) -> io::Result<()> { ) -> Result<()> {
match node { match node {
NodeType::File(v) => { NodeType::File(v) => {
let mut file_path = base_path.to_owned(); let mut file_path = base_path.to_path_buf();
file_path.push(v.name.as_ref()); file_path.push(v.name.as_str());
if !quiet { if !quiet {
println!( println!(
"Extracting {} (size: {})", "Extracting {} (size: {})",
file_path.to_string_lossy(), file_path.display(),
file_size::fit_4(v.length as u64) file_size::fit_4(v.length as u64)
); );
} }
let file = fs::File::create(file_path)?; let file = File::create(&file_path)
.with_context(|| format!("Creating file {}", file_path.display()))?;
let mut buf_writer = BufWriter::with_capacity(partition.ideal_buffer_size(), file); let mut buf_writer = BufWriter::with_capacity(partition.ideal_buffer_size(), file);
io::copy(&mut partition.begin_file_stream(v)?, &mut buf_writer)?; let mut stream = partition.begin_file_stream(v).with_context(|| {
format!(
"Opening file {} on disc for reading (offset {}, size {})",
v.name, v.offset, v.length
)
})?;
io::copy(&mut stream, &mut buf_writer)
.with_context(|| format!("Extracting file {}", file_path.display()))?;
buf_writer.flush().with_context(|| format!("Flushing file {}", file_path.display()))?;
} }
NodeType::Directory(v, c) => { NodeType::Directory(v, c) => {
if v.name.is_empty() { if v.name.is_empty() {
fs::create_dir_all(base_path)?; fs::create_dir_all(base_path).with_context(|| {
format!("Creating output directory {}", base_path.display())
})?;
for x in c { for x in c {
extract_node(x, partition, base_path, quiet)?; extract_node(x, partition, base_path, quiet)?;
} }
} else { } else {
let mut new_base = base_path.to_owned(); let mut new_base = base_path.to_path_buf();
new_base.push(v.name.as_ref()); new_base.push(v.name.as_str());
fs::create_dir_all(&new_base)?; fs::create_dir_all(&new_base)
.with_context(|| format!("Creating output directory {}", new_base.display()))?;
for x in c { for x in c {
extract_node(x, partition, new_base.as_path(), quiet)?; extract_node(x, partition, new_base.as_path(), quiet)?;
} }
} }
} }
} }
io::Result::Ok(()) Ok(())
} }

View File

@ -1,16 +1,20 @@
use std::{ use std::{
io, io,
io::{Read, Seek, SeekFrom}, io::{Cursor, Read, Seek, SeekFrom},
}; };
use binrw::{BinRead, BinReaderExt};
use crate::{ use crate::{
disc::{BI2Header, DiscBase, DiscIO, Header, PartHeader, PartReadStream, BUFFER_SIZE}, disc::{
div_rem, AppLoaderHeader, DiscBase, DiscIO, DolHeader, Header, PartHeader, PartReadStream,
fst::{find_node, node_parser, Node, NodeKind, NodeType}, PartitionHeader, PartitionType, SECTOR_SIZE,
},
fst::{find_node, read_fst, Node, NodeKind, NodeType},
streams::{ReadStream, SharedWindowedReadStream}, streams::{ReadStream, SharedWindowedReadStream},
Result, util::{
div_rem,
reader::{read_bytes, FromReader},
},
Error, Result, ResultContext,
}; };
pub(crate) struct DiscGCN { pub(crate) struct DiscGCN {
@ -18,7 +22,7 @@ pub(crate) struct DiscGCN {
} }
impl DiscGCN { impl DiscGCN {
pub(crate) fn new(header: Header) -> Result<DiscGCN> { Result::Ok(DiscGCN { header }) } pub(crate) fn new(header: Header) -> Result<DiscGCN> { Ok(DiscGCN { header }) }
} }
impl DiscBase for DiscGCN { impl DiscBase for DiscGCN {
@ -29,37 +33,62 @@ impl DiscBase for DiscGCN {
disc_io: &'a mut dyn DiscIO, disc_io: &'a mut dyn DiscIO,
_validate_hashes: bool, _validate_hashes: bool,
) -> Result<Box<dyn PartReadStream + 'a>> { ) -> Result<Box<dyn PartReadStream + 'a>> {
Result::Ok(Box::from(GCPartReadStream { let stream = disc_io.begin_read_stream(0).context("Opening data partition stream")?;
stream: disc_io.begin_read_stream(0)?, Ok(Box::from(GCPartReadStream {
stream,
offset: 0, offset: 0,
cur_block: u64::MAX, cur_block: u32::MAX,
buf: [0; BUFFER_SIZE], buf: [0; SECTOR_SIZE],
})) }))
} }
fn get_partition<'a>(
&self,
disc_io: &'a mut dyn DiscIO,
part_type: PartitionType,
_validate_hashes: bool,
) -> Result<Box<dyn PartReadStream + 'a>> {
if part_type == PartitionType::Data {
Ok(Box::from(GCPartReadStream {
stream: disc_io.begin_read_stream(0).context("Opening partition read stream")?,
offset: 0,
cur_block: u32::MAX,
buf: [0; SECTOR_SIZE],
}))
} else {
Err(Error::DiscFormat(format!(
"Invalid partition type {:?} for GameCube disc",
part_type
)))
}
}
} }
struct GCPartReadStream<'a> { struct GCPartReadStream<'a> {
stream: Box<dyn ReadStream + 'a>, stream: Box<dyn ReadStream + 'a>,
offset: u64, offset: u64,
cur_block: u64, cur_block: u32,
buf: [u8; BUFFER_SIZE], buf: [u8; SECTOR_SIZE],
} }
impl<'a> Read for GCPartReadStream<'a> { impl<'a> Read for GCPartReadStream<'a> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let (mut block, mut block_offset) = div_rem(self.offset as usize, BUFFER_SIZE); let (block, block_offset) = div_rem(self.offset, SECTOR_SIZE as u64);
let mut block = block as u32;
let mut block_offset = block_offset as usize;
let mut rem = buf.len(); let mut rem = buf.len();
let mut read: usize = 0; let mut read: usize = 0;
while rem > 0 { while rem > 0 {
if block != self.cur_block as usize { if block != self.cur_block {
self.stream.read_exact(&mut self.buf)?; self.stream.read_exact(&mut self.buf)?;
self.cur_block = block as u64; self.cur_block = block;
} }
let mut cache_size = rem; let mut cache_size = rem;
if cache_size + block_offset > BUFFER_SIZE { if cache_size + block_offset > SECTOR_SIZE {
cache_size = BUFFER_SIZE - block_offset; cache_size = SECTOR_SIZE - block_offset;
} }
buf[read..read + cache_size] buf[read..read + cache_size]
@ -71,7 +100,7 @@ impl<'a> Read for GCPartReadStream<'a> {
} }
self.offset += buf.len() as u64; self.offset += buf.len() as u64;
io::Result::Ok(buf.len()) Ok(buf.len())
} }
} }
@ -82,15 +111,15 @@ impl<'a> Seek for GCPartReadStream<'a> {
SeekFrom::End(v) => (self.stable_stream_len()? as i64 + v) as u64, SeekFrom::End(v) => (self.stable_stream_len()? as i64 + v) as u64,
SeekFrom::Current(v) => (self.offset as i64 + v) as u64, SeekFrom::Current(v) => (self.offset as i64 + v) as u64,
}; };
let block = self.offset / BUFFER_SIZE as u64; let block = self.offset / SECTOR_SIZE as u64;
if block != self.cur_block { if block as u32 != self.cur_block {
self.stream.seek(SeekFrom::Start(block * BUFFER_SIZE as u64))?; self.stream.seek(SeekFrom::Start(block * SECTOR_SIZE as u64))?;
self.cur_block = u64::MAX; self.cur_block = u32::MAX;
} }
io::Result::Ok(self.offset) Ok(self.offset)
} }
fn stream_position(&mut self) -> io::Result<u64> { io::Result::Ok(self.offset) } fn stream_position(&mut self) -> io::Result<u64> { Ok(self.offset) }
} }
impl<'a> ReadStream for GCPartReadStream<'a> { impl<'a> ReadStream for GCPartReadStream<'a> {
@ -102,28 +131,129 @@ impl<'a> ReadStream for GCPartReadStream<'a> {
impl<'a> PartReadStream for GCPartReadStream<'a> { impl<'a> PartReadStream for GCPartReadStream<'a> {
fn begin_file_stream(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream> { fn begin_file_stream(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream> {
assert_eq!(node.kind, NodeKind::File); assert_eq!(node.kind, NodeKind::File);
io::Result::Ok(self.new_window(node.offset as u64, node.length as u64)?) self.new_window(node.offset as u64, node.length as u64)
} }
fn read_header(&mut self) -> Result<Box<dyn PartHeader>> { fn read_header(&mut self) -> Result<Box<dyn PartHeader>> {
self.seek(SeekFrom::Start(0))?; self.seek(SeekFrom::Start(0)).context("Seeking to partition header")?;
Result::Ok(Box::from(self.read_be::<GCPartition>()?)) Ok(Box::from(read_part_header(self)?))
} }
fn ideal_buffer_size(&self) -> usize { BUFFER_SIZE } fn ideal_buffer_size(&self) -> usize { SECTOR_SIZE }
} }
#[derive(Clone, Debug, PartialEq, BinRead)] const BOOT_SIZE: usize = Header::STATIC_SIZE + PartitionHeader::STATIC_SIZE;
const BI2_SIZE: usize = 0x2000;
#[derive(Clone, Debug)]
pub(crate) struct GCPartition { pub(crate) struct GCPartition {
raw_boot: [u8; BOOT_SIZE],
raw_bi2: [u8; BI2_SIZE],
raw_apploader: Vec<u8>,
raw_fst: Vec<u8>,
raw_dol: Vec<u8>,
// Parsed
header: Header, header: Header,
bi2_header: BI2Header, partition_header: PartitionHeader,
#[br(seek_before = SeekFrom::Start(header.fst_off as u64))] apploader_header: AppLoaderHeader,
#[br(parse_with = node_parser)]
root_node: NodeType, root_node: NodeType,
dol_header: DolHeader,
}
fn read_part_header<R>(reader: &mut R) -> Result<GCPartition>
where R: Read + Seek + ?Sized {
// boot.bin
let raw_boot = <[u8; BOOT_SIZE]>::from_reader(reader).context("Reading boot.bin")?;
let mut boot_bytes = raw_boot.as_slice();
let header = Header::from_reader(&mut boot_bytes).context("Parsing disc header")?;
let partition_header =
PartitionHeader::from_reader(&mut boot_bytes).context("Parsing partition header")?;
debug_assert_eq!(boot_bytes.len(), 0, "failed to consume boot.bin");
// bi2.bin
let raw_bi2 = <[u8; BI2_SIZE]>::from_reader(reader).context("Reading bi2.bin")?;
// apploader.bin
let mut raw_apploader =
read_bytes(reader, AppLoaderHeader::STATIC_SIZE).context("Reading apploader header")?;
let apploader_header = AppLoaderHeader::from_reader(&mut raw_apploader.as_slice())
.context("Parsing apploader header")?;
raw_apploader.resize(
AppLoaderHeader::STATIC_SIZE
+ apploader_header.size as usize
+ apploader_header.trailer_size as usize,
0,
);
reader
.read_exact(&mut raw_apploader[AppLoaderHeader::STATIC_SIZE..])
.context("Reading apploader")?;
// fst.bin
reader
.seek(SeekFrom::Start(partition_header.fst_off as u64))
.context("Seeking to FST offset")?;
let raw_fst = read_bytes(reader, partition_header.fst_sz as usize).with_context(|| {
format!(
"Reading partition FST (offset {}, size {})",
partition_header.fst_off, partition_header.fst_sz
)
})?;
let root_node = read_fst(&mut Cursor::new(&*raw_fst))?;
// main.dol
reader
.seek(SeekFrom::Start(partition_header.dol_off as u64))
.context("Seeking to DOL offset")?;
let mut raw_dol = read_bytes(reader, DolHeader::STATIC_SIZE).context("Reading DOL header")?;
let dol_header =
DolHeader::from_reader(&mut raw_dol.as_slice()).context("Parsing DOL header")?;
let dol_size = dol_header
.text_offs
.iter()
.zip(&dol_header.text_sizes)
.map(|(offs, size)| offs + size)
.chain(
dol_header.data_offs.iter().zip(&dol_header.data_sizes).map(|(offs, size)| offs + size),
)
.max()
.unwrap_or(DolHeader::STATIC_SIZE as u32);
raw_dol.resize(dol_size as usize, 0);
reader.read_exact(&mut raw_dol[DolHeader::STATIC_SIZE..]).context("Reading DOL")?;
Ok(GCPartition {
raw_boot,
raw_bi2,
raw_apploader,
raw_fst,
raw_dol,
header,
partition_header,
apploader_header,
root_node,
dol_header,
})
} }
impl PartHeader for GCPartition { impl PartHeader for GCPartition {
fn root_node(&self) -> &NodeType { &self.root_node } fn root_node(&self) -> &NodeType { &self.root_node }
fn find_node(&self, path: &str) -> Option<&NodeType> { find_node(&self.root_node, path) } fn find_node(&self, path: &str) -> Option<&NodeType> { find_node(&self.root_node, path) }
fn boot_bytes(&self) -> &[u8] { &self.raw_boot }
fn bi2_bytes(&self) -> &[u8] { &self.raw_bi2 }
fn apploader_bytes(&self) -> &[u8] { &self.raw_apploader }
fn fst_bytes(&self) -> &[u8] { &self.raw_fst }
fn dol_bytes(&self) -> &[u8] { &self.raw_dol }
fn disc_header(&self) -> &Header { &self.header }
fn partition_header(&self) -> &PartitionHeader { &self.partition_header }
fn apploader_header(&self) -> &AppLoaderHeader { &self.apploader_header }
fn dol_header(&self) -> &DolHeader { &self.dol_header }
} }

View File

@ -1,22 +1,21 @@
//! Disc type related logic (GameCube, Wii) //! Disc type related logic (GameCube, Wii)
use std::{fmt::Debug, io}; use std::{ffi::CStr, fmt::Debug, io, io::Read};
use binrw::{BinRead, BinReaderExt, NullString};
use crate::{ use crate::{
disc::{gcn::DiscGCN, wii::DiscWii}, disc::{gcn::DiscGCN, wii::DiscWii},
fst::{Node, NodeType}, fst::{Node, NodeType},
io::DiscIO, io::DiscIO,
streams::{ReadStream, SharedWindowedReadStream}, streams::{ReadStream, SharedWindowedReadStream},
Error, Result, util::reader::{skip_bytes, struct_size, FromReader},
Error, Result, ResultContext,
}; };
pub(crate) mod gcn; pub(crate) mod gcn;
pub(crate) mod wii; pub(crate) mod wii;
/// Shared GameCube & Wii disc header /// Shared GameCube & Wii disc header
#[derive(Clone, Debug, PartialEq, BinRead)] #[derive(Clone, Debug, PartialEq)]
pub struct Header { pub struct Header {
/// Game ID (e.g. GM8E01 for Metroid Prime) /// Game ID (e.g. GM8E01 for Metroid Prime)
pub game_id: [u8; 6], pub game_id: [u8; 6],
@ -28,24 +27,67 @@ pub struct Header {
pub audio_streaming: u8, pub audio_streaming: u8,
/// Audio streaming buffer size /// Audio streaming buffer size
pub audio_stream_buf_size: u8, pub audio_stream_buf_size: u8,
#[br(pad_before(14))]
/// If this is a Wii disc, this will be 0x5D1C9EA3 /// If this is a Wii disc, this will be 0x5D1C9EA3
pub wii_magic: u32, pub wii_magic: u32,
/// If this is a GameCube disc, this will be 0xC2339F3D /// If this is a GameCube disc, this will be 0xC2339F3D
pub gcn_magic: u32, pub gcn_magic: u32,
/// Game title /// Game title
#[br(pad_size_to(64), map = NullString::into_string)]
pub game_title: String, pub game_title: String,
/// Disable hash verification /// Disable hash verification
pub disable_hash_verification: u8, pub disable_hash_verification: u8,
/// Disable disc encryption and H3 hash table loading and verification /// Disable disc encryption and H3 hash table loading and verification
pub disable_disc_enc: u8, pub disable_disc_enc: u8,
}
fn from_c_str(bytes: &[u8]) -> io::Result<String> {
CStr::from_bytes_until_nul(bytes)
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?
.to_str()
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
.map(|s| s.to_string())
}
impl FromReader for Header {
type Args<'a> = ();
const STATIC_SIZE: usize = 0x400;
fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
where R: Read + ?Sized {
let game_id = <[u8; 6]>::from_reader(reader)?;
let disc_num = u8::from_reader(reader)?;
let disc_version = u8::from_reader(reader)?;
let audio_streaming = u8::from_reader(reader)?;
let audio_stream_buf_size = u8::from_reader(reader)?;
skip_bytes::<14, _>(reader)?; // padding
let wii_magic = u32::from_reader(reader)?;
let gcn_magic = u32::from_reader(reader)?;
let game_title = from_c_str(&<[u8; 64]>::from_reader(reader)?)?;
let disable_hash_verification = u8::from_reader(reader)?;
let disable_disc_enc = u8::from_reader(reader)?;
skip_bytes::<926, _>(reader)?; // padding
Ok(Self {
game_id,
disc_num,
disc_version,
audio_streaming,
audio_stream_buf_size,
wii_magic,
gcn_magic,
game_title,
disable_hash_verification,
disable_disc_enc,
})
}
}
/// Partition header
#[derive(Clone, Debug, PartialEq)]
pub struct PartitionHeader {
/// Debug monitor offset /// Debug monitor offset
#[br(pad_before(0x39e))]
pub debug_mon_off: u32, pub debug_mon_off: u32,
/// Debug monitor load address /// Debug monitor load address
pub debug_load_addr: u32, pub debug_load_addr: u32,
#[br(pad_before(0x18))]
/// Offset to main DOL (Wii: >> 2) /// Offset to main DOL (Wii: >> 2)
pub dol_off: u32, pub dol_off: u32,
/// Offset to file system table (Wii: >> 2) /// Offset to file system table (Wii: >> 2)
@ -59,28 +101,130 @@ pub struct Header {
/// User position /// User position
pub user_position: u32, pub user_position: u32,
/// User size /// User size
#[br(pad_after(4))]
pub user_sz: u32, pub user_sz: u32,
} }
#[derive(Debug, PartialEq, BinRead, Copy, Clone)] impl FromReader for PartitionHeader {
pub(crate) struct BI2Header { type Args<'a> = ();
pub(crate) debug_monitor_size: i32,
pub(crate) sim_mem_size: i32, const STATIC_SIZE: usize = struct_size([
pub(crate) arg_offset: u32, u32::STATIC_SIZE, // debug_mon_off
pub(crate) debug_flag: u32, u32::STATIC_SIZE, // debug_load_addr
pub(crate) trk_address: u32, 0x18, // padding
pub(crate) trk_size: u32, u32::STATIC_SIZE, // dol_off
pub(crate) country_code: u32, u32::STATIC_SIZE, // fst_off
pub(crate) unk1: u32, u32::STATIC_SIZE, // fst_sz
pub(crate) unk2: u32, u32::STATIC_SIZE, // fst_max_sz
pub(crate) unk3: u32, u32::STATIC_SIZE, // fst_memory_address
pub(crate) dol_limit: u32, u32::STATIC_SIZE, // user_position
#[br(pad_after(0x1fd0))] u32::STATIC_SIZE, // user_sz
pub(crate) unk4: u32, 4, // padding
]);
fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
where R: Read + ?Sized {
let debug_mon_off = u32::from_reader(reader)?;
let debug_load_addr = u32::from_reader(reader)?;
skip_bytes::<0x18, _>(reader)?; // padding
let dol_off = u32::from_reader(reader)?;
let fst_off = u32::from_reader(reader)?;
let fst_sz = u32::from_reader(reader)?;
let fst_max_sz = u32::from_reader(reader)?;
let fst_memory_address = u32::from_reader(reader)?;
let user_position = u32::from_reader(reader)?;
let user_sz = u32::from_reader(reader)?;
skip_bytes::<4, _>(reader)?; // padding
Ok(Self {
debug_mon_off,
debug_load_addr,
dol_off,
fst_off,
fst_sz,
fst_max_sz,
fst_memory_address,
user_position,
user_sz,
})
}
} }
pub(crate) const BUFFER_SIZE: usize = 0x8000; #[derive(Debug, PartialEq, Clone)]
pub struct AppLoaderHeader {
pub date: String,
pub entry_point: u32,
pub size: u32,
pub trailer_size: u32,
}
impl FromReader for AppLoaderHeader {
type Args<'a> = ();
const STATIC_SIZE: usize = struct_size([
16, // date
u32::STATIC_SIZE, // entry_point
u32::STATIC_SIZE, // size
u32::STATIC_SIZE, // trailer_size
4, // padding
]);
fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
where R: Read + ?Sized {
let date = from_c_str(&<[u8; 16]>::from_reader(reader)?)?;
let entry_point = u32::from_reader(reader)?;
let size = u32::from_reader(reader)?;
let trailer_size = u32::from_reader(reader)?;
skip_bytes::<4, _>(reader)?; // padding
Ok(Self { date, entry_point, size, trailer_size })
}
}
pub const DOL_MAX_TEXT_SECTIONS: usize = 7;
pub const DOL_MAX_DATA_SECTIONS: usize = 11;
#[derive(Debug, Clone)]
pub struct DolHeader {
pub text_offs: [u32; DOL_MAX_TEXT_SECTIONS],
pub data_offs: [u32; DOL_MAX_DATA_SECTIONS],
pub text_addrs: [u32; DOL_MAX_TEXT_SECTIONS],
pub data_addrs: [u32; DOL_MAX_DATA_SECTIONS],
pub text_sizes: [u32; DOL_MAX_TEXT_SECTIONS],
pub data_sizes: [u32; DOL_MAX_DATA_SECTIONS],
pub bss_addr: u32,
pub bss_size: u32,
pub entry_point: u32,
}
impl FromReader for DolHeader {
type Args<'a> = ();
const STATIC_SIZE: usize = 0x100;
fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
where R: Read + ?Sized {
let result = Self {
text_offs: <_>::from_reader(reader)?,
data_offs: <_>::from_reader(reader)?,
text_addrs: <_>::from_reader(reader)?,
data_addrs: <_>::from_reader(reader)?,
text_sizes: <_>::from_reader(reader)?,
data_sizes: <_>::from_reader(reader)?,
bss_addr: <_>::from_reader(reader)?,
bss_size: <_>::from_reader(reader)?,
entry_point: <_>::from_reader(reader)?,
};
skip_bytes::<0x1C, _>(reader)?; // padding
Ok(result)
}
}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum PartitionType {
Data,
Update,
Channel,
}
pub(crate) const SECTOR_SIZE: usize = 0x8000;
/// Contains a disc's header & partition information. /// Contains a disc's header & partition information.
pub trait DiscBase: Send + Sync { pub trait DiscBase: Send + Sync {
@ -95,19 +239,35 @@ pub trait DiscBase: Send + Sync {
/// ///
/// Basic usage: /// Basic usage:
/// ```no_run /// ```no_run
/// use nod::disc::new_disc_base; /// use nod::{
/// use nod::io::new_disc_io; /// disc::new_disc_base,
/// io::{new_disc_io, DiscIOOptions},
/// };
/// ///
/// let mut disc_io = new_disc_io("path/to/file".as_ref())?; /// # fn main() -> nod::Result<()> {
/// let options = DiscIOOptions::default();
/// let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?;
/// let disc_base = new_disc_base(disc_io.as_mut())?; /// let disc_base = new_disc_base(disc_io.as_mut())?;
/// let mut partition = disc_base.get_data_partition(disc_io.as_mut(), false)?; /// let mut partition = disc_base.get_data_partition(disc_io.as_mut(), false)?;
/// # Ok::<(), nod::Error>(()) /// # Ok(())
/// # }
/// ``` /// ```
fn get_data_partition<'a>( fn get_data_partition<'a>(
&self, &self,
disc_io: &'a mut dyn DiscIO, disc_io: &'a mut dyn DiscIO,
validate_hashes: bool, validate_hashes: bool,
) -> Result<Box<dyn PartReadStream + 'a>>; ) -> Result<Box<dyn PartReadStream + 'a>>;
/// Opens a new partition read stream for the first partition matching
/// the specified type.
///
/// `validate_hashes`: Validate Wii disc hashes while reading (slow!)
fn get_partition<'a>(
&self,
disc_io: &'a mut dyn DiscIO,
part_type: PartitionType,
validate_hashes: bool,
) -> Result<Box<dyn PartReadStream + 'a>>;
} }
/// Creates a new [`DiscBase`] instance. /// Creates a new [`DiscBase`] instance.
@ -116,23 +276,31 @@ pub trait DiscBase: Send + Sync {
/// ///
/// Basic usage: /// Basic usage:
/// ```no_run /// ```no_run
/// use nod::io::new_disc_io; /// use nod::{
/// use nod::disc::new_disc_base; /// disc::new_disc_base,
/// io::{new_disc_io, DiscIOOptions},
/// };
/// ///
/// let mut disc_io = new_disc_io("path/to/file".as_ref())?; /// # fn main() -> nod::Result<()> {
/// let options = DiscIOOptions::default();
/// let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?;
/// let disc_base = new_disc_base(disc_io.as_mut())?; /// let disc_base = new_disc_base(disc_io.as_mut())?;
/// disc_base.get_header(); /// disc_base.get_header();
/// # Ok::<(), nod::Error>(()) /// # Ok(())
/// # }
/// ``` /// ```
pub fn new_disc_base(disc_io: &mut dyn DiscIO) -> Result<Box<dyn DiscBase>> { pub fn new_disc_base(disc_io: &mut dyn DiscIO) -> Result<Box<dyn DiscBase>> {
let mut stream = disc_io.begin_read_stream(0)?; let mut stream = disc_io.begin_read_stream(0).context("Opening disc stream")?;
let header: Header = stream.read_be()?; let header_bytes =
<[u8; Header::STATIC_SIZE]>::from_reader(&mut stream).context("Reading disc header")?;
let header =
Header::from_reader(&mut header_bytes.as_slice()).context("Parsing disc header")?;
if header.wii_magic == 0x5D1C9EA3 { if header.wii_magic == 0x5D1C9EA3 {
Result::Ok(Box::from(DiscWii::new(stream.as_mut(), header)?)) Ok(Box::from(DiscWii::new(stream.as_mut(), header)?))
} else if header.gcn_magic == 0xC2339F3D { } else if header.gcn_magic == 0xC2339F3D {
Result::Ok(Box::from(DiscGCN::new(header)?)) Ok(Box::from(DiscGCN::new(header)?))
} else { } else {
Result::Err(Error::DiscFormat("Invalid GC/Wii magic".to_string())) Err(Error::DiscFormat(format!("Invalid GC/Wii magic: {:#010X}", header.wii_magic)))
} }
} }
@ -145,21 +313,27 @@ pub trait PartReadStream: ReadStream {
/// ///
/// Basic usage: /// Basic usage:
/// ```no_run /// ```no_run
/// use nod::disc::{new_disc_base, PartHeader};
/// use nod::fst::NodeType;
/// use nod::io::new_disc_io;
/// use std::io::Read; /// use std::io::Read;
/// ///
/// let mut disc_io = new_disc_io("path/to/file".as_ref())?; /// use nod::{
/// disc::{new_disc_base, PartHeader},
/// fst::NodeType,
/// io::{new_disc_io, DiscIOOptions},
/// };
///
/// fn main() -> nod::Result<()> {
/// let options = DiscIOOptions::default();
/// let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?;
/// let disc_base = new_disc_base(disc_io.as_mut())?; /// let disc_base = new_disc_base(disc_io.as_mut())?;
/// let mut partition = disc_base.get_data_partition(disc_io.as_mut(), false)?; /// let mut partition = disc_base.get_data_partition(disc_io.as_mut(), false)?;
/// let header = partition.read_header()?; /// let header = partition.read_header()?;
/// if let Some(NodeType::File(node)) = header.find_node("/MP3/Worlds.txt") { /// if let Some(NodeType::File(node)) = header.find_node("/MP3/Worlds.txt") {
/// let mut s = String::new(); /// let mut s = String::new();
/// partition.begin_file_stream(node)?.read_to_string(&mut s); /// partition.begin_file_stream(node)?.read_to_string(&mut s).expect("Failed to read file");
/// println!("{}", s); /// println!("{}", s);
/// } /// }
/// # Ok::<(), nod::Error>(()) /// Ok(())
/// }
/// ``` /// ```
fn begin_file_stream(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream>; fn begin_file_stream(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream>;
@ -183,11 +357,15 @@ pub trait PartHeader: Debug + Send + Sync {
/// ///
/// Basic usage: /// Basic usage:
/// ```no_run /// ```no_run
/// use nod::disc::{new_disc_base, PartHeader}; /// use nod::{
/// use nod::fst::NodeType; /// disc::{new_disc_base, PartHeader},
/// use nod::io::new_disc_io; /// fst::NodeType,
/// io::{new_disc_io, DiscIOOptions},
/// };
/// ///
/// let mut disc_io = new_disc_io("path/to/file".as_ref())?; /// fn main() -> nod::Result<()> {
/// let options = DiscIOOptions::default();
/// let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?;
/// let disc_base = new_disc_base(disc_io.as_mut())?; /// let disc_base = new_disc_base(disc_io.as_mut())?;
/// let mut partition = disc_base.get_data_partition(disc_io.as_mut(), false)?; /// let mut partition = disc_base.get_data_partition(disc_io.as_mut(), false)?;
/// let header = partition.read_header()?; /// let header = partition.read_header()?;
@ -197,7 +375,35 @@ pub trait PartHeader: Debug + Send + Sync {
/// if let Some(NodeType::Directory(node, children)) = header.find_node("/MP1") { /// if let Some(NodeType::Directory(node, children)) = header.find_node("/MP1") {
/// println!("Number of files: {}", children.len()); /// println!("Number of files: {}", children.len());
/// } /// }
/// # Ok::<(), nod::Error>(()) /// Ok(())
/// }
/// ``` /// ```
fn find_node(&self, path: &str) -> Option<&NodeType>; fn find_node(&self, path: &str) -> Option<&NodeType>;
/// Disc and partition header (boot.bin)
fn boot_bytes(&self) -> &[u8];
/// Debug and region information (bi2.bin)
fn bi2_bytes(&self) -> &[u8];
/// Apploader (apploader.bin)
fn apploader_bytes(&self) -> &[u8];
/// File system table (fst.bin)
fn fst_bytes(&self) -> &[u8];
/// Main binary (main.dol)
fn dol_bytes(&self) -> &[u8];
/// Disc header
fn disc_header(&self) -> &Header;
/// Partition header
fn partition_header(&self) -> &PartitionHeader;
/// Apploader header
fn apploader_header(&self) -> &AppLoaderHeader;
/// DOL header
fn dol_header(&self) -> &DolHeader;
} }

View File

@ -3,24 +3,33 @@ use std::{
io::{Read, Seek, SeekFrom}, io::{Read, Seek, SeekFrom},
}; };
use aes::{Aes128, Block, NewBlockCipher}; use aes::{
use binrw::{BinRead, BinReaderExt}; cipher::{block_padding::NoPadding, BlockDecryptMut, KeyIvInit},
use block_modes::{block_padding::NoPadding, BlockMode, Cbc}; Aes128, Block,
};
use sha1::{digest, Digest, Sha1}; use sha1::{digest, Digest, Sha1};
use crate::{ use crate::{
array_ref, array_ref,
disc::{BI2Header, DiscBase, DiscIO, Header, PartHeader, PartReadStream, BUFFER_SIZE}, disc::{
div_rem, AppLoaderHeader, DiscBase, DiscIO, DolHeader, Header, PartHeader, PartReadStream,
fst::{find_node, node_parser, Node, NodeKind, NodeType}, PartitionHeader, PartitionType, SECTOR_SIZE,
},
fst::{find_node, Node, NodeKind, NodeType},
streams::{wrap_windowed, OwningWindowedReadStream, ReadStream, SharedWindowedReadStream}, streams::{wrap_windowed, OwningWindowedReadStream, ReadStream, SharedWindowedReadStream},
Error, Result, util::{
div_rem,
reader::{skip_bytes, struct_size, FromReader},
},
Error, Result, ResultContext,
}; };
type Aes128Cbc = Cbc<Aes128, NoPadding>; pub(crate) const HASHES_SIZE: usize = 0x400;
pub(crate) const BLOCK_SIZE: usize = SECTOR_SIZE - HASHES_SIZE; // 0x7C00
/// AES-128-CBC decryptor
type Aes128Cbc = cbc::Decryptor<Aes128>;
const BLOCK_SIZE: usize = 0x7c00;
const BUFFER_OFFSET: usize = BUFFER_SIZE - BLOCK_SIZE;
#[rustfmt::skip] #[rustfmt::skip]
const COMMON_KEYS: [[u8; 16]; 2] = [ const COMMON_KEYS: [[u8; 16]; 2] = [
/* Normal */ /* Normal */
@ -29,82 +38,186 @@ const COMMON_KEYS: [[u8; 16]; 2] = [
[0x63, 0xb8, 0x2b, 0xb4, 0xf4, 0x61, 0x4e, 0x2e, 0x13, 0xf2, 0xfe, 0xfb, 0xba, 0x4c, 0x9b, 0x7e], [0x63, 0xb8, 0x2b, 0xb4, 0xf4, 0x61, 0x4e, 0x2e, 0x13, 0xf2, 0xfe, 0xfb, 0xba, 0x4c, 0x9b, 0x7e],
]; ];
#[derive(Debug, PartialEq, BinRead)] #[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[br(repr = u32)]
enum WiiPartType {
Data,
Update,
Channel,
}
#[derive(Debug, PartialEq, BinRead)]
#[br(repr = u32)]
enum SigType { enum SigType {
Rsa4096 = 0x00010000, Rsa4096,
Rsa2048 = 0x00010001, Rsa2048,
EllipticalCurve = 0x00010002, EllipticalCurve,
} }
#[derive(Debug, PartialEq, BinRead)] impl FromReader for SigType {
#[br(repr = u32)] type Args<'a> = ();
const STATIC_SIZE: usize = u32::STATIC_SIZE;
fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
where R: Read + ?Sized {
match u32::from_reader(reader)? {
0x00010000 => Ok(SigType::Rsa4096),
0x00010001 => Ok(SigType::Rsa2048),
0x00010002 => Ok(SigType::EllipticalCurve),
_ => Err(io::Error::new(io::ErrorKind::InvalidData, "invalid signature type")),
}
}
}
impl SigType {
fn size(self) -> usize {
match self {
SigType::Rsa4096 => 512,
SigType::Rsa2048 => 256,
SigType::EllipticalCurve => 64,
}
}
}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
enum KeyType { enum KeyType {
Rsa4096 = 0x00000000, Rsa4096,
Rsa2048 = 0x00000001, Rsa2048,
} }
#[derive(Debug, PartialEq, BinRead)] impl FromReader for KeyType {
type Args<'a> = ();
const STATIC_SIZE: usize = u32::STATIC_SIZE;
fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
where R: Read + ?Sized {
match u32::from_reader(reader)? {
0x00000000 => Ok(KeyType::Rsa4096),
0x00000001 => Ok(KeyType::Rsa2048),
_ => Err(io::Error::new(io::ErrorKind::InvalidData, "invalid key type")),
}
}
}
impl KeyType {
fn size(self) -> usize {
match self {
KeyType::Rsa4096 => 512,
KeyType::Rsa2048 => 256,
}
}
}
#[derive(Debug, PartialEq)]
struct WiiPart { struct WiiPart {
#[br(map = | x: u32 | (x as u64) << 2)] // #[br(map = |x: u32| (x as u64) << 2)]
part_data_off: u64, part_data_off: u64,
part_type: WiiPartType, part_type: PartitionType,
#[br(restore_position, args(part_data_off))] // #[br(restore_position, args(part_data_off))]
part_header: WiiPartitionHeader, part_header: WiiPartitionHeader,
} }
#[derive(Debug, PartialEq, BinRead)] #[derive(Debug, PartialEq)]
struct WiiPartInfo { struct WiiPartInfo {
#[br(seek_before = SeekFrom::Start(0x40000))] // #[br(seek_before = SeekFrom::Start(0x40000))]
part_count: u32, part_count: u32,
#[br(map = | x: u32 | (x as u64) << 2)] // #[br(map = |x: u32| (x as u64) << 2)]
part_info_off: u64, part_info_off: u64,
#[br(seek_before = SeekFrom::Start(part_info_off), count = part_count)] // #[br(seek_before = SeekFrom::Start(part_info_off), count = part_count)]
parts: Vec<WiiPart>, parts: Vec<WiiPart>,
} }
#[derive(Debug, PartialEq, BinRead)] #[derive(Debug, PartialEq, Default)]
struct TicketTimeLimit { struct TicketTimeLimit {
enable_time_limit: u32, enable_time_limit: u32,
time_limit: u32, time_limit: u32,
} }
#[derive(Debug, PartialEq, BinRead)] impl FromReader for TicketTimeLimit {
type Args<'a> = ();
const STATIC_SIZE: usize = struct_size([
u32::STATIC_SIZE, // enable_time_limit
u32::STATIC_SIZE, // time_limit
]);
fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
where R: Read + ?Sized {
let enable_time_limit = u32::from_reader(reader)?;
let time_limit = u32::from_reader(reader)?;
Ok(TicketTimeLimit { enable_time_limit, time_limit })
}
}
#[derive(Debug, PartialEq)]
struct Ticket { struct Ticket {
sig_type: SigType, sig_type: SigType,
#[br(count = 256)] sig: [u8; 256],
sig: Vec<u8>, sig_issuer: [u8; 64],
#[br(pad_before = 60, count = 64)] ecdh: [u8; 60],
sig_issuer: Vec<u8>,
#[br(count = 60)]
ecdh: Vec<u8>,
#[br(pad_before = 3)]
enc_key: [u8; 16], enc_key: [u8; 16],
#[br(pad_before = 1)]
ticket_id: [u8; 8], ticket_id: [u8; 8],
console_id: [u8; 4], console_id: [u8; 4],
title_id: [u8; 8], title_id: [u8; 8],
#[br(pad_before = 2)]
ticket_version: u16, ticket_version: u16,
permitted_titles_mask: u32, permitted_titles_mask: u32,
permit_mask: u32, permit_mask: u32,
title_export_allowed: u8, title_export_allowed: u8,
common_key_idx: u8, common_key_idx: u8,
#[br(pad_before = 48, count = 64)] content_access_permissions: [u8; 64],
content_access_permissions: Vec<u8>, time_limits: [TicketTimeLimit; 8],
#[br(pad_before = 2, count = 8)]
time_limits: Vec<TicketTimeLimit>,
} }
#[derive(Debug, PartialEq, BinRead)] impl FromReader for Ticket {
type Args<'a> = ();
const STATIC_SIZE: usize = 0x2A4;
fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
where R: Read + ?Sized {
let sig_type = SigType::from_reader(reader)?;
let sig = <[u8; 256]>::from_reader(reader)?;
skip_bytes::<0x3C, _>(reader)?;
let sig_issuer = <[u8; 64]>::from_reader(reader)?;
let ecdh = <[u8; 60]>::from_reader(reader)?;
skip_bytes::<3, _>(reader)?;
let enc_key = <[u8; 16]>::from_reader(reader)?;
skip_bytes::<1, _>(reader)?;
let ticket_id = <[u8; 8]>::from_reader(reader)?;
let console_id = <[u8; 4]>::from_reader(reader)?;
let title_id = <[u8; 8]>::from_reader(reader)?;
skip_bytes::<2, _>(reader)?;
let ticket_version = u16::from_reader(reader)?;
let permitted_titles_mask = u32::from_reader(reader)?;
let permit_mask = u32::from_reader(reader)?;
let title_export_allowed = u8::from_reader(reader)?;
let common_key_idx = u8::from_reader(reader)?;
skip_bytes::<48, _>(reader)?;
let content_access_permissions = <[u8; 64]>::from_reader(reader)?;
let time_limits = [
TicketTimeLimit::from_reader(reader)?,
TicketTimeLimit::from_reader(reader)?,
TicketTimeLimit::from_reader(reader)?,
TicketTimeLimit::from_reader(reader)?,
TicketTimeLimit::from_reader(reader)?,
TicketTimeLimit::from_reader(reader)?,
TicketTimeLimit::from_reader(reader)?,
TicketTimeLimit::from_reader(reader)?,
];
Ok(Ticket {
sig_type,
sig,
sig_issuer,
ecdh,
enc_key,
ticket_id,
console_id,
title_id,
ticket_version,
permitted_titles_mask,
permit_mask,
title_export_allowed,
common_key_idx,
content_access_permissions,
time_limits,
})
}
}
#[derive(Debug, PartialEq)]
struct TmdContent { struct TmdContent {
id: u32, id: u32,
index: u16, index: u16,
@ -113,78 +226,89 @@ struct TmdContent {
hash: [u8; 20], hash: [u8; 20],
} }
#[derive(Debug, PartialEq, BinRead)] #[derive(Debug, PartialEq)]
struct Tmd { struct Tmd {
sig_type: SigType, sig_type: SigType,
#[br(count = 256)] // #[br(count = 256)]
sig: Vec<u8>, sig: Vec<u8>,
#[br(pad_before = 60, count = 64)] // #[br(pad_before = 60, count = 64)]
sig_issuer: Vec<u8>, sig_issuer: Vec<u8>,
version: u8, version: u8,
ca_crl_version: u8, ca_crl_version: u8,
signer_crl_version: u8, signer_crl_version: u8,
#[br(pad_before = 1)] // #[br(pad_before = 1)]
ios_id_major: u32, ios_id_major: u32,
ios_id_minor: u32, ios_id_minor: u32,
title_id_major: u32, title_id_major: u32,
title_id_minor: [char; 4], title_id_minor: [u8; 4],
title_type: u32, title_type: u32,
group_id: u16, group_id: u16,
#[br(pad_before = 62)] // #[br(pad_before = 62)]
access_flags: u32, access_flags: u32,
title_version: u16, title_version: u16,
num_contents: u16, num_contents: u16,
#[br(pad_after = 2)] // #[br(pad_after = 2)]
boot_idx: u16, boot_idx: u16,
#[br(count = num_contents)] // #[br(count = num_contents)]
contents: Vec<TmdContent>, contents: Vec<TmdContent>,
} }
#[derive(Debug, PartialEq, BinRead)] #[derive(Debug, PartialEq)]
struct Certificate { struct Certificate {
sig_type: SigType, sig_type: SigType,
#[br(count = if sig_type == SigType::Rsa4096 { 512 } else if sig_type == SigType::Rsa2048 { 256 } else if sig_type == SigType::EllipticalCurve { 64 } else { 0 })] // #[br(count = sig_size(sig_type))]
sig: Vec<u8>, sig: Vec<u8>,
#[br(pad_before = 60, count = 64)] // #[br(pad_before = 60, count = 64)]
issuer: Vec<u8>, issuer: Vec<u8>,
key_type: KeyType, key_type: KeyType,
#[br(count = 64)] // #[br(count = 64)]
subject: Vec<u8>, subject: Vec<u8>,
#[br(count = if key_type == KeyType::Rsa4096 { 512 } else if key_type == KeyType::Rsa2048 { 256 } else { 0 })] // #[br(count = key_size(key_type))]
key: Vec<u8>, key: Vec<u8>,
modulus: u32, modulus: u32,
#[br(pad_after = 52)] // #[br(pad_after = 52)]
pub_exp: u32, pub_exp: u32,
} }
#[derive(Debug, PartialEq, BinRead)] #[derive(Debug, PartialEq)]
#[br(import(partition_off: u64))] // #[br(import(partition_off: u64))]
struct WiiPartitionHeader { struct WiiPartitionHeader {
#[br(seek_before = SeekFrom::Start(partition_off))] // #[br(seek_before = SeekFrom::Start(partition_off))]
ticket: Ticket, ticket: Ticket,
tmd_size: u32, tmd_size: u32,
#[br(map = | x: u32 | ((x as u64) << 2) + partition_off)] // #[br(map = |x: u32| ((x as u64) << 2) + partition_off)]
tmd_off: u64, tmd_off: u64,
cert_chain_size: u32, cert_chain_size: u32,
#[br(map = | x: u32 | ((x as u64) << 2) + partition_off)] // #[br(map = |x: u32| ((x as u64) << 2) + partition_off)]
cert_chain_off: u64, cert_chain_off: u64,
#[br(map = | x: u32 | ((x as u64) << 2) + partition_off)] // #[br(map = |x: u32| ((x as u64) << 2) + partition_off)]
global_hash_table_off: u64, global_hash_table_off: u64,
#[br(map = | x: u32 | ((x as u64) << 2) + partition_off)] // #[br(map = |x: u32| ((x as u64) << 2) + partition_off)]
data_off: u64, data_off: u64,
#[br(map = | x: u32 | (x as u64) << 2)] // #[br(map = |x: u32| (x as u64) << 2)]
data_size: u64, data_size: u64,
#[br(seek_before = SeekFrom::Start(tmd_off))] // #[br(seek_before = SeekFrom::Start(tmd_off))]
tmd: Tmd, tmd: Tmd,
#[br(seek_before = SeekFrom::Start(cert_chain_off))] // #[br(seek_before = SeekFrom::Start(cert_chain_off))]
ca_cert: Certificate, ca_cert: Certificate,
tmd_cert: Certificate, tmd_cert: Certificate,
ticket_cert: Certificate, ticket_cert: Certificate,
#[br(seek_before = SeekFrom::Start(global_hash_table_off), count = 0x18000)] // #[br(seek_before = SeekFrom::Start(global_hash_table_off), count = 0x18000)]
h3_data: Vec<u8>, h3_data: Vec<u8>,
} }
impl FromReader for WiiPartitionHeader {
type Args<'a> = u64;
const STATIC_SIZE: usize = Ticket::STATIC_SIZE;
fn from_reader_args<R>(reader: &mut R, args: Self::Args<'_>) -> io::Result<Self>
where R: Read + ?Sized {
todo!()
}
}
pub(crate) struct DiscWii { pub(crate) struct DiscWii {
header: Header, header: Header,
part_info: WiiPartInfo, part_info: WiiPartInfo,
@ -192,9 +316,9 @@ pub(crate) struct DiscWii {
impl DiscWii { impl DiscWii {
pub(crate) fn new(mut stream: &mut dyn ReadStream, header: Header) -> Result<DiscWii> { pub(crate) fn new(mut stream: &mut dyn ReadStream, header: Header) -> Result<DiscWii> {
let mut disc = DiscWii { header, part_info: stream.read_be()? }; let mut disc = DiscWii { header, part_info: todo!() }; // stream.read_be()?
disc.decrypt_partition_keys()?; disc.decrypt_partition_keys()?;
Result::Ok(disc) Ok(disc)
} }
} }
@ -204,13 +328,10 @@ impl DiscWii {
let ticket = &mut part.part_header.ticket; let ticket = &mut part.part_header.ticket;
let mut iv: [u8; 16] = [0; 16]; let mut iv: [u8; 16] = [0; 16];
iv[..8].copy_from_slice(&ticket.title_id); iv[..8].copy_from_slice(&ticket.title_id);
Aes128Cbc::new( Aes128Cbc::new(&COMMON_KEYS[ticket.common_key_idx as usize].into(), &iv.into())
Aes128::new(&COMMON_KEYS[ticket.common_key_idx as usize].into()), .decrypt_padded_mut::<NoPadding>(&mut ticket.enc_key)?;
&iv.into(),
)
.decrypt(&mut ticket.enc_key)?;
} }
Result::Ok(()) Ok(())
} }
} }
@ -226,48 +347,74 @@ impl DiscBase for DiscWii {
.part_info .part_info
.parts .parts
.iter() .iter()
.find(|v| v.part_type == WiiPartType::Data) .find(|v| v.part_type == PartitionType::Data)
.ok_or_else(|| Error::DiscFormat("Failed to locate data partition".to_string()))?; .ok_or_else(|| Error::DiscFormat("Failed to locate data partition".to_string()))?;
let data_off = part.part_header.data_off; let data_off = part.part_header.data_off;
let has_crypto = disc_io.has_wii_crypto(); let has_crypto = disc_io.has_wii_crypto();
let base = disc_io
.begin_read_stream(data_off)
.map_err(|e| Error::Io("Opening data partition stream".to_string(), e))?;
let stream = wrap_windowed(base, data_off, part.part_header.data_size)
.context("Wrapping data partition stream")?;
let result = Box::new(WiiPartReadStream { let result = Box::new(WiiPartReadStream {
stream: wrap_windowed( stream,
disc_io.begin_read_stream(data_off)?, crypto: if has_crypto { Some(part.part_header.ticket.enc_key) } else { None },
data_off,
part.part_header.data_size,
)?,
crypto: if has_crypto {
Aes128::new(&part.part_header.ticket.enc_key.into()).into()
} else {
Option::None
},
offset: 0, offset: 0,
cur_block: u64::MAX, cur_block: u32::MAX,
buf: [0; 0x8000], buf: [0; 0x8000],
validate_hashes, validate_hashes,
}); });
Result::Ok(result) Ok(result)
}
fn get_partition<'a>(
&self,
disc_io: &'a mut dyn DiscIO,
part_type: PartitionType,
validate_hashes: bool,
) -> Result<Box<dyn PartReadStream + 'a>> {
let part =
self.part_info.parts.iter().find(|v| v.part_type == part_type).ok_or_else(|| {
Error::DiscFormat(format!("Failed to locate {:?} partition", part_type))
})?;
let data_off = part.part_header.data_off;
let has_crypto = disc_io.has_wii_crypto();
let base = disc_io
.begin_read_stream(data_off)
.with_context(|| format!("Opening {:?} partition stream", part_type))?;
let stream = wrap_windowed(base, data_off, part.part_header.data_size)
.with_context(|| format!("Wrapping {:?} partition stream", part_type))?;
let result = Box::new(WiiPartReadStream {
stream,
crypto: if has_crypto { Some(part.part_header.ticket.enc_key) } else { None },
offset: 0,
cur_block: u32::MAX,
buf: [0; 0x8000],
validate_hashes,
});
Ok(result)
} }
} }
struct WiiPartReadStream<'a> { struct WiiPartReadStream<'a> {
stream: OwningWindowedReadStream<'a>, stream: OwningWindowedReadStream<'a>,
crypto: Option<Aes128>, crypto: Option<[u8; 16]>,
offset: u64, offset: u64,
cur_block: u64, cur_block: u32,
buf: [u8; BUFFER_SIZE], buf: [u8; SECTOR_SIZE],
validate_hashes: bool, validate_hashes: bool,
} }
impl<'a> PartReadStream for WiiPartReadStream<'a> { impl<'a> PartReadStream for WiiPartReadStream<'a> {
fn begin_file_stream(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream> { fn begin_file_stream(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream> {
assert_eq!(node.kind, NodeKind::File); assert_eq!(node.kind, NodeKind::File);
io::Result::Ok(self.new_window((node.offset as u64) << 2, node.length as u64)?) self.new_window((node.offset as u64) << 2, node.length as u64)
} }
fn read_header(&mut self) -> Result<Box<dyn PartHeader>> { fn read_header(&mut self) -> Result<Box<dyn PartHeader>> {
self.seek(SeekFrom::Start(0))?; self.seek(SeekFrom::Start(0)).context("Seeking to partition header")?;
Result::Ok(Box::from(self.read_be::<WiiPartition>()?)) todo!()
// Ok(Box::from(self.read_be::<WiiPartition>()?))
} }
fn ideal_buffer_size(&self) -> usize { BLOCK_SIZE } fn ideal_buffer_size(&self) -> usize { BLOCK_SIZE }
@ -276,25 +423,24 @@ impl<'a> PartReadStream for WiiPartReadStream<'a> {
#[inline(always)] #[inline(always)]
fn as_digest(slice: &[u8; 20]) -> digest::Output<Sha1> { (*slice).into() } fn as_digest(slice: &[u8; 20]) -> digest::Output<Sha1> { (*slice).into() }
fn decrypt_block(part: &mut WiiPartReadStream, cluster: usize) -> io::Result<()> { fn decrypt_block(part: &mut WiiPartReadStream, cluster: u32) -> io::Result<()> {
part.stream.read_exact(&mut part.buf)?; part.stream.read_exact(&mut part.buf)?;
if part.crypto.is_some() { if let Some(key) = &part.crypto {
// Fetch IV before decrypting header // Fetch IV before decrypting header
let iv = Block::from(*array_ref![part.buf, 0x3d0, 16]); let iv_bytes = array_ref![part.buf, 0x3d0, 16];
let iv = Block::from(*iv_bytes);
// Don't need to decrypt header if we're not validating hashes // Don't need to decrypt header if we're not validating hashes
if part.validate_hashes { if part.validate_hashes {
Aes128Cbc::new(part.crypto.as_ref().unwrap().clone(), &Block::from([0; 16])) Aes128Cbc::new(key.into(), &Block::from([0; 16]))
.decrypt(&mut part.buf[..BUFFER_OFFSET]) .decrypt_padded_mut::<NoPadding>(&mut part.buf[..HASHES_SIZE])
.expect("Failed to decrypt header"); .expect("Failed to decrypt header");
} }
Aes128Cbc::new(part.crypto.as_ref().unwrap().clone(), &iv) Aes128Cbc::new(key.into(), &iv)
.decrypt(&mut part.buf[BUFFER_OFFSET..]) .decrypt_padded_mut::<NoPadding>(&mut part.buf[HASHES_SIZE..])
.expect("Failed to decrypt block"); .expect("Failed to decrypt block");
} }
if part.validate_hashes && part.crypto.is_some() if part.validate_hashes {
/* FIXME NFS validation? */ let (mut group, sub_group) = div_rem(cluster as usize, 8);
{
let (mut group, sub_group) = div_rem(cluster, 8);
group %= 8; group %= 8;
// H0 hashes // H0 hashes
for i in 0..31 { for i in 0..31 {
@ -303,12 +449,7 @@ fn decrypt_block(part: &mut WiiPartReadStream, cluster: usize) -> io::Result<()>
let expected = as_digest(array_ref![part.buf, i * 20, 20]); let expected = as_digest(array_ref![part.buf, i * 20, 20]);
let output = hash.finalize(); let output = hash.finalize();
if output != expected { if output != expected {
panic!( panic!("Invalid H0 hash! (block {:?}) {:x}\n\texpected {:x}", i, output, expected);
"Invalid hash! (block {:?}) {:?}\n\texpected {:?}",
i,
output.as_slice(),
expected
);
} }
} }
// H1 hash // H1 hash
@ -319,10 +460,8 @@ fn decrypt_block(part: &mut WiiPartReadStream, cluster: usize) -> io::Result<()>
let output = hash.finalize(); let output = hash.finalize();
if output != expected { if output != expected {
panic!( panic!(
"Invalid hash! (subgroup {:?}) {:?}\n\texpected {:?}", "Invalid H1 hash! (subgroup {:?}) {:x}\n\texpected {:x}",
sub_group, sub_group, output, expected
output.as_slice(),
expected
); );
} }
} }
@ -334,27 +473,28 @@ fn decrypt_block(part: &mut WiiPartReadStream, cluster: usize) -> io::Result<()>
let output = hash.finalize(); let output = hash.finalize();
if output != expected { if output != expected {
panic!( panic!(
"Invalid hash! (group {:?}) {:?}\n\texpected {:?}", "Invalid H2 hash! (group {:?}) {:x}\n\texpected {:x}",
group, group, output, expected
output.as_slice(),
expected
); );
} }
} }
} }
io::Result::Ok(()) Ok(())
} }
impl<'a> Read for WiiPartReadStream<'a> { impl<'a> Read for WiiPartReadStream<'a> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let (mut block, mut block_offset) = div_rem(self.offset as usize, BLOCK_SIZE); let (block, block_offset) = div_rem(self.offset, BLOCK_SIZE as u64);
let mut block = block as u32;
let mut block_offset = block_offset as usize;
let mut rem = buf.len(); let mut rem = buf.len();
let mut read: usize = 0; let mut read: usize = 0;
while rem > 0 { while rem > 0 {
if block != self.cur_block as usize { if block != self.cur_block {
decrypt_block(self, block)?; decrypt_block(self, block)?;
self.cur_block = block as u64; self.cur_block = block;
} }
let mut cache_size = rem; let mut cache_size = rem;
@ -363,7 +503,7 @@ impl<'a> Read for WiiPartReadStream<'a> {
} }
buf[read..read + cache_size].copy_from_slice( buf[read..read + cache_size].copy_from_slice(
&self.buf[BUFFER_OFFSET + block_offset..BUFFER_OFFSET + block_offset + cache_size], &self.buf[HASHES_SIZE + block_offset..HASHES_SIZE + block_offset + cache_size],
); );
read += cache_size; read += cache_size;
rem -= cache_size; rem -= cache_size;
@ -372,13 +512,13 @@ impl<'a> Read for WiiPartReadStream<'a> {
} }
self.offset += buf.len() as u64; self.offset += buf.len() as u64;
io::Result::Ok(buf.len()) Ok(buf.len())
} }
} }
#[inline(always)] #[inline(always)]
fn to_block_size(v: u64) -> u64 { fn to_block_size(v: u64) -> u64 {
(v / BUFFER_SIZE as u64) * BLOCK_SIZE as u64 + (v % BUFFER_SIZE as u64) (v / SECTOR_SIZE as u64) * BLOCK_SIZE as u64 + (v % SECTOR_SIZE as u64)
} }
impl<'a> Seek for WiiPartReadStream<'a> { impl<'a> Seek for WiiPartReadStream<'a> {
@ -388,31 +528,33 @@ impl<'a> Seek for WiiPartReadStream<'a> {
SeekFrom::End(v) => (self.stable_stream_len()? as i64 + v) as u64, SeekFrom::End(v) => (self.stable_stream_len()? as i64 + v) as u64,
SeekFrom::Current(v) => (self.offset as i64 + v) as u64, SeekFrom::Current(v) => (self.offset as i64 + v) as u64,
}; };
let block = self.offset / BLOCK_SIZE as u64; let block = (self.offset / BLOCK_SIZE as u64) as u32;
if block != self.cur_block { if block != self.cur_block {
self.stream.seek(SeekFrom::Start(block * BUFFER_SIZE as u64))?; self.stream.seek(SeekFrom::Start(block as u64 * SECTOR_SIZE as u64))?;
self.cur_block = u64::MAX; self.cur_block = u32::MAX;
} }
io::Result::Ok(self.offset) Ok(self.offset)
} }
fn stream_position(&mut self) -> io::Result<u64> { io::Result::Ok(self.offset) } fn stream_position(&mut self) -> io::Result<u64> { Ok(self.offset) }
} }
impl<'a> ReadStream for WiiPartReadStream<'a> { impl<'a> ReadStream for WiiPartReadStream<'a> {
fn stable_stream_len(&mut self) -> io::Result<u64> { fn stable_stream_len(&mut self) -> io::Result<u64> {
io::Result::Ok(to_block_size(self.stream.stable_stream_len()?)) Ok(to_block_size(self.stream.stable_stream_len()?))
} }
fn as_dyn(&mut self) -> &mut dyn ReadStream { self } fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
} }
#[derive(Clone, Debug, PartialEq, BinRead)] #[derive(Clone, Debug, PartialEq)]
pub(crate) struct WiiPartition { pub(crate) struct WiiPartition {
header: Header, header: Header,
bi2_header: BI2Header, // #[br(seek_before = SeekFrom::Start(0x400))]
#[br(seek_before = SeekFrom::Start((header.fst_off as u64) << 2))] part_header: PartitionHeader,
#[br(parse_with = node_parser)] // bi2_header: BI2Header,
// #[br(seek_before = SeekFrom::Start((part_header.fst_off as u64) << 2))]
// #[br(parse_with = node_parser)]
root_node: NodeType, root_node: NodeType,
} }
@ -420,4 +562,22 @@ impl PartHeader for WiiPartition {
fn root_node(&self) -> &NodeType { &self.root_node } fn root_node(&self) -> &NodeType { &self.root_node }
fn find_node(&self, path: &str) -> Option<&NodeType> { find_node(&self.root_node, path) } fn find_node(&self, path: &str) -> Option<&NodeType> { find_node(&self.root_node, path) }
fn boot_bytes(&self) -> &[u8] { todo!() }
fn bi2_bytes(&self) -> &[u8] { todo!() }
fn apploader_bytes(&self) -> &[u8] { todo!() }
fn fst_bytes(&self) -> &[u8] { todo!() }
fn dol_bytes(&self) -> &[u8] { todo!() }
fn disc_header(&self) -> &Header { todo!() }
fn partition_header(&self) -> &PartitionHeader { todo!() }
fn apploader_header(&self) -> &AppLoaderHeader { todo!() }
fn dol_header(&self) -> &DolHeader { todo!() }
} }

View File

@ -1,10 +1,18 @@
//! Disc file system types //! Disc file system types
use std::io::{Read, Seek, SeekFrom}; use std::{
ffi::CString,
io,
io::{Read, Seek, SeekFrom},
};
use binrw::{binread, BinReaderExt, BinResult, NullString, ReadOptions};
use encoding_rs::SHIFT_JIS; use encoding_rs::SHIFT_JIS;
use crate::{
util::reader::{struct_size, FromReader, DYNAMIC_SIZE, U24},
Result, ResultContext,
};
/// File system node kind. /// File system node kind.
#[derive(Clone, Debug, PartialEq)] #[derive(Clone, Debug, PartialEq)]
pub enum NodeKind { pub enum NodeKind {
@ -14,17 +22,30 @@ pub enum NodeKind {
Directory, Directory,
} }
impl FromReader for NodeKind {
type Args<'a> = ();
const STATIC_SIZE: usize = 1;
fn from_reader_args<R>(_reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
where R: Read + ?Sized {
match u8::from_reader(_reader)? {
0 => Ok(NodeKind::File),
1 => Ok(NodeKind::Directory),
_ => Err(io::Error::new(io::ErrorKind::InvalidData, "invalid node kind")),
}
}
}
/// An individual file system node. /// An individual file system node.
#[binread]
#[derive(Clone, Debug, PartialEq)] #[derive(Clone, Debug, PartialEq)]
pub struct Node { pub struct Node {
#[br(temp)]
type_and_name_offset: u32,
/// File system node type. /// File system node type.
#[br(calc = if (type_and_name_offset >> 24) != 0 { NodeKind::Directory } else { NodeKind::File })]
pub kind: NodeKind, pub kind: NodeKind,
/// Offset in the string table to the filename.
pub name_offset: u32,
/// For files, this is the partition offset of the file data. (Wii: >> 2) /// For files, this is the partition offset of the file data. (Wii: >> 2)
/// ///
/// For directories, this is the children start offset in the FST. /// For directories, this is the children start offset in the FST.
@ -37,11 +58,28 @@ pub struct Node {
/// Number of child files and directories recursively is `length - offset`. /// Number of child files and directories recursively is `length - offset`.
pub length: u32, pub length: u32,
#[br(calc = type_and_name_offset & 0xffffff)]
name_offset: u32,
#[br(ignore)]
/// The node name. /// The node name.
pub name: Box<str>, pub name: String,
}
impl FromReader for Node {
type Args<'a> = ();
const STATIC_SIZE: usize = struct_size([
NodeKind::STATIC_SIZE, // type
U24::STATIC_SIZE, // name_offset
u32::STATIC_SIZE, // offset
u32::STATIC_SIZE, // length
]);
fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
where R: Read + ?Sized {
let kind = NodeKind::from_reader(reader)?;
let name_offset = U24::from_reader(reader)?.0;
let offset = u32::from_reader(reader)?;
let length = u32::from_reader(reader)?;
Ok(Node { kind, offset, length, name_offset, name: Default::default() })
}
} }
/// Contains a file system node, and if a directory, its children. /// Contains a file system node, and if a directory, its children.
@ -53,74 +91,77 @@ pub enum NodeType {
Directory(Node, Vec<NodeType>), Directory(Node, Vec<NodeType>),
} }
fn read_node<R: Read + Seek>(reader: &mut R, ro: &ReadOptions, i: &mut u32) -> BinResult<NodeType> { impl FromReader for NodeType {
let node = reader.read_type::<Node>(ro.endian())?; type Args<'a> = &'a mut u32;
*i += 1;
BinResult::Ok(if node.kind == NodeKind::Directory { const STATIC_SIZE: usize = DYNAMIC_SIZE;
let mut children: Vec<NodeType> = Vec::new();
children.reserve((node.length - *i) as usize); fn from_reader_args<R>(reader: &mut R, idx: &mut u32) -> io::Result<Self>
while *i < node.length { where R: Read + ?Sized {
children.push(read_node(reader, ro, i)?); let node = Node::from_reader(reader)?;
*idx += 1;
Ok(if node.kind == NodeKind::Directory {
let mut children = Vec::with_capacity((node.length - *idx) as usize);
while *idx < node.length {
children.push(NodeType::from_reader_args(reader, idx)?);
} }
NodeType::Directory(node, children) NodeType::Directory(node, children)
} else { } else {
NodeType::File(node) NodeType::File(node)
}) })
}
} }
fn read_node_name<R: Read + Seek>( fn read_node_name<R>(
reader: &mut R, reader: &mut R,
ro: &ReadOptions, string_base: u64,
base: u64,
node: &mut NodeType, node: &mut NodeType,
root: bool, root: bool,
) -> BinResult<()> { ) -> io::Result<()>
let mut decode_name = |v: &mut Node| -> BinResult<()> { where
R: Read + Seek + ?Sized,
{
let mut decode_name = |v: &mut Node| -> io::Result<()> {
if !root { if !root {
let offset = base + v.name_offset as u64; let offset = string_base + v.name_offset as u64;
reader.seek(SeekFrom::Start(offset))?; reader.seek(SeekFrom::Start(offset))?;
let null_string = reader.read_type::<NullString>(ro.endian())?;
let (res, _, errors) = SHIFT_JIS.decode(&*null_string.0); let c_string = CString::from_reader(reader)?;
let (decoded, _, errors) = SHIFT_JIS.decode(c_string.as_bytes());
if errors { if errors {
return BinResult::Err(binrw::Error::Custom { return Err(io::Error::new(io::ErrorKind::InvalidData, "invalid shift-jis"));
pos: offset,
err: Box::new("Failed to decode node name"),
});
} }
v.name = res.into(); v.name = decoded.into_owned();
} }
BinResult::Ok(()) Ok(())
}; };
match node { match node {
NodeType::File(v) => { NodeType::File(inner) => {
decode_name(v)?; decode_name(inner)?;
} }
NodeType::Directory(v, c) => { NodeType::Directory(inner, children) => {
decode_name(v)?; decode_name(inner)?;
for x in c { for child in children {
read_node_name(reader, ro, base, x, false)?; read_node_name(reader, string_base, child, false)?;
} }
} }
} }
BinResult::Ok(()) Ok(())
} }
pub(crate) fn node_parser<R: Read + Seek>( pub(crate) fn read_fst<R>(reader: &mut R) -> Result<NodeType>
reader: &mut R, where R: Read + Seek + ?Sized {
ro: &ReadOptions, let mut node = NodeType::from_reader_args(reader, &mut 0).context("Parsing FST nodes")?;
_: (), let string_base = reader.stream_position().context("Reading FST end position")?;
) -> BinResult<NodeType> { read_node_name(reader, string_base, &mut node, true).context("Reading FST node names")?;
let mut node = read_node(reader, ro, &mut 0)?; Ok(node)
let base = reader.stream_position()?;
read_node_name(reader, ro, base, &mut node, true)?;
BinResult::Ok(node)
} }
fn matches_name(node: &NodeType, name: &str) -> bool { fn matches_name(node: &NodeType, name: &str) -> bool {
match node { match node {
NodeType::File(v) => v.name.as_ref().eq_ignore_ascii_case(name), NodeType::File(v) => v.name.as_str().eq_ignore_ascii_case(name),
NodeType::Directory(v, _) => { NodeType::Directory(v, _) => {
v.name.is_empty() /* root */ || v.name.as_ref().eq_ignore_ascii_case(name) v.name.is_empty() /* root */ || v.name.as_str().eq_ignore_ascii_case(name)
} }
} }
} }
@ -132,7 +173,7 @@ pub(crate) fn find_node<'a>(mut node: &'a NodeType, path: &str) -> Option<&'a No
if matches_name(node, current.unwrap()) { if matches_name(node, current.unwrap()) {
match node { match node {
NodeType::File(_) => { NodeType::File(_) => {
return if split.next().is_none() { Option::Some(node) } else { Option::None }; return if split.next().is_none() { Some(node) } else { None };
} }
NodeType::Directory(v, c) => { NodeType::Directory(v, c) => {
// Find child // Find child
@ -140,11 +181,7 @@ pub(crate) fn find_node<'a>(mut node: &'a NodeType, path: &str) -> Option<&'a No
current = split.next(); current = split.next();
} }
if current.is_none() || current.unwrap().is_empty() { if current.is_none() || current.unwrap().is_empty() {
return if split.next().is_none() { return if split.next().is_none() { Some(node) } else { None };
Option::Some(node)
} else {
Option::None
};
} }
for x in c { for x in c {
if matches_name(x, current.unwrap()) { if matches_name(x, current.unwrap()) {
@ -158,5 +195,5 @@ pub(crate) fn find_node<'a>(mut node: &'a NodeType, path: &str) -> Option<&'a No
break; break;
} }
} }
Option::None None
} }

View File

@ -13,7 +13,7 @@ pub(crate) struct DiscIOISO {
impl DiscIOISO { impl DiscIOISO {
pub(crate) fn new(filename: &Path) -> Result<DiscIOISO> { pub(crate) fn new(filename: &Path) -> Result<DiscIOISO> {
Result::Ok(DiscIOISO { filename: filename.to_owned() }) Ok(DiscIOISO { filename: filename.to_owned() })
} }
} }
@ -21,21 +21,25 @@ impl DiscIO for DiscIOISO {
fn begin_read_stream(&mut self, offset: u64) -> io::Result<Box<dyn ReadStream>> { fn begin_read_stream(&mut self, offset: u64) -> io::Result<Box<dyn ReadStream>> {
let mut file = File::open(&*self.filename)?; let mut file = File::open(&*self.filename)?;
file.seek(SeekFrom::Start(offset))?; file.seek(SeekFrom::Start(offset))?;
io::Result::Ok(Box::from(file)) Ok(Box::from(file))
} }
} }
pub(crate) struct DiscIOISOStream<T: ReadStream + Sized> { pub(crate) struct DiscIOISOStream<T>
where T: ReadStream + Sized
{
pub(crate) stream: T, pub(crate) stream: T,
} }
impl<T: ReadStream + Sized> DiscIOISOStream<T> { impl<T> DiscIOISOStream<T>
pub(crate) fn new(stream: T) -> Result<DiscIOISOStream<T>> { where T: ReadStream + Sized
Result::Ok(DiscIOISOStream { stream }) {
} pub(crate) fn new(stream: T) -> Result<DiscIOISOStream<T>> { Ok(DiscIOISOStream { stream }) }
} }
impl<T: ReadStream + Sized + Send + Sync> DiscIO for DiscIOISOStream<T> { impl<T> DiscIO for DiscIOISOStream<T>
where T: ReadStream + Sized + Send + Sync
{
fn begin_read_stream<'a>(&'a mut self, offset: u64) -> io::Result<Box<dyn ReadStream + 'a>> { fn begin_read_stream<'a>(&'a mut self, offset: u64) -> io::Result<Box<dyn ReadStream + 'a>> {
let size = self.stream.stable_stream_len()?; let size = self.stream.stable_stream_len()?;
let mut stream = self.stream.new_window(0, size)?; let mut stream = self.stream.new_window(0, size)?;

View File

@ -6,6 +6,7 @@ use crate::{
io::{ io::{
iso::{DiscIOISO, DiscIOISOStream}, iso::{DiscIOISO, DiscIOISOStream},
nfs::DiscIONFS, nfs::DiscIONFS,
wia::DiscIOWIA,
}, },
streams::{ByteReadStream, ReadStream}, streams::{ByteReadStream, ReadStream},
Error, Result, Error, Result,
@ -13,6 +14,13 @@ use crate::{
pub(crate) mod iso; pub(crate) mod iso;
pub(crate) mod nfs; pub(crate) mod nfs;
pub(crate) mod wia;
#[derive(Default, Debug, Clone)]
pub struct DiscIOOptions {
/// Rebuild hashes for the disc image.
pub rebuild_hashes: bool,
}
/// Abstraction over supported disc file types. /// Abstraction over supported disc file types.
pub trait DiscIO: Send + Sync { pub trait DiscIO: Send + Sync {
@ -30,43 +38,40 @@ pub trait DiscIO: Send + Sync {
/// ///
/// Basic usage: /// Basic usage:
/// ```no_run /// ```no_run
/// use nod::io::new_disc_io; /// use nod::io::{new_disc_io, DiscIOOptions};
/// ///
/// let mut disc_io = new_disc_io("path/to/file".as_ref())?; /// # fn main() -> nod::Result<()> {
/// # Ok::<(), nod::Error>(()) /// let options = DiscIOOptions::default();
/// let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?;
/// # Ok(())
/// # }
/// ``` /// ```
pub fn new_disc_io(filename: &Path) -> Result<Box<dyn DiscIO>> { pub fn new_disc_io(filename: &Path, options: &DiscIOOptions) -> Result<Box<dyn DiscIO>> {
let path_result = fs::canonicalize(filename); let path_result = fs::canonicalize(filename);
if let Err(err) = path_result { if let Err(err) = path_result {
return Result::Err(Error::Io( return Err(Error::Io(format!("Failed to open {}", filename.display()), err));
format!("Failed to open {}", filename.to_string_lossy()),
err,
));
} }
let path = path_result.as_ref().unwrap(); let path = path_result.as_ref().unwrap();
let meta = fs::metadata(path); let meta = fs::metadata(path);
if let Err(err) = meta { if let Err(err) = meta {
return Result::Err(Error::Io( return Err(Error::Io(format!("Failed to open {}", filename.display()), err));
format!("Failed to open {}", filename.to_string_lossy()),
err,
));
} }
if !meta.unwrap().is_file() { if !meta.unwrap().is_file() {
return Result::Err(Error::DiscFormat(format!( return Err(Error::DiscFormat(format!("Input is not a file: {}", filename.display())));
"Input is not a file: {}",
filename.to_string_lossy()
)));
} }
if has_extension(path, "iso") { if has_extension(path, "iso") {
Result::Ok(Box::from(DiscIOISO::new(path)?)) Ok(Box::from(DiscIOISO::new(path)?))
} else if has_extension(path, "nfs") { } else if has_extension(path, "nfs") {
if matches!(path.parent(), Some(parent) if parent.is_dir()) { match path.parent() {
Result::Ok(Box::from(DiscIONFS::new(path.parent().unwrap())?)) Some(parent) if parent.is_dir() => {
} else { Ok(Box::from(DiscIONFS::new(path.parent().unwrap())?))
Result::Err(Error::DiscFormat("Failed to locate NFS parent directory".to_string()))
} }
_ => Err(Error::DiscFormat("Failed to locate NFS parent directory".to_string())),
}
} else if has_extension(path, "wia") || has_extension(path, "rvz") {
Ok(Box::from(DiscIOWIA::new(path, options)?))
} else { } else {
Result::Err(Error::DiscFormat("Unknown file type".to_string())) Err(Error::DiscFormat("Unknown file type".to_string()))
} }
} }
@ -78,9 +83,11 @@ pub fn new_disc_io(filename: &Path) -> Result<Box<dyn DiscIO>> {
/// ```no_run /// ```no_run
/// use nod::io::new_disc_io_from_buf; /// use nod::io::new_disc_io_from_buf;
/// ///
/// # #[allow(non_upper_case_globals)] const buf: [u8; 0] = []; /// # fn main() -> nod::Result<()> {
/// let mut disc_io = new_disc_io_from_buf(&buf)?; /// # #[allow(non_upper_case_globals)] const buf: &[u8] = &[0u8; 0];
/// # Ok::<(), nod::Error>(()) /// let mut disc_io = new_disc_io_from_buf(buf)?;
/// # Ok(())
/// # }
/// ``` /// ```
pub fn new_disc_io_from_buf(buf: &[u8]) -> Result<Box<dyn DiscIO + '_>> { pub fn new_disc_io_from_buf(buf: &[u8]) -> Result<Box<dyn DiscIO + '_>> {
new_disc_io_from_stream(ByteReadStream { bytes: buf, position: 0 }) new_disc_io_from_stream(ByteReadStream { bytes: buf, position: 0 })
@ -92,11 +99,15 @@ pub fn new_disc_io_from_buf(buf: &[u8]) -> Result<Box<dyn DiscIO + '_>> {
/// ///
/// Basic usage: /// Basic usage:
/// ```no_run /// ```no_run
/// use nod::io::new_disc_io_from_buf; /// use nod::io::new_disc_io_from_stream;
/// use nod::streams::ByteReadStream;
/// ///
/// # #[allow(non_upper_case_globals)] const buf: [u8; 0] = []; /// # fn main() -> nod::Result<()> {
/// let mut disc_io = new_disc_io_from_buf(&buf)?; /// # #[allow(non_upper_case_globals)] const buf: &[u8] = &[0u8; 0];
/// # Ok::<(), nod::Error>(()) /// let stream = ByteReadStream { bytes: buf, position: 0 };
/// let mut disc_io = new_disc_io_from_stream(stream)?;
/// # Ok(())
/// # }
/// ``` /// ```
pub fn new_disc_io_from_stream<'a, T: 'a + ReadStream + Sized + Send + Sync>( pub fn new_disc_io_from_stream<'a, T: 'a + ReadStream + Sized + Send + Sync>(
stream: T, stream: T,
@ -107,11 +118,8 @@ pub fn new_disc_io_from_stream<'a, T: 'a + ReadStream + Sized + Send + Sync>(
/// Helper function for checking a file extension. /// Helper function for checking a file extension.
#[inline(always)] #[inline(always)]
pub fn has_extension(filename: &Path, extension: &str) -> bool { pub fn has_extension(filename: &Path, extension: &str) -> bool {
if let Some(ext) = filename.extension() { match filename.extension() {
// TODO use with Rust 1.53+ Some(ext) => ext.eq_ignore_ascii_case(extension),
// ext.eq_ignore_ascii_case(extension) None => false,
ext.to_str().unwrap_or("").eq_ignore_ascii_case(extension)
} else {
false
} }
} }

View File

@ -1,36 +1,87 @@
use std::{ use std::{
fs::File, fs::File,
io, io,
io::{Read, Seek, SeekFrom}, io::{BufReader, Read, Seek, SeekFrom},
path::{Component, Path, PathBuf}, path::{Component, Path, PathBuf},
}; };
use aes::{Aes128, NewBlockCipher}; use aes::{
use binrw::{binread, BinRead, BinReaderExt}; cipher::{block_padding::NoPadding, BlockDecryptMut, KeyIvInit},
use block_modes::{block_padding::NoPadding, BlockMode, Cbc}; Aes128,
};
use crate::{disc::BUFFER_SIZE, io::DiscIO, streams::ReadStream, Error, Result}; use crate::{
disc::SECTOR_SIZE,
io::DiscIO,
streams::ReadStream,
util::reader::{read_vec, struct_size, FromReader},
Error, Result, ResultContext,
};
type Aes128Cbc = Cbc<Aes128, NoPadding>; type Aes128Cbc = cbc::Decryptor<Aes128>;
#[derive(Clone, Debug, PartialEq, BinRead)] #[derive(Clone, Debug, PartialEq)]
pub(crate) struct LBARange { pub(crate) struct LBARange {
pub(crate) start_block: u32, pub(crate) start_block: u32,
pub(crate) num_blocks: u32, pub(crate) num_blocks: u32,
} }
#[binread] impl FromReader for LBARange {
type Args<'a> = ();
const STATIC_SIZE: usize = struct_size([
u32::STATIC_SIZE, // start_block
u32::STATIC_SIZE, // num_blocks
]);
fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
where R: Read + ?Sized {
Ok(LBARange {
start_block: u32::from_reader(reader)?,
num_blocks: u32::from_reader(reader)?,
})
}
}
type MagicBytes = [u8; 4];
#[derive(Clone, Debug, PartialEq)] #[derive(Clone, Debug, PartialEq)]
#[br(magic = b"EGGS", assert(end_magic == * b"SGGE"))]
pub(crate) struct NFSHeader { pub(crate) struct NFSHeader {
pub(crate) version: u32, pub(crate) version: u32,
pub(crate) unk1: u32, pub(crate) unk1: u32,
pub(crate) unk2: u32, pub(crate) unk2: u32,
pub(crate) lba_range_count: u32,
#[br(count = 61)]
pub(crate) lba_ranges: Vec<LBARange>, pub(crate) lba_ranges: Vec<LBARange>,
#[br(temp)] }
pub(crate) end_magic: [u8; 4],
impl FromReader for NFSHeader {
type Args<'a> = ();
const STATIC_SIZE: usize = struct_size([
MagicBytes::STATIC_SIZE, // magic
u32::STATIC_SIZE, // version
u32::STATIC_SIZE, // unk1
u32::STATIC_SIZE, // unk2
u32::STATIC_SIZE, // lba_range_count
LBARange::STATIC_SIZE * 61, // lba_ranges
MagicBytes::STATIC_SIZE, // end_magic
]);
fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
where R: Read + ?Sized {
if MagicBytes::from_reader(reader)? != *b"EGGS" {
return Err(io::Error::new(io::ErrorKind::InvalidData, "Invalid NFS magic"));
}
let version = u32::from_reader(reader)?;
let unk1 = u32::from_reader(reader)?;
let unk2 = u32::from_reader(reader)?;
let lba_range_count = u32::from_reader(reader)?;
let mut lba_ranges = read_vec(reader, 61)?;
lba_ranges.truncate(lba_range_count as usize);
if MagicBytes::from_reader(reader)? != *b"SGGE" {
return Err(io::Error::new(io::ErrorKind::InvalidData, "Invalid NFS end magic"));
}
Ok(NFSHeader { version, unk1, unk2, lba_ranges })
}
} }
#[derive(Clone, Copy, Debug, PartialEq)] #[derive(Clone, Copy, Debug, PartialEq)]
@ -49,11 +100,8 @@ impl Default for Fbo {
impl NFSHeader { impl NFSHeader {
pub(crate) fn calculate_num_files(&self) -> u32 { pub(crate) fn calculate_num_files(&self) -> u32 {
let total_block_count = self let total_block_count =
.lba_ranges self.lba_ranges.iter().fold(0u32, |acc, range| acc + range.num_blocks);
.iter()
.take(self.lba_range_count as usize)
.fold(0u32, |acc, range| acc + range.num_blocks);
(((total_block_count as u64) * 0x8000u64 + (0x200u64 + 0xF9FFFFFu64)) / 0xFA00000u64) as u32 (((total_block_count as u64) * 0x8000u64 + (0x200u64 + 0xF9FFFFFu64)) / 0xFA00000u64) as u32
} }
@ -62,7 +110,7 @@ impl NFSHeader {
let block_off = (offset % 0x8000) as u32; let block_off = (offset % 0x8000) as u32;
let mut block = u32::MAX; let mut block = u32::MAX;
let mut physical_block = 0u32; let mut physical_block = 0u32;
for range in self.lba_ranges.iter().take(self.lba_range_count as usize) { for range in self.lba_ranges.iter() {
if block_div >= range.start_block && block_div - range.start_block < range.num_blocks { if block_div >= range.start_block && block_div - range.start_block < range.num_blocks {
block = physical_block + (block_div - range.start_block); block = physical_block + (block_div - range.start_block);
break; break;
@ -85,17 +133,16 @@ pub(crate) struct DiscIONFS {
impl DiscIONFS { impl DiscIONFS {
pub(crate) fn new(directory: &Path) -> Result<DiscIONFS> { pub(crate) fn new(directory: &Path) -> Result<DiscIONFS> {
let mut disc_io = let mut disc_io = DiscIONFS { directory: directory.to_owned(), key: [0; 16], header: None };
DiscIONFS { directory: directory.to_owned(), key: [0; 16], header: Option::None };
disc_io.validate_files()?; disc_io.validate_files()?;
Result::Ok(disc_io) Ok(disc_io)
} }
} }
pub(crate) struct NFSReadStream<'a> { pub(crate) struct NFSReadStream<'a> {
disc_io: &'a DiscIONFS, disc_io: &'a DiscIONFS,
file: Option<File>, file: Option<File>,
crypto: Aes128, crypto: [u8; 16],
// Physical address - all UINT32_MAX indicates logical zero block // Physical address - all UINT32_MAX indicates logical zero block
phys_addr: Fbo, phys_addr: Fbo,
// Logical address // Logical address
@ -104,18 +151,21 @@ pub(crate) struct NFSReadStream<'a> {
// Block is typically one ahead of the presently decrypted block. // Block is typically one ahead of the presently decrypted block.
cur_file: u32, cur_file: u32,
cur_block: u32, cur_block: u32,
buf: [u8; BUFFER_SIZE], buf: [u8; SECTOR_SIZE],
} }
impl<'a> NFSReadStream<'a> { impl<'a> NFSReadStream<'a> {
fn set_cur_file(&mut self, cur_file: u32) -> Result<()> { fn set_cur_file(&mut self, cur_file: u32) -> Result<()> {
if cur_file >= self.disc_io.header.as_ref().unwrap().calculate_num_files() { if cur_file >= self.disc_io.header.as_ref().unwrap().calculate_num_files() {
return Result::Err(Error::DiscFormat("Out of bounds NFS file access".to_string())); return Err(Error::DiscFormat(format!("Out of bounds NFS file access: {}", cur_file)));
} }
self.cur_file = cur_file; self.cur_file = cur_file;
self.cur_block = u32::MAX; self.cur_block = u32::MAX;
self.file = Option::from(File::open(self.disc_io.get_nfs(cur_file)?)?); let path = self.disc_io.get_nfs(cur_file)?;
Result::Ok(()) self.file = Option::from(
File::open(&path).with_context(|| format!("Opening file {}", path.display()))?,
);
Ok(())
} }
fn set_cur_block(&mut self, cur_block: u32) -> io::Result<()> { fn set_cur_block(&mut self, cur_block: u32) -> io::Result<()> {
@ -123,22 +173,22 @@ impl<'a> NFSReadStream<'a> {
self.file self.file
.as_ref() .as_ref()
.unwrap() .unwrap()
.seek(SeekFrom::Start(self.cur_block as u64 * BUFFER_SIZE as u64 + 0x200u64))?; .seek(SeekFrom::Start(self.cur_block as u64 * SECTOR_SIZE as u64 + 0x200u64))?;
io::Result::Ok(()) Ok(())
} }
fn set_phys_addr(&mut self, phys_addr: Fbo) -> Result<()> { fn set_phys_addr(&mut self, phys_addr: Fbo) -> Result<()> {
// If we're just changing the offset, nothing else needs to be done // If we're just changing the offset, nothing else needs to be done
if self.phys_addr.file == phys_addr.file && self.phys_addr.block == phys_addr.block { if self.phys_addr.file == phys_addr.file && self.phys_addr.block == phys_addr.block {
self.phys_addr.offset = phys_addr.offset; self.phys_addr.offset = phys_addr.offset;
return Result::Ok(()); return Ok(());
} }
self.phys_addr = phys_addr; self.phys_addr = phys_addr;
// Set logical zero block // Set logical zero block
if phys_addr.file == u32::MAX { if phys_addr.file == u32::MAX {
self.buf.fill(0u8); self.buf.fill(0u8);
return Result::Ok(()); return Ok(());
} }
// Make necessary file and block current with system // Make necessary file and block current with system
@ -146,17 +196,30 @@ impl<'a> NFSReadStream<'a> {
self.set_cur_file(phys_addr.file)?; self.set_cur_file(phys_addr.file)?;
} }
if phys_addr.block != self.cur_block { if phys_addr.block != self.cur_block {
self.set_cur_block(phys_addr.block)?; self.set_cur_block(phys_addr.block)
.with_context(|| format!("Seeking to NFS block {}", phys_addr.block))?;
} }
// Read block, handling 0x200 overlap case // Read block, handling 0x200 overlap case
if phys_addr.block == 7999 { if phys_addr.block == 7999 {
self.file.as_ref().unwrap().read_exact(&mut self.buf[..BUFFER_SIZE - 0x200])?; self.file
.as_ref()
.unwrap()
.read_exact(&mut self.buf[..SECTOR_SIZE - 0x200])
.context("Reading NFS block 7999 part 1")?;
self.set_cur_file(self.cur_file + 1)?; self.set_cur_file(self.cur_file + 1)?;
self.file.as_ref().unwrap().read_exact(&mut self.buf[BUFFER_SIZE - 0x200..])?; self.file
.as_ref()
.unwrap()
.read_exact(&mut self.buf[SECTOR_SIZE - 0x200..])
.context("Reading NFS block 7999 part 2")?;
self.cur_block = 0; self.cur_block = 0;
} else { } else {
self.file.as_ref().unwrap().read_exact(&mut self.buf)?; self.file
.as_ref()
.unwrap()
.read_exact(&mut self.buf)
.with_context(|| format!("Reading NFS block {}", phys_addr.block))?;
self.cur_block += 1; self.cur_block += 1;
} }
@ -169,9 +232,10 @@ impl<'a> NFSReadStream<'a> {
((phys_addr.l_block >> 16) & 0xFF) as u8, ((phys_addr.l_block >> 16) & 0xFF) as u8,
((phys_addr.l_block >> 24) & 0xFF) as u8, ((phys_addr.l_block >> 24) & 0xFF) as u8,
]; ];
Aes128Cbc::new(self.crypto.clone(), &iv.into()).decrypt(&mut self.buf)?; Aes128Cbc::new(self.crypto.as_ref().into(), &iv.into())
.decrypt_padded_mut::<NoPadding>(&mut self.buf)?;
Result::Ok(()) Ok(())
} }
fn set_logical_addr(&mut self, addr: u64) -> Result<()> { fn set_logical_addr(&mut self, addr: u64) -> Result<()> {
@ -187,20 +251,20 @@ impl<'a> Read for NFSReadStream<'a> {
let mut read_size = rem; let mut read_size = rem;
let block_offset: usize = let block_offset: usize =
if self.phys_addr.offset == u32::MAX { 0 } else { self.phys_addr.offset as usize }; if self.phys_addr.offset == u32::MAX { 0 } else { self.phys_addr.offset as usize };
if read_size + block_offset > BUFFER_SIZE { if read_size + block_offset > SECTOR_SIZE {
read_size = BUFFER_SIZE - block_offset read_size = SECTOR_SIZE - block_offset
} }
buf[read..read + read_size] buf[read..read + read_size]
.copy_from_slice(&self.buf[block_offset..block_offset + read_size]); .copy_from_slice(&self.buf[block_offset..block_offset + read_size]);
read += read_size; read += read_size;
rem -= read_size; rem -= read_size;
self.offset += read_size as u64; self.offset += read_size as u64;
self.set_logical_addr(self.offset).map_err(|v| match v { self.set_logical_addr(self.offset).map_err(|e| match e {
Error::Io(_, v) => v, Error::Io(s, e) => io::Error::new(e.kind(), s),
_ => io::Error::from(io::ErrorKind::Other), _ => io::Error::from(io::ErrorKind::Other),
})?; })?;
} }
io::Result::Ok(read) Ok(read)
} }
} }
@ -215,10 +279,10 @@ impl<'a> Seek for NFSReadStream<'a> {
Error::Io(_, v) => v, Error::Io(_, v) => v,
_ => io::Error::from(io::ErrorKind::Other), _ => io::Error::from(io::ErrorKind::Other),
})?; })?;
io::Result::Ok(self.offset) Ok(self.offset)
} }
fn stream_position(&mut self) -> io::Result<u64> { io::Result::Ok(self.offset) } fn stream_position(&mut self) -> io::Result<u64> { Ok(self.offset) }
} }
impl<'a> ReadStream for NFSReadStream<'a> { impl<'a> ReadStream for NFSReadStream<'a> {
@ -229,15 +293,15 @@ impl<'a> ReadStream for NFSReadStream<'a> {
impl DiscIO for DiscIONFS { impl DiscIO for DiscIONFS {
fn begin_read_stream(&mut self, offset: u64) -> io::Result<Box<dyn ReadStream + '_>> { fn begin_read_stream(&mut self, offset: u64) -> io::Result<Box<dyn ReadStream + '_>> {
io::Result::Ok(Box::from(NFSReadStream { Ok(Box::from(NFSReadStream {
disc_io: self, disc_io: self,
file: Option::None, file: None,
crypto: Aes128::new(&self.key.into()), crypto: self.key,
phys_addr: Fbo::default(), phys_addr: Fbo::default(),
offset, offset,
cur_file: u32::MAX, cur_file: u32::MAX,
cur_block: u32::MAX, cur_block: u32::MAX,
buf: [0; BUFFER_SIZE], buf: [0; SECTOR_SIZE],
})) }))
} }
@ -245,7 +309,8 @@ impl DiscIO for DiscIONFS {
} }
impl DiscIONFS { impl DiscIONFS {
fn get_path<P: AsRef<Path>>(&self, path: P) -> PathBuf { fn get_path<P>(&self, path: P) -> PathBuf
where P: AsRef<Path> {
let mut buf = self.directory.clone(); let mut buf = self.directory.clone();
for component in path.as_ref().components() { for component in path.as_ref().components() {
match component { match component {
@ -261,9 +326,9 @@ impl DiscIONFS {
fn get_nfs(&self, num: u32) -> Result<PathBuf> { fn get_nfs(&self, num: u32) -> Result<PathBuf> {
let path = self.get_path(format!("hif_{:06}.nfs", num)); let path = self.get_path(format!("hif_{:06}.nfs", num));
if path.exists() { if path.exists() {
Result::Ok(path) Ok(path)
} else { } else {
Result::Err(Error::DiscFormat(format!("Failed to locate {}", path.to_string_lossy()))) Err(Error::DiscFormat(format!("Failed to locate {}", path.display())))
} }
} }
@ -278,31 +343,32 @@ impl DiscIONFS {
key_path = secondary_key_path.canonicalize(); key_path = secondary_key_path.canonicalize();
} }
if key_path.is_err() { if key_path.is_err() {
return Result::Err(Error::DiscFormat(format!( return Err(Error::DiscFormat(format!(
"Failed to locate {} or {}", "Failed to locate {} or {}",
primary_key_path.to_string_lossy(), primary_key_path.display(),
secondary_key_path.to_string_lossy() secondary_key_path.display()
))); )));
} }
let resolved_path = key_path.unwrap(); let resolved_path = key_path.unwrap();
File::open(resolved_path.as_path()) File::open(resolved_path.as_path())
.map_err(|v| { .map_err(|v| Error::Io(format!("Failed to open {}", resolved_path.display()), v))?
Error::Io(format!("Failed to open {}", resolved_path.to_string_lossy()), v)
})?
.read(&mut self.key) .read(&mut self.key)
.map_err(|v| { .map_err(|v| Error::Io(format!("Failed to read {}", resolved_path.display()), v))?;
Error::Io(format!("Failed to read {}", resolved_path.to_string_lossy()), v)
})?;
} }
{ {
// Load header from first file // Load header from first file
let header: NFSHeader = File::open(self.get_nfs(0)?)?.read_be()?; let path = self.get_nfs(0)?;
let mut file = BufReader::new(
File::open(&path).with_context(|| format!("Opening file {}", path.display()))?,
);
let header = NFSHeader::from_reader(&mut file)
.with_context(|| format!("Reading NFS header from file {}", path.display()))?;
// Ensure remaining files exist // Ensure remaining files exist
for i in 1..header.calculate_num_files() { for i in 1..header.calculate_num_files() {
self.get_nfs(i)?; self.get_nfs(i)?;
} }
self.header = Option::from(header) self.header = Option::from(header);
} }
Result::Ok(()) Ok(())
} }
} }

1541
src/io/wia.rs Normal file

File diff suppressed because it is too large Load Diff

View File

@ -1,71 +1,93 @@
#![warn(missing_docs)] #![warn(missing_docs, rustdoc::missing_doc_code_examples)]
#![warn(rustdoc::missing_doc_code_examples)]
//! Library for traversing & reading GameCube and Wii disc images. //! Library for traversing & reading GameCube and Wii disc images.
//! //!
//! Based on the C++ library [nod](https://github.com/AxioDL/nod), //! Based on the C++ library [nod](https://github.com/AxioDL/nod),
//! but does not currently support authoring. //! but does not currently support authoring.
//! //!
//! Currently supported file formats: //! Currently supported file formats:
//! - ISO //! - ISO (GCM)
//! - WIA / RVZ
//! - WBFS
//! - NFS (Wii U VC files, e.g. `hif_000000.nfs`) //! - NFS (Wii U VC files, e.g. `hif_000000.nfs`)
//! //!
//! # Examples //! # Examples
//! //!
//! Opening a disc image and reading a file: //! Opening a disc image and reading a file:
//! ```no_run //! ```no_run
//! use nod::disc::{new_disc_base, PartHeader};
//! use nod::fst::NodeType;
//! use nod::io::new_disc_io;
//! use std::io::Read; //! use std::io::Read;
//! //!
//! let mut disc_io = new_disc_io("path/to/file".as_ref())?; //! use nod::{
//! disc::{new_disc_base, PartHeader},
//! fst::NodeType,
//! io::{new_disc_io, DiscIOOptions},
//! };
//!
//! fn main() -> nod::Result<()> {
//! let options = DiscIOOptions::default();
//! let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?;
//! let disc_base = new_disc_base(disc_io.as_mut())?; //! let disc_base = new_disc_base(disc_io.as_mut())?;
//! let mut partition = disc_base.get_data_partition(disc_io.as_mut(), false)?; //! let mut partition = disc_base.get_data_partition(disc_io.as_mut(), false)?;
//! let header = partition.read_header()?; //! let header = partition.read_header()?;
//! if let Some(NodeType::File(node)) = header.find_node("/MP3/Worlds.txt") { //! if let Some(NodeType::File(node)) = header.find_node("/MP3/Worlds.txt") {
//! let mut s = String::new(); //! let mut s = String::new();
//! partition.begin_file_stream(node)?.read_to_string(&mut s); //! partition.begin_file_stream(node)?.read_to_string(&mut s).expect("Failed to read file");
//! println!("{}", s); //! println!("{}", s);
//! } //! }
//! # Ok::<(), nod::Error>(()) //! Ok(())
//! }
//! ``` //! ```
use thiserror::Error;
pub mod disc; pub mod disc;
pub mod fst; pub mod fst;
pub mod io; pub mod io;
pub mod streams; pub mod streams;
pub mod util;
/// Error types for nod. /// Error types for nod.
#[derive(Error, Debug)] #[derive(thiserror::Error, Debug)]
pub enum Error { pub enum Error {
/// An error during binary format parsing.
#[error("binary format")]
BinaryFormat(#[from] binrw::Error),
/// An error during Wii disc decryption.
#[error("encryption")]
Encryption(#[from] block_modes::BlockModeError),
/// A general I/O error.
#[error("io error: `{0}`")]
Io(String, #[source] std::io::Error),
/// An error for disc format related issues. /// An error for disc format related issues.
#[error("disc format error: `{0}`")] #[error("disc format error: {0}")]
DiscFormat(String), DiscFormat(String),
/// A general I/O error.
#[error("I/O error: {0}")]
Io(String, #[source] std::io::Error),
} }
/// Helper result type for [`enum@Error`]. /// Helper result type for [`Error`].
pub type Result<T, E = Error> = core::result::Result<T, E>; pub type Result<T, E = Error> = core::result::Result<T, E>;
impl From<std::io::Error> for Error { impl From<aes::cipher::block_padding::UnpadError> for Error {
fn from(v: std::io::Error) -> Self { Error::Io("I/O error".to_string(), v) } fn from(_: aes::cipher::block_padding::UnpadError) -> Self { unreachable!() }
} }
#[inline(always)] impl From<base16ct::Error> for Error {
pub(crate) fn div_rem<T: std::ops::Div<Output = T> + std::ops::Rem<Output = T> + Copy>( fn from(_: base16ct::Error) -> Self { unreachable!() }
x: T, }
y: T,
) -> (T, T) { pub trait ErrorContext {
let quot = x / y; fn context(self, context: impl Into<String>) -> Error;
let rem = x % y; }
(quot, rem)
impl ErrorContext for std::io::Error {
fn context(self, context: impl Into<String>) -> Error { Error::Io(context.into(), self) }
}
pub trait ResultContext<T> {
fn context(self, context: impl Into<String>) -> Result<T>;
fn with_context<F>(self, f: F) -> Result<T>
where F: FnOnce() -> String;
}
impl<T, E> ResultContext<T> for Result<T, E>
where E: ErrorContext
{
fn context(self, context: impl Into<String>) -> Result<T> {
self.map_err(|e| e.context(context))
}
fn with_context<F>(self, f: F) -> Result<T>
where F: FnOnce() -> String {
self.map_err(|e| e.context(f()))
}
} }

View File

@ -43,11 +43,7 @@ pub trait ReadStream: Read + Seek {
/// Seeks underlying stream immediately. /// Seeks underlying stream immediately.
fn new_window(&mut self, offset: u64, size: u64) -> io::Result<SharedWindowedReadStream> { fn new_window(&mut self, offset: u64, size: u64) -> io::Result<SharedWindowedReadStream> {
self.seek(SeekFrom::Start(offset))?; self.seek(SeekFrom::Start(offset))?;
io::Result::Ok(SharedWindowedReadStream { Ok(SharedWindowedReadStream { base: self.as_dyn(), begin: offset, end: offset + size })
base: self.as_dyn(),
begin: offset,
end: offset + size,
})
} }
/// Retrieves a type-erased reference to the stream. /// Retrieves a type-erased reference to the stream.
@ -91,7 +87,7 @@ pub fn wrap_windowed<'a>(
size: u64, size: u64,
) -> io::Result<OwningWindowedReadStream<'a>> { ) -> io::Result<OwningWindowedReadStream<'a>> {
base.seek(SeekFrom::Start(offset))?; base.seek(SeekFrom::Start(offset))?;
io::Result::Ok(OwningWindowedReadStream { base, begin: offset, end: offset + size }) Ok(OwningWindowedReadStream { base, begin: offset, end: offset + size })
} }
/// A non-owning window into an existing [`ReadStream`]. /// A non-owning window into an existing [`ReadStream`].
@ -110,7 +106,7 @@ impl<'a> SharedWindowedReadStream<'a> {
self.base.seek(SeekFrom::Start(begin))?; self.base.seek(SeekFrom::Start(begin))?;
self.begin = begin; self.begin = begin;
self.end = end; self.end = end;
io::Result::Ok(()) Ok(())
} }
} }
@ -137,9 +133,9 @@ fn windowed_seek(stream: &mut dyn WindowedReadStream, pos: SeekFrom) -> io::Resu
SeekFrom::Current(_) => pos, SeekFrom::Current(_) => pos,
})?; })?;
if result < begin || result > end { if result < begin || result > end {
io::Result::Err(io::Error::from(io::ErrorKind::UnexpectedEof)) Err(io::Error::from(io::ErrorKind::UnexpectedEof))
} else { } else {
io::Result::Ok(result - begin) Ok(result - begin)
} }
} }
@ -151,12 +147,12 @@ impl<'a> Seek for OwningWindowedReadStream<'a> {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> { windowed_seek(self, pos) } fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> { windowed_seek(self, pos) }
fn stream_position(&mut self) -> io::Result<u64> { fn stream_position(&mut self) -> io::Result<u64> {
Result::Ok(self.base.stream_position()? - self.begin) Ok(self.base.stream_position()? - self.begin)
} }
} }
impl<'a> ReadStream for OwningWindowedReadStream<'a> { impl<'a> ReadStream for OwningWindowedReadStream<'a> {
fn stable_stream_len(&mut self) -> io::Result<u64> { Result::Ok(self.end - self.begin) } fn stable_stream_len(&mut self) -> io::Result<u64> { Ok(self.end - self.begin) }
fn as_dyn(&mut self) -> &mut dyn ReadStream { self } fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
} }
@ -175,12 +171,12 @@ impl<'a> Seek for SharedWindowedReadStream<'a> {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> { windowed_seek(self, pos) } fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> { windowed_seek(self, pos) }
fn stream_position(&mut self) -> io::Result<u64> { fn stream_position(&mut self) -> io::Result<u64> {
Result::Ok(self.base.stream_position()? - self.begin) Ok(self.base.stream_position()? - self.begin)
} }
} }
impl<'a> ReadStream for SharedWindowedReadStream<'a> { impl<'a> ReadStream for SharedWindowedReadStream<'a> {
fn stable_stream_len(&mut self) -> io::Result<u64> { Result::Ok(self.end - self.begin) } fn stable_stream_len(&mut self) -> io::Result<u64> { Ok(self.end - self.begin) }
fn as_dyn(&mut self) -> &mut dyn ReadStream { self } fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
} }

77
src/util/lfg.rs Normal file
View File

@ -0,0 +1,77 @@
use std::{cmp::min, io, io::Read};
pub(crate) const LFG_K: usize = 521;
pub(crate) const LFG_J: usize = 32;
pub(crate) const SEED_SIZE: usize = 17;

/// Lagged Fibonacci generator for Wii partition junk data.
/// https://github.com/dolphin-emu/dolphin/blob/master/docs/WiaAndRvz.md#prng-algorithm
pub(crate) struct LaggedFibonacci {
    // Generator state: LFG_K 32-bit words, refreshed in bulk by `forward`.
    buffer: [u32; LFG_K],
    // Byte offset of the next output byte within `buffer` (0..LFG_K * 4).
    position: usize,
}

impl Default for LaggedFibonacci {
    fn default() -> Self { Self { buffer: [0u32; LFG_K], position: 0 } }
}

impl LaggedFibonacci {
    /// Expands the SEED_SIZE seed words into the full LFG_K-word state,
    /// applies the byte-reordering pass, and discards the first four
    /// generations, matching the reference algorithm.
    fn init(&mut self) {
        for i in SEED_SIZE..LFG_K {
            self.buffer[i] =
                (self.buffer[i - 17] << 23) ^ (self.buffer[i - 16] >> 9) ^ self.buffer[i - 1];
        }
        for x in self.buffer.iter_mut() {
            *x = ((*x & 0xFF00FFFF) | (*x >> 2 & 0x00FF0000)).swap_bytes();
        }
        for _ in 0..4 {
            self.forward();
        }
    }

    /// Seeds the generator with SEED_SIZE big-endian u32 values read from
    /// `reader`, then initializes the remaining state.
    pub(crate) fn init_with_reader<R>(&mut self, reader: &mut R) -> io::Result<()>
    where R: Read + ?Sized {
        let mut seed = [0u8; SEED_SIZE * 4];
        reader.read_exact(&mut seed)?;
        for (word, bytes) in self.buffer[..SEED_SIZE].iter_mut().zip(seed.chunks_exact(4)) {
            *word = u32::from_be_bytes([bytes[0], bytes[1], bytes[2], bytes[3]]);
        }
        self.position = 0;
        self.init();
        Ok(())
    }

    /// Advances the state by one full generation (LFG_K words).
    pub(crate) fn forward(&mut self) {
        for i in 0..LFG_J {
            self.buffer[i] ^= self.buffer[i + LFG_K - LFG_J];
        }
        for i in LFG_J..LFG_K {
            self.buffer[i] ^= self.buffer[i - LFG_J];
        }
    }

    /// Skips `n` output bytes without copying them anywhere.
    pub(crate) fn skip(&mut self, n: usize) {
        self.position += n;
        while self.position >= LFG_K * 4 {
            self.forward();
            self.position -= LFG_K * 4;
        }
    }

    /// Views the current state as raw bytes. `init` already arranged the
    /// bytes into output order, so no per-read conversion is needed.
    #[inline]
    fn bytes(&self) -> &[u8; LFG_K * 4] {
        // SAFETY: [u32; LFG_K] and [u8; LFG_K * 4] have identical size, u8 has
        // alignment 1, and every bit pattern is a valid u8.
        unsafe { &*(self.buffer.as_ptr() as *const [u8; LFG_K * 4]) }
    }

    /// Fills `buf` with generated junk data, advancing the generator state
    /// as needed.
    pub(crate) fn fill(&mut self, mut buf: &mut [u8]) {
        while !buf.is_empty() {
            let len = min(buf.len(), LFG_K * 4 - self.position);
            buf[..len].copy_from_slice(&self.bytes()[self.position..self.position + len]);
            self.position += len;
            buf = &mut buf[len..];
            if self.position == LFG_K * 4 {
                self.forward();
                self.position = 0;
            }
        }
    }
}

13
src/util/mod.rs Normal file
View File

@ -0,0 +1,13 @@
use std::ops::{Div, Rem};
pub(crate) mod lfg;
pub(crate) mod reader;
pub(crate) mod take_seek;
/// Computes the quotient and remainder of `x / y` in a single call.
///
/// Uses the language's `/` and `%` operators, so integer division by zero
/// panics exactly as the operators themselves would.
#[inline(always)]
pub(crate) fn div_rem<T>(x: T, y: T) -> (T, T)
where T: Div<Output = T> + Rem<Output = T> + Copy {
    (x / y, x % y)
}

243
src/util/reader.rs Normal file
View File

@ -0,0 +1,243 @@
use std::{ffi::CString, io, io::Read};
use io::Write;
/// Marker value meaning "size not known statically".
pub(crate) const DYNAMIC_SIZE: usize = 0;

/// Sums the static field sizes of a struct at compile time.
///
/// Returns [`DYNAMIC_SIZE`] if any field is itself dynamically sized, since
/// the total can then not be known ahead of time.
pub(crate) const fn struct_size<const N: usize>(fields: [usize; N]) -> usize {
    let mut total = 0;
    let mut idx = 0;
    while idx < N {
        if fields[idx] == DYNAMIC_SIZE {
            // A single dynamic field makes the whole struct dynamic.
            return DYNAMIC_SIZE;
        }
        total += fields[idx];
        idx += 1;
    }
    total
}
/// Reads and discards exactly `N` bytes from `reader`.
///
/// Fails with `UnexpectedEof` if fewer than `N` bytes remain.
pub(crate) fn skip_bytes<const N: usize, R>(reader: &mut R) -> io::Result<()>
where R: Read + ?Sized {
    let mut discard = [0u8; N];
    reader.read_exact(&mut discard)
}
/// Trait for types that can be parsed from a binary stream.
/// The integer implementations in this file read big-endian.
pub(crate) trait FromReader: Sized {
    /// Extra arguments required to parse this type; `()` for most implementations.
    type Args<'a>;

    /// Serialized size in bytes, or [`DYNAMIC_SIZE`] when it varies per value.
    const STATIC_SIZE: usize;

    /// Reads one value from `reader` using the supplied arguments.
    fn from_reader_args<R>(reader: &mut R, args: Self::Args<'_>) -> io::Result<Self>
    where R: Read + ?Sized;

    /// Reads one value from `reader` using default arguments.
    fn from_reader<'a, R>(reader: &mut R) -> io::Result<Self>
    where
        R: Read + ?Sized,
        Self::Args<'a>: Default,
    {
        Self::from_reader_args(reader, Default::default())
    }
}
// Implements FromReader for fixed-size integer primitives by reading
// size_of::<T> bytes and converting from big-endian.
macro_rules! impl_from_reader {
    ($($t:ty),*) => {
        $(
            impl FromReader for $t {
                type Args<'a> = ();
                const STATIC_SIZE: usize = std::mem::size_of::<Self>();
                fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self> where R: Read + ?Sized{
                    let mut buf = [0u8; Self::STATIC_SIZE];
                    reader.read_exact(&mut buf)?;
                    Ok(Self::from_be_bytes(buf))
                }
            }
        )*
    };
}
impl_from_reader!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128);
/// A 24-bit unsigned integer, widened to `u32` in memory.
#[repr(transparent)]
pub struct U24(pub u32);

impl FromReader for U24 {
    type Args<'a> = ();

    const STATIC_SIZE: usize = 3;

    /// Reads three big-endian bytes and zero-extends them into a `u32`.
    fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
    where R: Read + ?Sized {
        let mut bytes = [0u8; 3];
        reader.read_exact(&mut bytes)?;
        Ok(U24(u32::from_be_bytes([0, bytes[0], bytes[1], bytes[2]])))
    }
}
// Raw byte arrays are read verbatim, with no endianness conversion.
impl<const N: usize> FromReader for [u8; N] {
    type Args<'a> = ();
    const STATIC_SIZE: usize = N;
    fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
    where R: Read + ?Sized {
        let mut buf = [0u8; N];
        reader.read_exact(&mut buf)?;
        Ok(buf)
    }
}
// Arrays of u32 are read as N consecutive big-endian words.
impl<const N: usize> FromReader for [u32; N] {
    type Args<'a> = ();

    const STATIC_SIZE: usize = N * u32::STATIC_SIZE;

    /// Reads `N` big-endian u32 values.
    ///
    /// NOTE: issues one 4-byte read per element; wrap unbuffered readers in a
    /// `BufReader` to avoid per-word syscalls.
    fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
    where R: Read + ?Sized {
        let mut buf = [0u32; N];
        for value in buf.iter_mut() {
            let mut bytes = [0u8; 4];
            reader.read_exact(&mut bytes)?;
            *value = u32::from_be_bytes(bytes);
        }
        Ok(buf)
    }
}
// NUL-terminated strings of unknown length.
impl FromReader for CString {
    type Args<'a> = ();

    const STATIC_SIZE: usize = DYNAMIC_SIZE;

    /// Reads bytes up to and including the terminating NUL.
    fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
    where R: Read + ?Sized {
        let mut buf = Vec::new();
        loop {
            let mut byte = [0u8; 1];
            reader.read_exact(&mut byte)?;
            if byte[0] == 0 {
                break;
            }
            buf.push(byte[0]);
        }
        // The loop stops at the first NUL, so `buf` cannot contain an interior
        // NUL and the safe constructor cannot fail.
        Ok(CString::new(buf).expect("unreachable: no interior NUL"))
    }
}
/// Reads exactly `count` bytes into a freshly allocated vector.
///
/// Fails with `UnexpectedEof` if the reader ends early.
pub(crate) fn read_bytes<R>(reader: &mut R, count: usize) -> io::Result<Vec<u8>>
where R: Read + ?Sized {
    let mut out = vec![0u8; count];
    reader.read_exact(out.as_mut_slice())?;
    Ok(out)
}
/// Reads `count` values of `T` from `reader`.
///
/// For statically sized types the whole payload is fetched with one read and
/// the values are parsed out of the in-memory buffer; dynamically sized types
/// fall back to one read per element.
pub(crate) fn read_vec<'a, T, R>(reader: &mut R, count: usize) -> io::Result<Vec<T>>
where
    T: FromReader,
    R: Read + ?Sized,
    <T as FromReader>::Args<'a>: Default,
{
    let mut vec = Vec::with_capacity(count);
    if T::STATIC_SIZE != DYNAMIC_SIZE {
        // Read the entire buffer at once
        // NOTE(review): `T::STATIC_SIZE * count` can overflow for huge counts
        // read from untrusted headers — consider `checked_mul`.
        let buf = read_bytes(reader, T::STATIC_SIZE * count)?;
        let mut slice = buf.as_slice();
        for _ in 0..count {
            vec.push(T::from_reader(&mut slice)?);
        }
    } else {
        for _ in 0..count {
            vec.push(T::from_reader(reader)?);
        }
    }
    Ok(vec)
}
/// Trait for types that can be serialized to a binary stream.
/// The integer implementations in this file write big-endian.
pub(crate) trait ToWriter: Sized {
    /// Writes the serialized form of `self` to `writer`.
    fn to_writer<W>(&self, writer: &mut W) -> io::Result<()>
    where W: Write + ?Sized;

    /// Serializes `self` into a new byte vector of exactly `write_size` bytes.
    fn to_bytes(&self) -> io::Result<Vec<u8>> {
        let mut buf = vec![0u8; self.write_size()];
        self.to_writer(&mut buf.as_mut_slice())?;
        Ok(buf)
    }

    /// Number of bytes `to_writer` will produce.
    fn write_size(&self) -> usize;
}
// Implements ToWriter for fixed-size integer primitives by emitting their
// big-endian byte representation.
macro_rules! impl_to_writer {
    ($($t:ty),*) => {
        $(
            impl ToWriter for $t {
                fn to_writer<W>(&self, writer: &mut W) -> io::Result<()>
                where W: Write + ?Sized {
                    writer.write_all(&self.to_be_bytes())
                }
                fn to_bytes(&self) -> io::Result<Vec<u8>> {
                    Ok(self.to_be_bytes().to_vec())
                }
                fn write_size(&self) -> usize {
                    std::mem::size_of::<Self>()
                }
            }
        )*
    };
}
impl_to_writer!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128);
impl ToWriter for U24 {
    // Writes the low three bytes big-endian; the top byte of the inner u32 is
    // silently dropped. NOTE(review): assumes the value fits in 24 bits.
    fn to_writer<W>(&self, writer: &mut W) -> io::Result<()>
    where W: Write + ?Sized {
        writer.write_all(&self.0.to_be_bytes()[1..])
    }
    fn write_size(&self) -> usize { 3 }
}
// Fixed-size byte arrays are written verbatim (no length prefix).
impl<const N: usize> ToWriter for [u8; N] {
    fn to_writer<W>(&self, writer: &mut W) -> io::Result<()>
    where W: Write + ?Sized {
        writer.write_all(self)
    }
    fn write_size(&self) -> usize { N }
}
// Byte slices are written verbatim; callers must track the length themselves.
impl ToWriter for &[u8] {
    fn to_writer<W>(&self, writer: &mut W) -> io::Result<()>
    where W: Write + ?Sized {
        writer.write_all(self)
    }
    fn write_size(&self) -> usize { self.len() }
}
// Owned byte vectors are written verbatim, same as slices.
impl ToWriter for Vec<u8> {
    fn to_writer<W>(&self, writer: &mut W) -> io::Result<()>
    where W: Write + ?Sized {
        writer.write_all(self)
    }
    fn write_size(&self) -> usize { self.len() }
}
/// Writes every element of `vec` to `writer` in order, with no length prefix.
pub(crate) fn write_vec<T, W>(writer: &mut W, vec: &[T]) -> io::Result<()>
where
    T: ToWriter,
    W: Write + ?Sized,
{
    for item in vec {
        item.to_writer(writer)?;
    }
    Ok(())
}

127
src/util/take_seek.rs Normal file
View File

@ -0,0 +1,127 @@
// From https://github.com/jam1garner/binrw/blob/e96a1320287ec83d1f471525ffa380800ec9e124/binrw/src/io/take_seek.rs
// MIT License
//
// Copyright (c) jam1garner and other contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#![allow(dead_code)]
//! Types for seekable reader adapters which limit the number of bytes read from
//! the underlying reader.
use std::io::{Read, Result, Seek, SeekFrom};
/// Read adapter which limits the bytes read from an underlying reader, with
/// seek support.
///
/// This struct is generally created by importing the [`TakeSeekExt`] extension
/// and calling [`take_seek`] on a reader.
///
/// [`take_seek`]: TakeSeekExt::take_seek
#[derive(Debug)]
pub struct TakeSeek<T> {
    inner: T,
    /// Current absolute position in the underlying stream.
    pos: u64,
    /// Absolute position at which reads report EOF.
    end: u64,
}
impl<T> TakeSeek<T> {
    /// Gets a reference to the underlying reader.
    pub fn get_ref(&self) -> &T { &self.inner }

    /// Gets a mutable reference to the underlying reader.
    ///
    /// Care should be taken to avoid modifying the internal I/O state of the
    /// underlying reader as doing so may corrupt the internal limit of this
    /// `TakeSeek`.
    pub fn get_mut(&mut self) -> &mut T { &mut self.inner }

    /// Consumes this wrapper, returning the wrapped value.
    pub fn into_inner(self) -> T { self.inner }

    /// Returns the number of bytes that can be read before this instance will
    /// return EOF.
    ///
    /// # Note
    ///
    /// This instance may reach EOF after reading fewer bytes than indicated by
    /// this method if the underlying [`Read`] instance reaches EOF.
    // saturating_sub guards against `pos` having been seeked past `end`.
    pub fn limit(&self) -> u64 { self.end.saturating_sub(self.pos) }
}
impl<T: Seek> TakeSeek<T> {
    /// Sets the number of bytes that can be read before this instance will
    /// return EOF. This is the same as constructing a new `TakeSeek` instance,
    /// so the amount of bytes read and the previous limit value don't matter
    /// when calling this method.
    ///
    /// # Panics
    ///
    /// Panics if the inner stream returns an error from `stream_position`.
    // NOTE(review): `pos + limit` could overflow u64 for pathological limits.
    pub fn set_limit(&mut self, limit: u64) {
        let pos = self.inner.stream_position().expect("cannot get position for `set_limit`");
        self.pos = pos;
        self.end = pos + limit;
    }
}
impl<T: Read> Read for TakeSeek<T> {
    fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
        let limit = self.limit();

        // Don't call into inner reader at all at EOF because it may still block
        if limit == 0 {
            return Ok(0);
        }

        // Lint: It is impossible for this cast to truncate because the value
        // being cast is the minimum of two values, and one of the value types
        // is already `usize`.
        #[allow(clippy::cast_possible_truncation)]
        let max = (buf.len() as u64).min(limit) as usize;
        let n = self.inner.read(&mut buf[0..max])?;
        // Track the position locally so `stream_position` needs no inner call.
        self.pos += n as u64;
        Ok(n)
    }
}
impl<T: Seek> Seek for TakeSeek<T> {
    // Seek targets are passed straight to the underlying stream (absolute, not
    // window-relative); seeking past `end` simply makes `limit()` zero.
    fn seek(&mut self, pos: SeekFrom) -> Result<u64> {
        self.pos = self.inner.seek(pos)?;
        Ok(self.pos)
    }

    fn stream_position(&mut self) -> Result<u64> { Ok(self.pos) }
}
/// An extension trait that implements `take_seek()` for compatible streams.
pub trait TakeSeekExt {
    /// Creates an adapter which will read at most `limit` bytes from the
    /// wrapped stream.
    fn take_seek(self, limit: u64) -> TakeSeek<Self>
    where Self: Sized;
}

impl<T: Read + Seek> TakeSeekExt for T {
    fn take_seek(mut self, limit: u64) -> TakeSeek<Self>
    where Self: Sized {
        // NOTE(review): panics if the stream cannot report its position, and
        // `pos + limit` could overflow u64 for pathological limits.
        let pos = self.stream_position().expect("cannot get position for `take_seek`");
        TakeSeek { inner: self, pos, end: pos + limit }
    }
}