Compare commits

28 Commits
0.1.1 ... main

Author SHA1 Message Date
Luke Street 03b83484cb Resolve fmt & check issues 2024-04-30 23:54:30 -06:00
Luke Street f28238209d nodtool: Restructure code for external usage 2024-04-30 23:51:34 -06:00
Luke Street ad1ec5828d Fix RVZ GC junk data generation 2024-02-26 19:21:28 -07:00
Luke Street 8bd52d4075 Documentation updates & fixes for Wii partition streams 2024-02-24 12:35:41 -07:00
Luke Street 1895b7df3f Fixes & bump to version 1.0.0 2024-02-22 23:49:28 -07:00
Luke Street 7e6d880792 Add GCZ support, nodtool extract --partition, various fixes 2024-02-22 19:58:31 -07:00
Luke Street 60b3004999 Update README.md 2024-02-21 23:47:13 -07:00
Luke Street 09f008a34b Disable i686 Linux build 2024-02-21 23:37:29 -07:00
Luke Street 7ba95bb22e Bump MSRV to 1.73 2024-02-21 23:33:48 -07:00
Luke Street f82d8b9f17 Fix checks & build 2024-02-21 23:25:53 -07:00
Luke Street 07bb8ccc1d Restore all functionality, split lib/bin & integrate redump validation 2024-02-21 23:14:07 -07:00
Luke Street 7f97dac399 Convert all formats to new BlockIO trait (WIP) 2024-02-21 00:04:23 -07:00
    Simplifies format handling by moving common logic into a new `DiscReader` type.
Luke Street ce9fbbf822 Finish WIA/RVZ, add WBFS, CISO & more 2024-02-16 22:53:37 -07:00
    Generally a complete overhaul.
Luke Street fff7b350b1 More build fix attempts 2024-02-02 16:29:11 -07:00
Luke Street ec85b8380d Build fix attempts 2024-02-02 16:26:45 -07:00
Luke Street 4f794f06cb WIP WIA/RVZ & more 2024-02-02 16:21:05 -07:00
Luke Street 97c726c209 Add -h (validate Wii disc hashes); complete documentation 2022-02-03 20:54:16 -05:00
Luke Street 3e78aad790 Remove rust-version from Cargo.toml (not useful yet) 2022-02-03 02:42:41 -05:00
Luke Street 6b8d41b130 clippy config & fixes; enable LTO 2022-02-03 02:36:41 -05:00
Luke Street 46fa0c59ff Add cargo deny action to CI 2022-02-03 02:07:24 -05:00
Luke Street 6227718564 Migrate to binrw; update cargo deny config; update deps 2022-02-03 02:03:21 -05:00
Luke Street 8b3f655dc2 Various updates 2022-02-03 02:02:49 -05:00
    - Use thiserror for nod::Error
    - Use anyhow for error context chaining
    - Stream logic fixes
Luke Street 4b32388fa5 Add new_disc_io_from_{buf,stream} 2022-02-03 02:02:49 -05:00
Luke Street 50f171dcf5 Fix resolving NFS key path on Windows 2021-09-08 00:26:22 -04:00
Luke Street 9322b8e55b Add deny.toml for cargo-deny 2021-09-02 23:32:58 -04:00
Luke Street 35388db458 Cleanup & add as_dyn to ReadStream 2021-09-01 16:10:22 -04:00
Luke Street dc5217dd04 Fix LICENSE copyright 2021-08-25 14:54:52 -04:00
Luke Street aaf9d37306 Reformat with rustfmt 2021-08-25 11:22:17 -04:00
59 changed files with 36487 additions and 1648 deletions

.github/workflows/build.yaml

@@ -1,29 +1,177 @@
-name: build
+name: Build
 on: [ push, pull_request ]
+env:
+  BUILD_PROFILE: release-lto
+  CARGO_BIN_NAME: nodtool
+  CARGO_TARGET_DIR: target
 jobs:
-  default:
-    name: Default
+  check:
+    name: Check
+    runs-on: ubuntu-latest
     strategy:
       matrix:
-        platform: [ ubuntu-latest, macos-latest, windows-latest ]
-        toolchain: [ stable, 1.51.0, nightly ]
+        toolchain: [ stable, 1.73.0, nightly ]
       fail-fast: false
+    env:
+      RUSTFLAGS: -D warnings
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Setup Rust toolchain
+        uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: ${{ matrix.toolchain }}
+          components: rustfmt, clippy
+      - name: Cargo check
+        run: cargo check --all-features --all-targets
+      - name: Cargo clippy
+        run: cargo clippy --all-features --all-targets
+  fmt:
+    name: Format
+    runs-on: ubuntu-latest
+    env:
+      RUSTFLAGS: -D warnings
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Setup Rust toolchain
+        # We use nightly options in rustfmt.toml
+        uses: dtolnay/rust-toolchain@nightly
+        with:
+          components: rustfmt
+      - name: Cargo fmt
+        run: cargo fmt --all --check
+  deny:
+    name: Deny
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        checks:
+          - advisories
+          - bans licenses sources
+      fail-fast: false
+    # Prevent new advisories from failing CI
+    continue-on-error: ${{ matrix.checks == 'advisories' }}
+    steps:
+      - uses: actions/checkout@v4
+      - uses: EmbarkStudios/cargo-deny-action@v1
+        with:
+          command: check ${{ matrix.checks }}
+  test:
+    name: Test
+    strategy:
+      matrix:
+        platform: [ ubuntu-latest, windows-latest, macos-latest ]
+      fail-fast: false
     runs-on: ${{ matrix.platform }}
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions-rs/toolchain@v1
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Setup Rust toolchain
+        uses: dtolnay/rust-toolchain@stable
+      - name: Cargo test
+        run: cargo test --release
+  build:
+    name: Build
+    strategy:
+      matrix:
+        include:
+          - platform: ubuntu-latest
+            target: x86_64-unknown-linux-musl
+            name: linux-x86_64
+            build: zigbuild
+            features: asm
+          # - platform: ubuntu-latest
+          #   target: i686-unknown-linux-musl
+          #   name: linux-i686
+          #   build: zigbuild
+          #   features: asm
+          - platform: ubuntu-latest
+            target: aarch64-unknown-linux-musl
+            name: linux-aarch64
+            build: zigbuild
+            features: nightly
+          - platform: ubuntu-latest
+            target: armv7-unknown-linux-musleabi
+            name: linux-armv7l
+            build: zigbuild
+            features: default
+          - platform: windows-latest
+            target: x86_64-pc-windows-msvc
+            name: windows-x86_64
+            build: build
+            features: default
+          - platform: windows-latest
+            target: aarch64-pc-windows-msvc
+            name: windows-arm64
+            build: build
+            features: nightly
+          - platform: macos-latest
+            target: x86_64-apple-darwin
+            name: macos-x86_64
+            build: build
+            features: asm
+          - platform: macos-latest
+            target: aarch64-apple-darwin
+            name: macos-arm64
+            build: build
+            features: nightly
+      fail-fast: false
+    runs-on: ${{ matrix.platform }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Install dependencies
+        if: matrix.packages != ''
+        run: |
+          sudo apt-get -y update
+          sudo apt-get -y install ${{ matrix.packages }}
+      - name: Install cargo-zigbuild
+        if: matrix.build == 'zigbuild'
+        run: pip install ziglang==0.11.0 cargo-zigbuild==0.18.3
+      - name: Setup Rust toolchain
+        uses: dtolnay/rust-toolchain@nightly
         with:
-          toolchain: ${{ matrix.toolchain }}
-          override: true
-      - uses: actions-rs/cargo@v1
+          targets: ${{ matrix.target }}
+      - name: Cargo build
+        run: cargo ${{ matrix.build }} --profile ${{ env.BUILD_PROFILE }} --target ${{ matrix.target }} --bin ${{ env.CARGO_BIN_NAME }} --features ${{ matrix.features }}
+      - name: Upload artifacts
+        uses: actions/upload-artifact@v4
-        with:
-          command: build
-          args: --release --all-features
-      - uses: actions/upload-artifact@v2
         with:
-          name: ${{ matrix.platform }}-${{ matrix.toolchain }}
+          name: ${{ matrix.name }}
           path: |
-            target/release/nodtool
-            target/release/nodtool.exe
+            ${{ env.CARGO_TARGET_DIR }}/${{ env.BUILD_PROFILE }}/${{ env.CARGO_BIN_NAME }}
+            ${{ env.CARGO_TARGET_DIR }}/${{ env.BUILD_PROFILE }}/${{ env.CARGO_BIN_NAME }}.exe
+            ${{ env.CARGO_TARGET_DIR }}/${{ matrix.target }}/${{ env.BUILD_PROFILE }}/${{ env.CARGO_BIN_NAME }}
+            ${{ env.CARGO_TARGET_DIR }}/${{ matrix.target }}/${{ env.BUILD_PROFILE }}/${{ env.CARGO_BIN_NAME }}.exe
+          if-no-files-found: error
+  release:
+    name: Release
+    if: startsWith(github.ref, 'refs/tags/')
+    runs-on: ubuntu-latest
+    needs: [ build ]
+    steps:
+      - name: Download artifacts
+        uses: actions/download-artifact@v4
+        with:
+          path: artifacts
+      - name: Rename artifacts
+        working-directory: artifacts
+        run: |
+          mkdir ../out
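+          # e.g. linux-x86_64/<target>/release-lto/nodtool -> ../out/nodtool-linux-x86_64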
+          for i in */*/$BUILD_PROFILE/$CARGO_BIN_NAME*; do
+            mv "$i" "../out/$(sed -E "s/([^/]+)\/[^/]+\/$BUILD_PROFILE\/($CARGO_BIN_NAME)/\2-\1/" <<< "$i")"
+          done
+          ls -R ../out
+      - name: Release
+        uses: softprops/action-gh-release@4634c16e79c963813287e889244c50009e7f0981
+        with:
+          files: out/*

.gitignore

@@ -1,3 +1,2 @@
 /target
-Cargo.lock
 .idea

Cargo.lock (new file, generated)

File diff suppressed because it is too large.

Cargo.toml

@@ -1,27 +1,8 @@
-[package]
-name = "nod"
-version = "0.1.1"
-edition = "2018"
-authors = ["Luke Street <luke@street.dev>"]
-license = "MIT OR Apache-2.0"
-repository = "https://github.com/encounter/nod-rs"
-documentation = "https://docs.rs/nod"
-readme = "README.md"
-description = """
-Rust library and CLI tool for reading GameCube and Wii disc images.
-"""
-keywords = ["gamecube", "wii", "iso", "nfs", "gcm"]
-categories = ["command-line-utilities", "parser-implementations"]
+[workspace]
+members = ["nod", "nodtool"]
+resolver = "2"
-[[bin]]
-name = "nodtool"
-path = "src/bin.rs"
-[dependencies]
-aes = "0.7.4"
-binread = "2.1.1"
-block-modes = "0.8.1"
-clap = "2.33.3"
-encoding_rs = "0.8.28"
-file-size = "1.0.3"
-sha-1 = "0.9.7"
+[profile.release-lto]
+inherits = "release"
+lto = "thin"
+strip = "debuginfo"

LICENSE-APACHE

@@ -186,7 +186,7 @@
       same "printed page" as the copyright notice for easier
       identification within third-party archives.
-   Copyright 2018, 2019, 2020 Michael Sanders.
+   Copyright 2021 Luke Street.
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.

LICENSE-MIT

@@ -1,6 +1,6 @@
 MIT License
-Copyright 2018, 2019, 2020 Michael Sanders.
+Copyright 2021 Luke Street.
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

README.md

@@ -1,57 +1,115 @@
-# nod-rs [![Build Status]][actions] [![Latest Version]][crates.io] [![Api Rustdoc]][rustdoc] ![Rust Version]
+# nod [![Build Status]][actions] [![Latest Version]][crates.io] [![Api Rustdoc]][rustdoc] ![Rust Version]
-[Build Status]: https://github.com/encounter/nod-rs/workflows/build/badge.svg
+[Build Status]: https://github.com/encounter/nod-rs/actions/workflows/build.yaml/badge.svg
 [actions]: https://github.com/encounter/nod-rs/actions
 [Latest Version]: https://img.shields.io/crates/v/nod.svg
 [crates.io]: https://crates.io/crates/nod
 [Api Rustdoc]: https://img.shields.io/badge/api-rustdoc-blue.svg
 [rustdoc]: https://docs.rs/nod
-[Rust Version]: https://img.shields.io/badge/rust-1.51+-blue.svg?maxAge=3600
+[Rust Version]: https://img.shields.io/badge/rust-1.73+-blue.svg?maxAge=3600
-Library for traversing & reading GameCube and Wii disc images.
+Library for traversing & reading Nintendo Optical Disc (GameCube and Wii) images.
-Based on the C++ library [nod](https://github.com/AxioDL/nod),
+Originally based on the C++ library [nod](https://github.com/AxioDL/nod),
 but does not currently support authoring.
 Currently supported file formats:
-- ISO
-- NFS (Wii U VC files, e.g. `hif_000000.nfs`)
+- ISO (GCM)
+- WIA / RVZ
+- WBFS (+ NKit 2 lossless)
+- CISO (+ NKit 2 lossless)
+- NFS (Wii U VC)
+- GCZ
-### CLI tool
+## CLI tool
-This crate includes a CLI tool `nodtool`, which can be used to extract disc images to a specified directory:
+This crate includes a command-line tool called `nodtool`.
+### info
+Displays information about a disc image.
+```shell
+nodtool info /path/to/game.iso
+```
+### extract
+Extracts the contents of a disc image to a directory.
 ```shell
 nodtool extract /path/to/game.iso [outdir]
 ```
-For Wii U VC titles, use `content/hif_*.nfs`:
+For Wii U VC titles, use `content/hif_000000.nfs`:
 ```shell
 nodtool extract /path/to/game/content/hif_000000.nfs [outdir]
 ```
-### Library example
+### convert
+Converts any supported format to raw ISO.
+```shell
+nodtool convert /path/to/game.wia /path/to/game.iso
+```
+### verify
+Hashes the contents of a disc image and verifies it.
+```shell
+nodtool verify /path/to/game.iso
+```
+## Library example
 Opening a disc image and reading a file:
 ```rust
-use nod::disc::{new_disc_base, PartHeader};
-use nod::fst::NodeType;
-use nod::io::new_disc_io;
 use std::io::Read;
-let mut disc_io = new_disc_io("path/to/file".as_ref())?;
-let disc_base = new_disc_base(disc_io.as_mut())?;
-let mut partition = disc_base.get_data_partition(disc_io.as_mut())?;
-let header = partition.read_header()?;
-if let Some(NodeType::File(node)) = header.find_node("/MP3/Worlds.txt") {
+// Open a disc image and the first data partition.
+let disc = nod::Disc::new("path/to/file.iso")
+    .expect("Failed to open disc");
+let mut partition = disc.open_partition_kind(nod::PartitionKind::Data)
+    .expect("Failed to open data partition");
+// Read partition metadata and the file system table.
+let meta = partition.meta()
+    .expect("Failed to read partition metadata");
+let fst = meta.fst()
+    .expect("File system table is invalid");
+// Find a file by path and read it into a string.
+if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
     let mut s = String::new();
-    partition.begin_file_stream(node)?.read_to_string(&mut s);
-    println!(s);
+    partition
+        .open_file(node)
+        .expect("Failed to open file stream")
+        .read_to_string(&mut s)
+        .expect("Failed to read file");
+    println!("{}", s);
 }
 ```
-### License
+Converting a disc image to raw ISO:
+```rust
+// Enable `rebuild_encryption` to ensure the output is a valid ISO.
+let options = nod::OpenOptions { rebuild_encryption: true, ..Default::default() };
+let mut disc = nod::Disc::new_with_options("path/to/file.rvz", &options)
+    .expect("Failed to open disc");
+// Read directly from the open disc and write to the output file.
+let mut out = std::fs::File::create("output.iso")
+    .expect("Failed to create output file");
+std::io::copy(&mut disc, &mut out)
+    .expect("Failed to write data");
+```
+## License
 Licensed under either of

deny.toml (new file)

@@ -0,0 +1,206 @@
# This template contains all of the possible sections and their default values
# Note that all fields that take a lint level have these possible values:
# * deny - An error will be produced and the check will fail
# * warn - A warning will be produced, but the check will not fail
# * allow - No warning or error will be produced, though in some cases a note
# will be
# The values provided in this template are the default values that will be used
# when any section or field is not specified in your own configuration
# If 1 or more target triples (and optionally, target_features) are specified,
# only the specified targets will be checked when running `cargo deny check`.
# This means, if a particular package is only ever used as a target specific
# dependency, such as, for example, the `nix` crate only being used via the
# `target_family = "unix"` configuration, that only having windows targets in
# this list would mean the nix crate, as well as any of its exclusive
# dependencies not shared by any other crates, would be ignored, as the target
# list here is effectively saying which targets you are building for.
targets = [
# The triple can be any string, but only the target triples built in to
# rustc (as of 1.40) can be checked against actual config expressions
#{ triple = "x86_64-unknown-linux-musl" },
# You can also specify which target_features you promise are enabled for a
# particular target. target_features are currently not validated against
# the actual valid features supported by the target architecture.
#{ triple = "wasm32-unknown-unknown", features = ["atomics"] },
]
# This section is considered when running `cargo deny check advisories`
# More documentation for the advisories section can be found here:
# https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html
[advisories]
# The path where the advisory database is cloned/fetched into
db-path = "~/.cargo/advisory-db"
# The url(s) of the advisory databases to use
db-urls = ["https://github.com/rustsec/advisory-db"]
# The lint level for security vulnerabilities
vulnerability = "deny"
# The lint level for unmaintained crates
unmaintained = "warn"
# The lint level for crates that have been yanked from their source registry
yanked = "warn"
# The lint level for crates with security notices. Note that as of
# 2019-12-17 there are no security notice advisories in
# https://github.com/rustsec/advisory-db
notice = "warn"
# A list of advisory IDs to ignore. Note that ignored advisories will still
# output a note when they are encountered.
ignore = [
#"RUSTSEC-0000-0000",
]
# Threshold for security vulnerabilities, any vulnerability with a CVSS score
# lower than the range specified will be ignored. Note that ignored advisories
# will still output a note when they are encountered.
# * None - CVSS Score 0.0
# * Low - CVSS Score 0.1 - 3.9
# * Medium - CVSS Score 4.0 - 6.9
# * High - CVSS Score 7.0 - 8.9
# * Critical - CVSS Score 9.0 - 10.0
#severity-threshold =
# This section is considered when running `cargo deny check licenses`
# More documentation for the licenses section can be found here:
# https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html
[licenses]
# The lint level for crates which do not have a detectable license
unlicensed = "deny"
# List of explicitly allowed licenses
# See https://spdx.org/licenses/ for list of possible licenses
# [possible values: any SPDX 3.11 short identifier (+ optional exception)].
allow = [
"MIT",
"Apache-2.0",
"BSD-3-Clause",
"Unicode-DFS-2016",
"BSL-1.0",
"ISC",
]
# List of explicitly disallowed licenses
# See https://spdx.org/licenses/ for list of possible licenses
# [possible values: any SPDX 3.11 short identifier (+ optional exception)].
deny = [
#"Nokia",
]
# Lint level for licenses considered copyleft
copyleft = "warn"
# Blanket approval or denial for OSI-approved or FSF Free/Libre licenses
# * both - The license will be approved if it is both OSI-approved *AND* FSF
# * either - The license will be approved if it is either OSI-approved *OR* FSF
# * osi-only - The license will be approved if it is OSI-approved *AND NOT* FSF
# * fsf-only - The license will be approved if it is FSF *AND NOT* OSI-approved
# * neither - This predicate is ignored and the default lint level is used
allow-osi-fsf-free = "neither"
# Lint level used when no other predicates are matched
# 1. License isn't in the allow or deny lists
# 2. License isn't copyleft
# 3. License isn't OSI/FSF, or allow-osi-fsf-free = "neither"
default = "deny"
# The confidence threshold for detecting a license from license text.
# The higher the value, the more closely the license text must be to the
# canonical license text of a valid SPDX license file.
# [possible values: any between 0.0 and 1.0].
confidence-threshold = 0.8
# Allow 1 or more licenses on a per-crate basis, so that particular licenses
# aren't accepted for every possible crate as with the normal allow list
exceptions = [
# Each entry is the crate and version constraint, and its specific allow
# list
#{ allow = ["Zlib"], name = "adler32", version = "*" },
]
# Some crates don't have (easily) machine readable licensing information,
# adding a clarification entry for it allows you to manually specify the
# licensing information
[[licenses.clarify]]
# The name of the crate the clarification applies to
name = "encoding_rs"
# The optional version constraint for the crate
#version = "*"
# The SPDX expression for the license requirements of the crate
expression = "(Apache-2.0 OR MIT) AND BSD-3-Clause"
# One or more files in the crate's source used as the "source of truth" for
# the license expression. If the contents match, the clarification will be used
# when running the license check, otherwise the clarification will be ignored
# and the crate will be checked normally, which may produce warnings or errors
# depending on the rest of your configuration
license-files = [
# Each entry is a crate relative path, and the (opaque) hash of its contents
{ path = "COPYRIGHT", hash = 0x39f8ad31 }
]
[licenses.private]
# If true, ignores workspace crates that aren't published, or are only
# published to private registries
ignore = false
# One or more private registries that you might publish crates to, if a crate
# is only published to private registries, and ignore is true, the crate will
# not have its license(s) checked
registries = [
#"https://sekretz.com/registry
]
# This section is considered when running `cargo deny check bans`.
# More documentation about the 'bans' section can be found here:
# https://embarkstudios.github.io/cargo-deny/checks/bans/cfg.html
[bans]
# Lint level for when multiple versions of the same crate are detected
multiple-versions = "warn"
# Lint level for when a crate version requirement is `*`
wildcards = "allow"
# The graph highlighting used when creating dotgraphs for crates
# with multiple versions
# * lowest-version - The path to the lowest versioned duplicate is highlighted
# * simplest-path - The path to the version with the fewest edges is highlighted
# * all - Both lowest-version and simplest-path are used
highlight = "all"
# List of crates that are allowed. Use with care!
allow = [
#{ name = "ansi_term", version = "=0.11.0" },
]
# List of crates to deny
deny = [
# Each entry the name of a crate and a version range. If version is
# not specified, all versions will be matched.
#{ name = "ansi_term", version = "=0.11.0" },
#
# Wrapper crates can optionally be specified to allow the crate when it
# is a direct dependency of the otherwise banned crate
#{ name = "ansi_term", version = "=0.11.0", wrappers = [] },
]
# Certain crates/versions that will be skipped when doing duplicate detection.
skip = [
#{ name = "ansi_term", version = "=0.11.0" },
]
# Similarly to `skip` allows you to skip certain crates during duplicate
# detection. Unlike skip, it also includes the entire tree of transitive
# dependencies starting at the specified crate, up to a certain depth, which is
# by default infinite
skip-tree = [
#{ name = "ansi_term", version = "=0.11.0", depth = 20 },
]
# This section is considered when running `cargo deny check sources`.
# More documentation about the 'sources' section can be found here:
# https://embarkstudios.github.io/cargo-deny/checks/sources/cfg.html
[sources]
# Lint level for what to happen when a crate from a crate registry that is not
# in the allow list is encountered
unknown-registry = "warn"
# Lint level for what to happen when a crate from a git repository that is not
# in the allow list is encountered
unknown-git = "warn"
# List of URLs for allowed crate registries. Defaults to the crates.io index
# if not specified. If it is specified but empty, no registries are allowed.
allow-registry = ["https://github.com/rust-lang/crates.io-index"]
# List of URLs for allowed Git repositories
allow-git = []
[sources.allow-org]
# 1 or more github.com organizations to allow git sources for
github = ["encounter"]
# 1 or more gitlab.com organizations to allow git sources for
#gitlab = [""]
# 1 or more bitbucket.org organizations to allow git sources for
#bitbucket = [""]

nod/Cargo.toml (new file)

@@ -0,0 +1,42 @@
[package]
name = "nod"
version = "1.2.0"
edition = "2021"
rust-version = "1.73.0"
authors = ["Luke Street <luke@street.dev>"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/encounter/nod-rs"
documentation = "https://docs.rs/nod"
readme = "../README.md"
description = """
Library for reading GameCube and Wii disc images.
"""
keywords = ["gamecube", "wii", "iso", "wbfs", "rvz"]
categories = ["command-line-utilities", "parser-implementations"]
[features]
default = ["compress-bzip2", "compress-lzma", "compress-zlib", "compress-zstd"]
asm = ["sha1/asm"]
compress-bzip2 = ["bzip2"]
compress-lzma = ["liblzma"]
compress-zlib = ["adler", "miniz_oxide"]
compress-zstd = ["zstd"]
[dependencies]
adler = { version = "1.0", optional = true }
aes = "0.8"
base16ct = "0.2"
bzip2 = { version = "0.4", features = ["static"], optional = true }
cbc = "0.1"
digest = "0.10"
dyn-clone = "1.0"
encoding_rs = "0.8"
itertools = "0.12"
liblzma = { version = "0.2", features = ["static"], optional = true }
log = "0.4"
miniz_oxide = { version = "0.7", optional = true }
rayon = "1.8"
sha1 = "0.10"
thiserror = "1.0"
zerocopy = { version = "0.7", features = ["alloc", "derive"] }
zstd = { version = "0.13", optional = true }
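# Sketch of how a downstream crate could trim the optional compression features
# above (hypothetical consumer Cargo.toml entry, enabling only zstd support):
#   nod = { version = "1.2", default-features = false, features = ["compress-zstd"] }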

nod/src/disc/gcn.rs (new file)

@@ -0,0 +1,199 @@
use std::{
cmp::min,
io,
io::{Read, Seek, SeekFrom},
mem::size_of,
};
use zerocopy::{FromBytes, FromZeroes};
use crate::{
disc::{
ApploaderHeader, DiscHeader, DolHeader, PartitionBase, PartitionHeader, PartitionMeta,
BI2_SIZE, BOOT_SIZE, SECTOR_SIZE,
},
fst::{Node, NodeKind},
io::block::{Block, BlockIO},
streams::{ReadStream, SharedWindowedReadStream},
util::read::{read_box, read_box_slice, read_vec},
Result, ResultContext,
};
pub struct PartitionGC {
io: Box<dyn BlockIO>,
block: Block,
block_buf: Box<[u8]>,
block_idx: u32,
sector_buf: Box<[u8; SECTOR_SIZE]>,
sector: u32,
pos: u64,
disc_header: Box<DiscHeader>,
}
impl Clone for PartitionGC {
fn clone(&self) -> Self {
Self {
io: self.io.clone(),
block: Block::default(),
block_buf: <u8>::new_box_slice_zeroed(self.block_buf.len()),
block_idx: u32::MAX,
sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed(),
sector: u32::MAX,
pos: 0,
disc_header: self.disc_header.clone(),
}
}
}
impl PartitionGC {
pub fn new(inner: Box<dyn BlockIO>, disc_header: Box<DiscHeader>) -> Result<Box<Self>> {
let block_size = inner.block_size();
Ok(Box::new(Self {
io: inner,
block: Block::default(),
block_buf: <u8>::new_box_slice_zeroed(block_size as usize),
block_idx: u32::MAX,
sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed(),
sector: u32::MAX,
pos: 0,
disc_header,
}))
}
pub fn into_inner(self) -> Box<dyn BlockIO> { self.io }
}
impl Read for PartitionGC {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let sector = (self.pos / SECTOR_SIZE as u64) as u32;
let block_idx = (sector as u64 * SECTOR_SIZE as u64 / self.block_buf.len() as u64) as u32;
// Read new block if necessary
if block_idx != self.block_idx {
self.block = self.io.read_block(self.block_buf.as_mut(), block_idx, None)?;
self.block_idx = block_idx;
}
// Copy sector if necessary
if sector != self.sector {
self.block.copy_raw(
self.sector_buf.as_mut(),
self.block_buf.as_ref(),
sector,
&self.disc_header,
)?;
self.sector = sector;
}
let offset = (self.pos % SECTOR_SIZE as u64) as usize;
let len = min(buf.len(), SECTOR_SIZE - offset);
buf[..len].copy_from_slice(&self.sector_buf[offset..offset + len]);
self.pos += len as u64;
Ok(len)
}
}
impl Seek for PartitionGC {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
self.pos = match pos {
SeekFrom::Start(v) => v,
SeekFrom::End(_) => {
return Err(io::Error::new(
io::ErrorKind::Unsupported,
"GCPartitionReader: SeekFrom::End is not supported".to_string(),
));
}
SeekFrom::Current(v) => self.pos.saturating_add_signed(v),
};
Ok(self.pos)
}
}
impl PartitionBase for PartitionGC {
fn meta(&mut self) -> Result<Box<PartitionMeta>> {
self.seek(SeekFrom::Start(0)).context("Seeking to partition metadata")?;
read_part_meta(self, false)
}
fn open_file(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream> {
assert_eq!(node.kind(), NodeKind::File);
self.new_window(node.offset(false), node.length())
}
fn ideal_buffer_size(&self) -> usize { SECTOR_SIZE }
}
pub(crate) fn read_part_meta(
reader: &mut dyn ReadStream,
is_wii: bool,
) -> Result<Box<PartitionMeta>> {
// boot.bin
let raw_boot: Box<[u8; BOOT_SIZE]> = read_box(reader).context("Reading boot.bin")?;
let partition_header = PartitionHeader::ref_from(&raw_boot[size_of::<DiscHeader>()..]).unwrap();
// bi2.bin
let raw_bi2: Box<[u8; BI2_SIZE]> = read_box(reader).context("Reading bi2.bin")?;
// apploader.bin
let mut raw_apploader: Vec<u8> =
read_vec(reader, size_of::<ApploaderHeader>()).context("Reading apploader header")?;
let apploader_header = ApploaderHeader::ref_from(raw_apploader.as_slice()).unwrap();
raw_apploader.resize(
size_of::<ApploaderHeader>()
+ apploader_header.size.get() as usize
+ apploader_header.trailer_size.get() as usize,
0,
);
reader
.read_exact(&mut raw_apploader[size_of::<ApploaderHeader>()..])
.context("Reading apploader")?;
// fst.bin
reader
.seek(SeekFrom::Start(partition_header.fst_offset(is_wii)))
.context("Seeking to FST offset")?;
let raw_fst: Box<[u8]> = read_box_slice(reader, partition_header.fst_size(is_wii) as usize)
.with_context(|| {
format!(
"Reading partition FST (offset {}, size {})",
partition_header.fst_offset(is_wii),
partition_header.fst_size(is_wii)
)
})?;
// main.dol
reader
.seek(SeekFrom::Start(partition_header.dol_offset(is_wii)))
.context("Seeking to DOL offset")?;
let mut raw_dol: Vec<u8> =
read_vec(reader, size_of::<DolHeader>()).context("Reading DOL header")?;
let dol_header = DolHeader::ref_from(raw_dol.as_slice()).unwrap();
let dol_size = dol_header
.text_offs
.iter()
.zip(&dol_header.text_sizes)
.map(|(offs, size)| offs.get() + size.get())
.chain(
dol_header
.data_offs
.iter()
.zip(&dol_header.data_sizes)
.map(|(offs, size)| offs.get() + size.get()),
)
.max()
.unwrap_or(size_of::<DolHeader>() as u32);
raw_dol.resize(dol_size as usize, 0);
reader.read_exact(&mut raw_dol[size_of::<DolHeader>()..]).context("Reading DOL")?;
Ok(Box::new(PartitionMeta {
raw_boot,
raw_bi2,
raw_apploader: raw_apploader.into_boxed_slice(),
raw_fst,
raw_dol: raw_dol.into_boxed_slice(),
raw_ticket: None,
raw_tmd: None,
raw_cert_chain: None,
raw_h3_table: None,
}))
}

nod/src/disc/hashes.rs (new file)

@@ -0,0 +1,204 @@
use std::{
io::{Read, Seek, SeekFrom},
sync::{Arc, Mutex},
time::Instant,
};
use rayon::iter::{IntoParallelIterator, ParallelIterator};
use sha1::{Digest, Sha1};
use zerocopy::FromZeroes;
use crate::{
array_ref, array_ref_mut,
disc::{
reader::DiscReader,
wii::{HASHES_SIZE, SECTOR_DATA_SIZE},
},
io::HashBytes,
util::read::read_box_slice,
OpenOptions, Result, ResultContext, SECTOR_SIZE,
};
/// In a sector, following the 0x400 byte block of hashes, each 0x400 bytes of decrypted data is
/// hashed, yielding 31 H0 hashes.
/// Then, 8 sectors are aggregated into a subgroup, and the 31 H0 hashes for each sector are hashed,
/// yielding 8 H1 hashes.
/// Then, 8 subgroups are aggregated into a group, and the 8 H1 hashes for each subgroup are hashed,
/// yielding 8 H2 hashes.
/// Finally, the 8 H2 hashes for each group are hashed, yielding 1 H3 hash.
/// The H3 hashes for each group are stored in the partition's H3 table.
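// Worked example of the tree above for one group (64 sectors, 2 MiB on disc):
// each sector's 0x7C00 data bytes yield 0x7C00 / 0x400 = 31 H0 hashes
// (64 * 31 = 1984 per group), each sector's 31 H0 hashes hash to one H1
// (64 per group), each subgroup's 8 H1 hashes hash to one H2 (8 per group),
// and the 8 H2 hashes hash to the group's single H3 table entry. These counts
// match the `HashResult` field sizes below.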
#[derive(Clone, Debug)]
pub struct HashTable {
/// SHA-1 hash of each 0x400 byte block of decrypted data.
pub h0_hashes: Box<[HashBytes]>,
/// SHA-1 hash of the 31 H0 hashes for each sector.
pub h1_hashes: Box<[HashBytes]>,
/// SHA-1 hash of the 8 H1 hashes for each subgroup.
pub h2_hashes: Box<[HashBytes]>,
/// SHA-1 hash of the 8 H2 hashes for each group.
pub h3_hashes: Box<[HashBytes]>,
}
#[derive(Clone, FromZeroes)]
struct HashResult {
h0_hashes: [HashBytes; 1984],
h1_hashes: [HashBytes; 64],
h2_hashes: [HashBytes; 8],
h3_hash: HashBytes,
}
impl HashTable {
fn new(num_sectors: u32) -> Self {
let num_sectors = num_sectors.next_multiple_of(64) as usize;
let num_data_hashes = num_sectors * 31;
let num_subgroups = num_sectors / 8;
let num_groups = num_subgroups / 8;
Self {
h0_hashes: HashBytes::new_box_slice_zeroed(num_data_hashes),
h1_hashes: HashBytes::new_box_slice_zeroed(num_sectors),
h2_hashes: HashBytes::new_box_slice_zeroed(num_subgroups),
h3_hashes: HashBytes::new_box_slice_zeroed(num_groups),
}
}
fn extend(&mut self, group_index: usize, result: &HashResult) {
*array_ref_mut![self.h0_hashes, group_index * 1984, 1984] = result.h0_hashes;
*array_ref_mut![self.h1_hashes, group_index * 64, 64] = result.h1_hashes;
*array_ref_mut![self.h2_hashes, group_index * 8, 8] = result.h2_hashes;
self.h3_hashes[group_index] = result.h3_hash;
}
}
pub fn rebuild_hashes(reader: &mut DiscReader) -> Result<()> {
const NUM_H0_HASHES: usize = SECTOR_DATA_SIZE / HASHES_SIZE;
log::info!(
"Rebuilding hashes for Wii partition data (using {} threads)",
rayon::current_num_threads()
);
let start = Instant::now();
// Precompute hashes for zeroed sectors.
const ZERO_H0_BYTES: &[u8] = &[0u8; HASHES_SIZE];
let zero_h0_hash = hash_bytes(ZERO_H0_BYTES);
let partitions = reader.partitions();
let mut hash_tables = Vec::with_capacity(partitions.len());
for part in partitions {
let part_sectors = part.data_end_sector - part.data_start_sector;
let hash_table = HashTable::new(part_sectors);
log::debug!(
"Rebuilding hashes: {} sectors, {} subgroups, {} groups",
hash_table.h1_hashes.len(),
hash_table.h2_hashes.len(),
hash_table.h3_hashes.len()
);
let group_count = hash_table.h3_hashes.len();
let mutex = Arc::new(Mutex::new(hash_table));
(0..group_count).into_par_iter().try_for_each_with(
(reader.open_partition(part.index, &OpenOptions::default())?, mutex.clone()),
|(stream, mutex), h3_index| -> Result<()> {
let mut result = HashResult::new_box_zeroed();
let mut data_buf = <u8>::new_box_slice_zeroed(SECTOR_DATA_SIZE);
let mut h3_hasher = Sha1::new();
for h2_index in 0..8 {
let mut h2_hasher = Sha1::new();
for h1_index in 0..8 {
let sector = h1_index + h2_index * 8;
let part_sector = sector as u32 + h3_index as u32 * 64;
let mut h1_hasher = Sha1::new();
if part_sector >= part_sectors {
for h0_index in 0..NUM_H0_HASHES {
result.h0_hashes[h0_index + sector * 31] = zero_h0_hash;
h1_hasher.update(zero_h0_hash);
}
} else {
stream
.seek(SeekFrom::Start(part_sector as u64 * SECTOR_DATA_SIZE as u64))
.with_context(|| format!("Seeking to sector {}", part_sector))?;
stream
.read_exact(&mut data_buf)
.with_context(|| format!("Reading sector {}", part_sector))?;
for h0_index in 0..NUM_H0_HASHES {
let h0_hash = hash_bytes(array_ref![
data_buf,
h0_index * HASHES_SIZE,
HASHES_SIZE
]);
result.h0_hashes[h0_index + sector * 31] = h0_hash;
h1_hasher.update(h0_hash);
}
};
let h1_hash = h1_hasher.finalize().into();
result.h1_hashes[sector] = h1_hash;
h2_hasher.update(h1_hash);
}
let h2_hash = h2_hasher.finalize().into();
result.h2_hashes[h2_index] = h2_hash;
h3_hasher.update(h2_hash);
}
result.h3_hash = h3_hasher.finalize().into();
let mut hash_table = mutex.lock().map_err(|_| "Failed to lock mutex")?;
hash_table.extend(h3_index, &result);
Ok(())
},
)?;
let hash_table = Arc::try_unwrap(mutex)
.map_err(|_| "Failed to unwrap Arc")?
.into_inner()
.map_err(|_| "Failed to lock mutex")?;
hash_tables.push(hash_table);
}
// Verify against H3 table
for (part, hash_table) in reader.partitions.clone().iter().zip(hash_tables.iter()) {
log::debug!(
"Verifying H3 table for partition {} (count {})",
part.index,
hash_table.h3_hashes.len()
);
reader
.seek(SeekFrom::Start(
part.start_sector as u64 * SECTOR_SIZE as u64 + part.header.h3_table_off(),
))
.context("Seeking to H3 table")?;
let h3_table: Box<[HashBytes]> =
read_box_slice(reader, hash_table.h3_hashes.len()).context("Reading H3 table")?;
let mut mismatches = 0;
for (idx, (expected_hash, h3_hash)) in
h3_table.iter().zip(hash_table.h3_hashes.iter()).enumerate()
{
if expected_hash != h3_hash {
let mut got_bytes = [0u8; 40];
let got = base16ct::lower::encode_str(h3_hash, &mut got_bytes).unwrap();
let mut expected_bytes = [0u8; 40];
let expected =
base16ct::lower::encode_str(expected_hash, &mut expected_bytes).unwrap();
log::debug!(
"Partition {} H3 table does not match:\n\tindex {}\n\texpected: {}\n\tgot: {}",
part.index, idx, expected, got
);
mismatches += 1;
}
}
if mismatches > 0 {
log::warn!("Partition {} H3 table has {} hash mismatches", part.index, mismatches);
}
}
for (part, hash_table) in reader.partitions.iter_mut().zip(hash_tables) {
part.hash_table = Some(hash_table);
}
log::info!("Rebuilt hashes in {:?}", start.elapsed());
Ok(())
}
#[inline]
pub fn hash_bytes(buf: &[u8]) -> HashBytes {
let mut hasher = Sha1::new();
hasher.update(buf);
hasher.finalize().into()
}

nod/src/disc/mod.rs (new file)

@@ -0,0 +1,382 @@
//! Disc type related logic (GameCube, Wii)
use std::{
borrow::Cow,
ffi::CStr,
fmt::{Debug, Display, Formatter},
io,
mem::size_of,
str::from_utf8,
};
use dyn_clone::DynClone;
use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes};
use crate::{
disc::wii::{Ticket, TmdHeader},
fst::Node,
static_assert,
streams::{ReadStream, SharedWindowedReadStream},
Fst, Result,
};
pub(crate) mod gcn;
pub(crate) mod hashes;
pub(crate) mod reader;
pub(crate) mod wii;
/// Size in bytes of a disc sector.
pub const SECTOR_SIZE: usize = 0x8000;
/// Shared GameCube & Wii disc header.
///
/// This header is always at the start of the disc image and within each Wii partition.
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct DiscHeader {
/// Game ID (e.g. GM8E01 for Metroid Prime)
pub game_id: [u8; 6],
/// Used in multi-disc games
pub disc_num: u8,
/// Disc version
pub disc_version: u8,
/// Audio streaming enabled
pub audio_streaming: u8,
/// Audio streaming buffer size
pub audio_stream_buf_size: u8,
/// Padding
_pad1: [u8; 14],
/// If this is a Wii disc, this will be 0x5D1C9EA3
pub wii_magic: U32,
/// If this is a GameCube disc, this will be 0xC2339F3D
pub gcn_magic: U32,
/// Game title
pub game_title: [u8; 64],
/// If 1, disc omits partition hashes
pub no_partition_hashes: u8,
/// If 1, disc omits partition encryption
pub no_partition_encryption: u8,
/// Padding
_pad2: [u8; 926],
}
static_assert!(size_of::<DiscHeader>() == 0x400);
impl DiscHeader {
/// Game ID as a string.
pub fn game_id_str(&self) -> &str { from_utf8(&self.game_id).unwrap_or("[invalid]") }
/// Game title as a string.
pub fn game_title_str(&self) -> &str {
CStr::from_bytes_until_nul(&self.game_title)
.ok()
.and_then(|c| c.to_str().ok())
.unwrap_or("[invalid]")
}
/// Whether this is a GameCube disc.
pub fn is_gamecube(&self) -> bool { self.gcn_magic.get() == 0xC2339F3D }
/// Whether this is a Wii disc.
pub fn is_wii(&self) -> bool { self.wii_magic.get() == 0x5D1C9EA3 }
}
/// A header describing the contents of a disc partition.
///
/// **GameCube**: Always follows the disc header.
///
/// **Wii**: Follows the disc header within each partition.
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct PartitionHeader {
/// Debug monitor offset
pub debug_mon_offset: U32,
/// Debug monitor load address
pub debug_load_address: U32,
/// Padding
_pad1: [u8; 0x18],
/// Offset to main DOL (Wii: >> 2)
pub dol_offset: U32,
/// Offset to file system table (Wii: >> 2)
pub fst_offset: U32,
/// File system size (Wii: >> 2)
pub fst_size: U32,
/// File system max size (Wii: >> 2)
pub fst_max_size: U32,
/// File system table load address
pub fst_memory_address: U32,
/// User position
pub user_position: U32,
/// User size
pub user_size: U32,
/// Padding
_pad2: [u8; 4],
}
static_assert!(size_of::<PartitionHeader>() == 0x40);
impl PartitionHeader {
/// Offset within the partition to the main DOL.
pub fn dol_offset(&self, is_wii: bool) -> u64 {
if is_wii {
self.dol_offset.get() as u64 * 4
} else {
self.dol_offset.get() as u64
}
}
/// Offset within the partition to the file system table (FST).
pub fn fst_offset(&self, is_wii: bool) -> u64 {
if is_wii {
self.fst_offset.get() as u64 * 4
} else {
self.fst_offset.get() as u64
}
}
/// Size of the file system table (FST).
pub fn fst_size(&self, is_wii: bool) -> u64 {
if is_wii {
self.fst_size.get() as u64 * 4
} else {
self.fst_size.get() as u64
}
}
/// Maximum size of the file system table (FST) across multi-disc games.
pub fn fst_max_size(&self, is_wii: bool) -> u64 {
if is_wii {
self.fst_max_size.get() as u64 * 4
} else {
self.fst_max_size.get() as u64
}
}
}
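// Worked example of the shifted offsets above (hypothetical stored value):
// on Wii, an `fst_offset` field holding 0x0001_2000 decodes to byte offset
// 0x0001_2000 << 2 = 0x0004_8000; on GameCube the stored value is already
// the byte offset.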
/// Apploader header.
#[derive(Debug, PartialEq, Clone, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct ApploaderHeader {
/// Apploader build date
pub date: [u8; 16],
/// Entry point
pub entry_point: U32,
/// Apploader size
pub size: U32,
/// Apploader trailer size
pub trailer_size: U32,
/// Padding
_pad: [u8; 4],
}
impl ApploaderHeader {
/// Apploader build date as a string.
pub fn date_str(&self) -> Option<&str> {
CStr::from_bytes_until_nul(&self.date).ok().and_then(|c| c.to_str().ok())
}
}
/// Maximum number of text sections in a DOL.
pub const DOL_MAX_TEXT_SECTIONS: usize = 7;
/// Maximum number of data sections in a DOL.
pub const DOL_MAX_DATA_SECTIONS: usize = 11;
/// Dolphin executable (DOL) header.
#[derive(Debug, Clone, FromBytes, FromZeroes)]
pub struct DolHeader {
/// Text section offsets
pub text_offs: [U32; DOL_MAX_TEXT_SECTIONS],
/// Data section offsets
pub data_offs: [U32; DOL_MAX_DATA_SECTIONS],
/// Text section addresses
pub text_addrs: [U32; DOL_MAX_TEXT_SECTIONS],
/// Data section addresses
pub data_addrs: [U32; DOL_MAX_DATA_SECTIONS],
/// Text section sizes
pub text_sizes: [U32; DOL_MAX_TEXT_SECTIONS],
/// Data section sizes
pub data_sizes: [U32; DOL_MAX_DATA_SECTIONS],
/// BSS address
pub bss_addr: U32,
/// BSS size
pub bss_size: U32,
/// Entry point
pub entry_point: U32,
/// Padding
_pad: [u8; 0x1C],
}
static_assert!(size_of::<DolHeader>() == 0x100);
/// The kind of disc partition.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum PartitionKind {
/// Data partition.
Data,
/// Update partition.
Update,
/// Channel partition.
Channel,
/// Other partition kind.
Other(u32),
}
impl Display for PartitionKind {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
Self::Data => write!(f, "Data"),
Self::Update => write!(f, "Update"),
Self::Channel => write!(f, "Channel"),
Self::Other(v) => {
let bytes = v.to_be_bytes();
write!(f, "Other ({:08X}, {})", v, String::from_utf8_lossy(&bytes))
}
}
}
}
impl PartitionKind {
/// Returns the directory name for the partition kind.
pub fn dir_name(&self) -> Cow<str> {
match self {
Self::Data => Cow::Borrowed("DATA"),
Self::Update => Cow::Borrowed("UPDATE"),
Self::Channel => Cow::Borrowed("CHANNEL"),
Self::Other(v) => {
let bytes = v.to_be_bytes();
Cow::Owned(format!("P-{}", String::from_utf8_lossy(&bytes)))
}
}
}
}
impl From<u32> for PartitionKind {
fn from(v: u32) -> Self {
match v {
0 => Self::Data,
1 => Self::Update,
2 => Self::Channel,
v => Self::Other(v),
}
}
}
/// An open disc partition.
pub trait PartitionBase: DynClone + ReadStream + Send + Sync {
/// Reads the partition header and file system table.
fn meta(&mut self) -> Result<Box<PartitionMeta>>;
/// Seeks the read stream to the specified file system node
/// and returns a windowed stream.
///
/// # Examples
///
/// Basic usage:
/// ```no_run
/// use std::io::Read;
///
/// use nod::{Disc, PartitionKind};
///
/// fn main() -> nod::Result<()> {
/// let disc = Disc::new("path/to/file.iso")?;
/// let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
/// let meta = partition.meta()?;
/// let fst = meta.fst()?;
/// if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
/// let mut s = String::new();
/// partition
/// .open_file(node)
/// .expect("Failed to open file stream")
/// .read_to_string(&mut s)
/// .expect("Failed to read file");
/// println!("{}", s);
/// }
/// Ok(())
/// }
/// ```
fn open_file(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream>;
/// The ideal size for buffered reads from this partition.
/// GameCube discs have a data block size of 0x8000,
/// whereas Wii discs have a data block size of 0x7C00.
fn ideal_buffer_size(&self) -> usize;
}
dyn_clone::clone_trait_object!(PartitionBase);
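// Sketch: sizing buffered reads with `ideal_buffer_size` (hypothetical caller;
// assumes `partition: Box<dyn PartitionBase>`, an open `file` stream from
// `open_file`, and a writer `out`):
//     let mut buf = vec![0u8; partition.ideal_buffer_size()];
//     loop {
//         let n = file.read(&mut buf)?;
//         if n == 0 { break; }
//         out.write_all(&buf[..n])?;
//     }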
/// Size of the disc header and partition header (boot.bin)
pub const BOOT_SIZE: usize = size_of::<DiscHeader>() + size_of::<PartitionHeader>();
/// Size of the debug and region information (bi2.bin)
pub const BI2_SIZE: usize = 0x2000;
/// Extra disc partition data. (DOL, FST, etc.)
#[derive(Clone, Debug)]
pub struct PartitionMeta {
/// Disc and partition header (boot.bin)
pub raw_boot: Box<[u8; BOOT_SIZE]>,
/// Debug and region information (bi2.bin)
pub raw_bi2: Box<[u8; BI2_SIZE]>,
/// Apploader (apploader.bin)
pub raw_apploader: Box<[u8]>,
/// File system table (fst.bin)
pub raw_fst: Box<[u8]>,
/// Main binary (main.dol)
pub raw_dol: Box<[u8]>,
/// Ticket (ticket.bin, Wii only)
pub raw_ticket: Option<Box<[u8]>>,
/// TMD (tmd.bin, Wii only)
pub raw_tmd: Option<Box<[u8]>>,
/// Certificate chain (cert.bin, Wii only)
pub raw_cert_chain: Option<Box<[u8]>>,
/// H3 hash table (h3.bin, Wii only)
pub raw_h3_table: Option<Box<[u8]>>,
}
impl PartitionMeta {
/// A view into the disc header.
pub fn header(&self) -> &DiscHeader {
DiscHeader::ref_from(&self.raw_boot[..size_of::<DiscHeader>()]).unwrap()
}
/// A view into the partition header.
pub fn partition_header(&self) -> &PartitionHeader {
PartitionHeader::ref_from(&self.raw_boot[size_of::<DiscHeader>()..]).unwrap()
}
/// A view into the apploader header.
pub fn apploader_header(&self) -> &ApploaderHeader {
ApploaderHeader::ref_from_prefix(&self.raw_apploader).unwrap()
}
/// A view into the file system table (FST).
pub fn fst(&self) -> Result<Fst, &'static str> { Fst::new(&self.raw_fst) }
/// A view into the DOL header.
pub fn dol_header(&self) -> &DolHeader { DolHeader::ref_from_prefix(&self.raw_dol).unwrap() }
/// A view into the ticket. (Wii only)
pub fn ticket(&self) -> Option<&Ticket> {
self.raw_ticket.as_ref().and_then(|v| Ticket::ref_from(v))
}
/// A view into the TMD. (Wii only)
pub fn tmd_header(&self) -> Option<&TmdHeader> {
self.raw_tmd.as_ref().and_then(|v| TmdHeader::ref_from_prefix(v))
}
}
/// The size of a single-layer MiniDVD. (1.4 GB)
///
/// GameCube games and some third-party Wii discs (Datel) use this format.
pub const MINI_DVD_SIZE: u64 = 1_459_978_240;
/// The size of a single-layer DVD. (4.7 GB)
///
/// The vast majority of Wii games use this format.
pub const SL_DVD_SIZE: u64 = 4_699_979_776;
/// The size of a dual-layer DVD. (8.5 GB)
///
/// A few larger Wii games use this format.
/// (Super Smash Bros. Brawl, Metroid Prime Trilogy, etc.)
pub const DL_DVD_SIZE: u64 = 8_511_160_320;

nod/src/disc/reader.rs (new file)

@@ -0,0 +1,315 @@
use std::{
cmp::min,
io,
io::{Read, Seek, SeekFrom},
};
use zerocopy::FromZeroes;
use crate::{
disc::{
gcn::PartitionGC,
hashes::{rebuild_hashes, HashTable},
wii::{PartitionWii, WiiPartEntry, WiiPartGroup, WiiPartitionHeader, WII_PART_GROUP_OFF},
DL_DVD_SIZE, MINI_DVD_SIZE, SL_DVD_SIZE,
},
io::block::{Block, BlockIO, PartitionInfo},
util::read::{read_box, read_from, read_vec},
DiscHeader, DiscMeta, Error, OpenOptions, PartitionBase, PartitionHeader, PartitionKind,
Result, ResultContext, SECTOR_SIZE,
};
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub enum EncryptionMode {
Encrypted,
Decrypted,
}
pub struct DiscReader {
io: Box<dyn BlockIO>,
block: Block,
block_buf: Box<[u8]>,
block_idx: u32,
sector_buf: Box<[u8; SECTOR_SIZE]>,
sector_idx: u32,
pos: u64,
mode: EncryptionMode,
disc_header: Box<DiscHeader>,
pub(crate) partitions: Vec<PartitionInfo>,
hash_tables: Vec<HashTable>,
}
impl Clone for DiscReader {
fn clone(&self) -> Self {
Self {
io: self.io.clone(),
block: Block::default(),
block_buf: <u8>::new_box_slice_zeroed(self.block_buf.len()),
block_idx: u32::MAX,
sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed(),
sector_idx: u32::MAX,
pos: 0,
mode: self.mode,
disc_header: self.disc_header.clone(),
partitions: self.partitions.clone(),
hash_tables: self.hash_tables.clone(),
}
}
}
impl DiscReader {
pub fn new(inner: Box<dyn BlockIO>, options: &OpenOptions) -> Result<Self> {
let block_size = inner.block_size();
let meta = inner.meta();
let mut reader = Self {
io: inner,
block: Block::default(),
block_buf: <u8>::new_box_slice_zeroed(block_size as usize),
block_idx: u32::MAX,
sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed(),
sector_idx: u32::MAX,
pos: 0,
mode: if options.rebuild_encryption {
EncryptionMode::Encrypted
} else {
EncryptionMode::Decrypted
},
disc_header: DiscHeader::new_box_zeroed(),
partitions: vec![],
hash_tables: vec![],
};
let disc_header: Box<DiscHeader> = read_box(&mut reader).context("Reading disc header")?;
reader.disc_header = disc_header;
if reader.disc_header.is_wii() {
reader.partitions = read_partition_info(&mut reader)?;
// Rebuild hashes if the format requires it
if (options.rebuild_encryption || options.validate_hashes) && meta.needs_hash_recovery {
rebuild_hashes(&mut reader)?;
}
}
reader.reset();
Ok(reader)
}
pub fn reset(&mut self) {
self.block = Block::default();
self.block_buf.fill(0);
self.block_idx = u32::MAX;
self.sector_buf.fill(0);
self.sector_idx = u32::MAX;
self.pos = 0;
}
pub fn disc_size(&self) -> u64 {
self.io.meta().disc_size.unwrap_or_else(|| guess_disc_size(&self.partitions))
}
pub fn header(&self) -> &DiscHeader { &self.disc_header }
pub fn partitions(&self) -> &[PartitionInfo] { &self.partitions }
pub fn meta(&self) -> DiscMeta { self.io.meta() }
/// Opens a new, decrypted partition read stream for the specified partition index.
pub fn open_partition(
&self,
index: usize,
options: &OpenOptions,
) -> Result<Box<dyn PartitionBase>> {
if self.disc_header.is_gamecube() {
if index == 0 {
Ok(PartitionGC::new(self.io.clone(), self.disc_header.clone())?)
} else {
Err(Error::DiscFormat("GameCube discs only have one partition".to_string()))
}
} else if let Some(part) = self.partitions.get(index) {
Ok(PartitionWii::new(self.io.clone(), self.disc_header.clone(), part, options)?)
} else {
Err(Error::DiscFormat(format!("Partition {index} not found")))
}
}
/// Opens a new, decrypted partition read stream for the first partition matching
/// the specified kind.
pub fn open_partition_kind(
&self,
kind: PartitionKind,
options: &OpenOptions,
) -> Result<Box<dyn PartitionBase>> {
if self.disc_header.is_gamecube() {
if kind == PartitionKind::Data {
Ok(PartitionGC::new(self.io.clone(), self.disc_header.clone())?)
} else {
Err(Error::DiscFormat("GameCube discs only have a data partition".to_string()))
}
} else if let Some(part) = self.partitions.iter().find(|v| v.kind == kind) {
Ok(PartitionWii::new(self.io.clone(), self.disc_header.clone(), part, options)?)
} else {
Err(Error::DiscFormat(format!("Partition type {kind} not found")))
}
}
}
impl Read for DiscReader {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let block_idx = (self.pos / self.block_buf.len() as u64) as u32;
let abs_sector = (self.pos / SECTOR_SIZE as u64) as u32;
let partition = if self.disc_header.is_wii() {
self.partitions.iter().find(|part| {
abs_sector >= part.data_start_sector && abs_sector < part.data_end_sector
})
} else {
None
};
// Read new block
if block_idx != self.block_idx {
self.block = self.io.read_block(self.block_buf.as_mut(), block_idx, partition)?;
self.block_idx = block_idx;
}
// Read new sector into buffer
if abs_sector != self.sector_idx {
if let Some(partition) = partition {
match self.mode {
EncryptionMode::Decrypted => self.block.decrypt(
self.sector_buf.as_mut(),
self.block_buf.as_ref(),
abs_sector,
partition,
)?,
EncryptionMode::Encrypted => self.block.encrypt(
self.sector_buf.as_mut(),
self.block_buf.as_ref(),
abs_sector,
partition,
)?,
}
} else {
self.block.copy_raw(
self.sector_buf.as_mut(),
self.block_buf.as_ref(),
abs_sector,
&self.disc_header,
)?;
}
self.sector_idx = abs_sector;
}
// Read from sector buffer
let offset = (self.pos % SECTOR_SIZE as u64) as usize;
let len = min(buf.len(), SECTOR_SIZE - offset);
buf[..len].copy_from_slice(&self.sector_buf[offset..offset + len]);
self.pos += len as u64;
Ok(len)
}
}
impl Seek for DiscReader {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
self.pos = match pos {
SeekFrom::Start(v) => v,
SeekFrom::End(_) => {
return Err(io::Error::new(
io::ErrorKind::Unsupported,
"BlockIOReader: SeekFrom::End is not supported".to_string(),
));
}
SeekFrom::Current(v) => self.pos.saturating_add_signed(v),
};
Ok(self.pos)
}
}
fn read_partition_info(reader: &mut DiscReader) -> Result<Vec<PartitionInfo>> {
reader.seek(SeekFrom::Start(WII_PART_GROUP_OFF)).context("Seeking to partition groups")?;
let part_groups: [WiiPartGroup; 4] = read_from(reader).context("Reading partition groups")?;
let mut part_info = Vec::new();
for (group_idx, group) in part_groups.iter().enumerate() {
let part_count = group.part_count.get();
if part_count == 0 {
continue;
}
reader
.seek(SeekFrom::Start(group.part_entry_off()))
.with_context(|| format!("Seeking to partition group {group_idx}"))?;
let entries: Vec<WiiPartEntry> = read_vec(reader, part_count as usize)
.with_context(|| format!("Reading partition group {group_idx}"))?;
for (part_idx, entry) in entries.iter().enumerate() {
let offset = entry.offset();
reader
.seek(SeekFrom::Start(offset))
.with_context(|| format!("Seeking to partition data {group_idx}:{part_idx}"))?;
let header: Box<WiiPartitionHeader> = read_box(reader)
.with_context(|| format!("Reading partition header {group_idx}:{part_idx}"))?;
let key = header.ticket.decrypt_title_key()?;
let start_offset = entry.offset();
if start_offset % SECTOR_SIZE as u64 != 0 {
return Err(Error::DiscFormat(format!(
"Partition {group_idx}:{part_idx} offset is not sector aligned",
)));
}
let data_start_offset = entry.offset() + header.data_off();
let data_end_offset = data_start_offset + header.data_size();
if data_start_offset % SECTOR_SIZE as u64 != 0
|| data_end_offset % SECTOR_SIZE as u64 != 0
{
return Err(Error::DiscFormat(format!(
"Partition {group_idx}:{part_idx} data is not sector aligned",
)));
}
let mut info = PartitionInfo {
index: part_info.len(),
kind: entry.kind.get().into(),
start_sector: (start_offset / SECTOR_SIZE as u64) as u32,
data_start_sector: (data_start_offset / SECTOR_SIZE as u64) as u32,
data_end_sector: (data_end_offset / SECTOR_SIZE as u64) as u32,
key,
header,
disc_header: DiscHeader::new_box_zeroed(),
partition_header: PartitionHeader::new_box_zeroed(),
hash_table: None,
};
let mut partition_reader = PartitionWii::new(
reader.io.clone(),
reader.disc_header.clone(),
&info,
&OpenOptions::default(),
)?;
info.disc_header = read_box(&mut partition_reader).context("Reading disc header")?;
info.partition_header =
read_box(&mut partition_reader).context("Reading partition header")?;
part_info.push(info);
}
}
Ok(part_info)
}
fn guess_disc_size(part_info: &[PartitionInfo]) -> u64 {
let max_offset = part_info
.iter()
.flat_map(|v| {
let offset = v.start_sector as u64 * SECTOR_SIZE as u64;
[
offset + v.header.tmd_off() + v.header.tmd_size(),
offset + v.header.cert_chain_off() + v.header.cert_chain_size(),
offset + v.header.h3_table_off() + v.header.h3_table_size(),
offset + v.header.data_off() + v.header.data_size(),
]
})
.max()
.unwrap_or(0x50000);
// TODO add FST offsets (decrypted partitions)
if max_offset <= MINI_DVD_SIZE && !part_info.iter().any(|v| v.kind == PartitionKind::Data) {
// Datel disc
MINI_DVD_SIZE
} else if max_offset < SL_DVD_SIZE {
SL_DVD_SIZE
} else {
DL_DVD_SIZE
}
}

nod/src/disc/wii.rs (new file)

@@ -0,0 +1,449 @@
use std::{
cmp::min,
ffi::CStr,
io,
io::{Read, Seek, SeekFrom},
mem::size_of,
};
use sha1::{Digest, Sha1};
use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes};
use crate::{
array_ref,
disc::{
gcn::{read_part_meta, PartitionGC},
PartitionBase, PartitionMeta, SECTOR_SIZE,
},
fst::{Node, NodeKind},
io::{
aes_decrypt,
block::{Block, BlockIO, PartitionInfo},
KeyBytes,
},
static_assert,
streams::{ReadStream, SharedWindowedReadStream},
util::{div_rem, read::read_box_slice},
DiscHeader, Error, OpenOptions, Result, ResultContext,
};
/// Size in bytes of the hashes block in a Wii disc sector
pub(crate) const HASHES_SIZE: usize = 0x400;
/// Size in bytes of the data block in a Wii disc sector (excluding hashes)
pub(crate) const SECTOR_DATA_SIZE: usize = SECTOR_SIZE - HASHES_SIZE; // 0x7C00
// ppki (Retail)
const RVL_CERT_ISSUER_PPKI_TICKET: &str = "Root-CA00000001-XS00000003";
#[rustfmt::skip]
const RETAIL_COMMON_KEYS: [KeyBytes; 3] = [
/* RVL_KEY_RETAIL */
[0xeb, 0xe4, 0x2a, 0x22, 0x5e, 0x85, 0x93, 0xe4, 0x48, 0xd9, 0xc5, 0x45, 0x73, 0x81, 0xaa, 0xf7],
/* RVL_KEY_KOREAN */
[0x63, 0xb8, 0x2b, 0xb4, 0xf4, 0x61, 0x4e, 0x2e, 0x13, 0xf2, 0xfe, 0xfb, 0xba, 0x4c, 0x9b, 0x7e],
/* vWii_KEY_RETAIL */
[0x30, 0xbf, 0xc7, 0x6e, 0x7c, 0x19, 0xaf, 0xbb, 0x23, 0x16, 0x33, 0x30, 0xce, 0xd7, 0xc2, 0x8d],
];
// dpki (Debug)
const RVL_CERT_ISSUER_DPKI_TICKET: &str = "Root-CA00000002-XS00000006";
#[rustfmt::skip]
const DEBUG_COMMON_KEYS: [KeyBytes; 3] = [
/* RVL_KEY_DEBUG */
[0xa1, 0x60, 0x4a, 0x6a, 0x71, 0x23, 0xb5, 0x29, 0xae, 0x8b, 0xec, 0x32, 0xc8, 0x16, 0xfc, 0xaa],
/* RVL_KEY_KOREAN_DEBUG */
[0x67, 0x45, 0x8b, 0x6b, 0xc6, 0x23, 0x7b, 0x32, 0x69, 0x98, 0x3c, 0x64, 0x73, 0x48, 0x33, 0x66],
/* vWii_KEY_DEBUG */
[0x2f, 0x5c, 0x1b, 0x29, 0x44, 0xe7, 0xfd, 0x6f, 0xc3, 0x97, 0x96, 0x4b, 0x05, 0x76, 0x91, 0xfa],
];
#[derive(Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub(crate) struct WiiPartEntry {
pub(crate) offset: U32,
pub(crate) kind: U32,
}
static_assert!(size_of::<WiiPartEntry>() == 8);
impl WiiPartEntry {
pub(crate) fn offset(&self) -> u64 { (self.offset.get() as u64) << 2 }
}
pub(crate) const WII_PART_GROUP_OFF: u64 = 0x40000;
#[derive(Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub(crate) struct WiiPartGroup {
pub(crate) part_count: U32,
pub(crate) part_entry_off: U32,
}
static_assert!(size_of::<WiiPartGroup>() == 8);
impl WiiPartGroup {
pub(crate) fn part_entry_off(&self) -> u64 { (self.part_entry_off.get() as u64) << 2 }
}
#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct SignedHeader {
/// Signature type, always 0x00010001 (RSA-2048)
pub sig_type: U32,
/// RSA-2048 signature
pub sig: [u8; 256],
_pad: [u8; 60],
}
static_assert!(size_of::<SignedHeader>() == 0x140);
#[derive(Debug, Clone, PartialEq, Default, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct TicketTimeLimit {
pub enable_time_limit: U32,
pub time_limit: U32,
}
static_assert!(size_of::<TicketTimeLimit>() == 8);
#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct Ticket {
pub header: SignedHeader,
pub sig_issuer: [u8; 64],
pub ecdh: [u8; 60],
pub version: u8,
_pad1: U16,
pub title_key: KeyBytes,
_pad2: u8,
pub ticket_id: [u8; 8],
pub console_id: [u8; 4],
pub title_id: [u8; 8],
_pad3: U16,
pub ticket_title_version: U16,
pub permitted_titles_mask: U32,
pub permit_mask: U32,
pub title_export_allowed: u8,
pub common_key_idx: u8,
_pad4: [u8; 48],
pub content_access_permissions: [u8; 64],
_pad5: [u8; 2],
pub time_limits: [TicketTimeLimit; 8],
}
static_assert!(size_of::<Ticket>() == 0x2A4);
impl Ticket {
pub fn decrypt_title_key(&self) -> Result<KeyBytes> {
let mut iv: KeyBytes = [0; 16];
iv[..8].copy_from_slice(&self.title_id);
let cert_issuer_ticket =
CStr::from_bytes_until_nul(&self.sig_issuer).ok().and_then(|c| c.to_str().ok());
let common_keys = match cert_issuer_ticket {
Some(RVL_CERT_ISSUER_PPKI_TICKET) => &RETAIL_COMMON_KEYS,
Some(RVL_CERT_ISSUER_DPKI_TICKET) => &DEBUG_COMMON_KEYS,
Some(v) => {
return Err(Error::DiscFormat(format!("unknown certificate issuer {:?}", v)));
}
None => {
return Err(Error::DiscFormat("failed to parse certificate issuer".to_string()));
}
};
let common_key = common_keys.get(self.common_key_idx as usize).ok_or(Error::DiscFormat(
format!("unknown common key index {}", self.common_key_idx),
))?;
let mut title_key = self.title_key;
aes_decrypt(common_key, iv, &mut title_key);
Ok(title_key)
}
}
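// Usage sketch (hypothetical caller): the IV is the 8-byte title ID
// zero-padded to 16 bytes, and `common_key_idx` selects entry 0 (retail),
// 1 (Korean) or 2 (vWii) from the issuer-matched key set above:
//
//     let title_key = partition_header.ticket.decrypt_title_key()?;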
#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct TmdHeader {
pub header: SignedHeader,
pub sig_issuer: [u8; 64],
pub version: u8,
pub ca_crl_version: u8,
pub signer_crl_version: u8,
pub is_vwii: u8,
pub ios_id: [u8; 8],
pub title_id: [u8; 8],
pub title_type: U32,
pub group_id: U16,
_pad1: [u8; 2],
pub region: U16,
pub ratings: KeyBytes,
_pad2: [u8; 12],
pub ipc_mask: [u8; 12],
_pad3: [u8; 18],
pub access_flags: U32,
pub title_version: U16,
pub num_contents: U16,
pub boot_idx: U16,
pub minor_version: U16,
}
static_assert!(size_of::<TmdHeader>() == 0x1E4);
pub const H3_TABLE_SIZE: usize = 0x18000;
#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct WiiPartitionHeader {
pub ticket: Ticket,
tmd_size: U32,
tmd_off: U32,
cert_chain_size: U32,
cert_chain_off: U32,
h3_table_off: U32,
data_off: U32,
data_size: U32,
}
static_assert!(size_of::<WiiPartitionHeader>() == 0x2C0);
impl WiiPartitionHeader {
pub fn tmd_size(&self) -> u64 { self.tmd_size.get() as u64 }
pub fn tmd_off(&self) -> u64 { (self.tmd_off.get() as u64) << 2 }
pub fn cert_chain_size(&self) -> u64 { self.cert_chain_size.get() as u64 }
pub fn cert_chain_off(&self) -> u64 { (self.cert_chain_off.get() as u64) << 2 }
pub fn h3_table_off(&self) -> u64 { (self.h3_table_off.get() as u64) << 2 }
pub fn h3_table_size(&self) -> u64 { H3_TABLE_SIZE as u64 }
pub fn data_off(&self) -> u64 { (self.data_off.get() as u64) << 2 }
pub fn data_size(&self) -> u64 { (self.data_size.get() as u64) << 2 }
}
pub struct PartitionWii {
io: Box<dyn BlockIO>,
partition: PartitionInfo,
block: Block,
block_buf: Box<[u8]>,
block_idx: u32,
sector_buf: Box<[u8; SECTOR_SIZE]>,
sector: u32,
pos: u64,
verify: bool,
raw_tmd: Box<[u8]>,
raw_cert_chain: Box<[u8]>,
raw_h3_table: Box<[u8]>,
}
impl Clone for PartitionWii {
fn clone(&self) -> Self {
Self {
io: self.io.clone(),
partition: self.partition.clone(),
block: Block::default(),
block_buf: <u8>::new_box_slice_zeroed(self.block_buf.len()),
block_idx: u32::MAX,
sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed(),
sector: u32::MAX,
pos: 0,
verify: self.verify,
raw_tmd: self.raw_tmd.clone(),
raw_cert_chain: self.raw_cert_chain.clone(),
raw_h3_table: self.raw_h3_table.clone(),
}
}
}
impl PartitionWii {
pub fn new(
inner: Box<dyn BlockIO>,
disc_header: Box<DiscHeader>,
partition: &PartitionInfo,
options: &OpenOptions,
) -> Result<Box<Self>> {
let block_size = inner.block_size();
let mut reader = PartitionGC::new(inner, disc_header)?;
// Read TMD, cert chain, and H3 table
let offset = partition.start_sector as u64 * SECTOR_SIZE as u64;
reader
.seek(SeekFrom::Start(offset + partition.header.tmd_off()))
.context("Seeking to TMD offset")?;
let raw_tmd: Box<[u8]> = read_box_slice(&mut reader, partition.header.tmd_size() as usize)
.context("Reading TMD")?;
reader
.seek(SeekFrom::Start(offset + partition.header.cert_chain_off()))
.context("Seeking to cert chain offset")?;
let raw_cert_chain: Box<[u8]> =
read_box_slice(&mut reader, partition.header.cert_chain_size() as usize)
.context("Reading cert chain")?;
reader
.seek(SeekFrom::Start(offset + partition.header.h3_table_off()))
.context("Seeking to H3 table offset")?;
let raw_h3_table: Box<[u8]> =
read_box_slice(&mut reader, H3_TABLE_SIZE).context("Reading H3 table")?;
Ok(Box::new(Self {
io: reader.into_inner(),
partition: partition.clone(),
block: Block::default(),
block_buf: <u8>::new_box_slice_zeroed(block_size as usize),
block_idx: u32::MAX,
sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed(),
sector: u32::MAX,
pos: 0,
verify: options.validate_hashes,
raw_tmd,
raw_cert_chain,
raw_h3_table,
}))
}
}
impl Read for PartitionWii {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let part_sector = (self.pos / SECTOR_DATA_SIZE as u64) as u32;
let abs_sector = self.partition.data_start_sector + part_sector;
if abs_sector >= self.partition.data_end_sector {
return Ok(0);
}
let block_idx =
(abs_sector as u64 * SECTOR_SIZE as u64 / self.block_buf.len() as u64) as u32;
// Read new block if necessary
if block_idx != self.block_idx {
self.block =
self.io.read_block(self.block_buf.as_mut(), block_idx, Some(&self.partition))?;
self.block_idx = block_idx;
}
// Decrypt sector if necessary
if abs_sector != self.sector {
self.block.decrypt(
self.sector_buf.as_mut(),
self.block_buf.as_ref(),
abs_sector,
&self.partition,
)?;
if self.verify {
verify_hashes(self.sector_buf.as_ref(), part_sector, self.raw_h3_table.as_ref())?;
}
self.sector = abs_sector;
}
let offset = (self.pos % SECTOR_DATA_SIZE as u64) as usize;
let len = min(buf.len(), SECTOR_DATA_SIZE - offset);
buf[..len]
.copy_from_slice(&self.sector_buf[HASHES_SIZE + offset..HASHES_SIZE + offset + len]);
self.pos += len as u64;
Ok(len)
}
}
impl Seek for PartitionWii {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
self.pos = match pos {
SeekFrom::Start(v) => v,
SeekFrom::End(_) => {
return Err(io::Error::new(
io::ErrorKind::Unsupported,
"WiiPartitionReader: SeekFrom::End is not supported".to_string(),
));
}
SeekFrom::Current(v) => self.pos.saturating_add_signed(v),
};
Ok(self.pos)
}
}
#[inline(always)]
pub(crate) fn as_digest(slice: &[u8; 20]) -> digest::Output<Sha1> { (*slice).into() }
fn verify_hashes(buf: &[u8; SECTOR_SIZE], part_sector: u32, h3_table: &[u8]) -> io::Result<()> {
let (cluster, sector) = div_rem(part_sector as usize, 8);
let (group, sub_group) = div_rem(cluster, 8);
// H0 hashes
for i in 0..31 {
let mut hash = Sha1::new();
hash.update(array_ref![buf, (i + 1) * 0x400, 0x400]);
let expected = as_digest(array_ref![buf, i * 20, 20]);
let output = hash.finalize();
if output != expected {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Invalid H0 hash! (block {:?}) {:x}\n\texpected {:x}", i, output, expected),
));
}
}
// H1 hash
{
let mut hash = Sha1::new();
hash.update(array_ref![buf, 0, 0x26C]);
let expected = as_digest(array_ref![buf, 0x280 + sector * 20, 20]);
let output = hash.finalize();
if output != expected {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Invalid H1 hash! (sector {}) {:x}\n\texpected {:x}",
sector, output, expected
),
));
}
}
// H2 hash
{
let mut hash = Sha1::new();
hash.update(array_ref![buf, 0x280, 0xA0]);
let expected = as_digest(array_ref![buf, 0x340 + sub_group * 20, 20]);
let output = hash.finalize();
if output != expected {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Invalid H2 hash! (subgroup {}) {:x}\n\texpected {:x}",
sub_group, output, expected
),
));
}
}
// H3 hash
{
let mut hash = Sha1::new();
hash.update(array_ref![buf, 0x340, 0xA0]);
let expected = as_digest(array_ref![h3_table, group * 20, 20]);
let output = hash.finalize();
if output != expected {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Invalid H3 hash! {:x}\n\texpected {:x}", output, expected),
));
}
}
Ok(())
}
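#[cfg(test)]
mod hash_math_tests {
    use super::*;

    // Sketch of the group/subgroup arithmetic used by `verify_hashes`:
    // 8 sectors per subgroup, 8 subgroups per group.
    #[test]
    fn sector_to_group_math() {
        // Partition sector 27 is sector 3 of subgroup 3 of group 0.
        let (cluster, sector) = div_rem(27usize, 8);
        assert_eq!((cluster, sector), (3, 3));
        let (group, sub_group) = div_rem(cluster, 8);
        assert_eq!((group, sub_group), (0, 3));
    }
}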
impl PartitionBase for PartitionWii {
fn meta(&mut self) -> Result<Box<PartitionMeta>> {
self.seek(SeekFrom::Start(0)).context("Seeking to partition header")?;
let mut meta = read_part_meta(self, true)?;
meta.raw_ticket = Some(Box::from(self.partition.header.ticket.as_bytes()));
meta.raw_tmd = Some(self.raw_tmd.clone());
meta.raw_cert_chain = Some(self.raw_cert_chain.clone());
meta.raw_h3_table = Some(self.raw_h3_table.clone());
Ok(meta)
}
fn open_file(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream> {
assert_eq!(node.kind(), NodeKind::File);
self.new_window(node.offset(true), node.length())
}
fn ideal_buffer_size(&self) -> usize { SECTOR_DATA_SIZE }
}

nod/src/fst.rs (new file, 170 lines)
@@ -0,0 +1,170 @@
//! Disc file system types
use std::{borrow::Cow, ffi::CStr, mem::size_of};
use encoding_rs::SHIFT_JIS;
use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes};
use crate::{static_assert, Result};
/// File system node kind.
#[derive(Clone, Debug, PartialEq)]
pub enum NodeKind {
/// Node is a file.
File,
/// Node is a directory.
Directory,
/// Invalid node kind. (Should not normally occur)
Invalid,
}
/// An individual file system node.
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct Node {
kind: u8,
// u24 big-endian
name_offset: [u8; 3],
offset: U32,
length: U32,
}
static_assert!(size_of::<Node>() == 12);
impl Node {
/// File system node kind.
pub fn kind(&self) -> NodeKind {
match self.kind {
0 => NodeKind::File,
1 => NodeKind::Directory,
_ => NodeKind::Invalid,
}
}
/// Whether the node is a file.
pub fn is_file(&self) -> bool { self.kind == 0 }
/// Whether the node is a directory.
pub fn is_dir(&self) -> bool { self.kind == 1 }
/// Offset in the string table to the filename.
pub fn name_offset(&self) -> u32 {
u32::from_be_bytes([0, self.name_offset[0], self.name_offset[1], self.name_offset[2]])
}
/// For files, this is the partition offset of the file data. (Wii: >> 2)
///
/// For directories, this is the parent node index in the FST.
pub fn offset(&self, is_wii: bool) -> u64 {
if is_wii && self.kind == 0 {
self.offset.get() as u64 * 4
} else {
self.offset.get() as u64
}
}
/// For files, this is the byte size of the file.
///
/// For directories, this is the child end index in the FST.
///
/// Number of child files and directories recursively is `length - offset`.
pub fn length(&self) -> u64 { self.length.get() as u64 }
}
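#[cfg(test)]
mod node_tests {
    use super::*;

    // Sketch of the 24-bit big-endian name offset and the Wii `<< 2` file
    // offset scaling, using hypothetical field values.
    #[test]
    fn name_offset_and_wii_offset() {
        let node = Node {
            kind: 0, // file
            name_offset: [0x01, 0x02, 0x03],
            offset: U32::new(0x100),
            length: U32::new(0x40),
        };
        assert_eq!(node.name_offset(), 0x010203);
        assert_eq!(node.offset(true), 0x400); // Wii file offsets are stored >> 2
        assert_eq!(node.offset(false), 0x100);
    }
}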
/// A view into the file system table (FST).
pub struct Fst<'a> {
/// The nodes in the FST.
pub nodes: &'a [Node],
/// The string table containing all file and directory names.
pub string_table: &'a [u8],
}
impl<'a> Fst<'a> {
/// Create a new FST view from a buffer.
pub fn new(buf: &'a [u8]) -> Result<Self, &'static str> {
let Some(root_node) = Node::ref_from_prefix(buf) else {
return Err("FST root node not found");
};
// String table starts after the last node
let string_base = root_node.length() * size_of::<Node>() as u64;
if string_base >= buf.len() as u64 {
return Err("FST string table out of bounds");
}
let (node_buf, string_table) = buf.split_at(string_base as usize);
let nodes = Node::slice_from(node_buf).unwrap();
Ok(Self { nodes, string_table })
}
/// Iterate over the nodes in the FST.
pub fn iter(&self) -> FstIter { FstIter { fst: self, idx: 1 } }
/// Get the name of a node.
pub fn get_name(&self, node: &Node) -> Result<Cow<str>, String> {
let name_buf = self.string_table.get(node.name_offset() as usize..).ok_or_else(|| {
format!(
"FST: name offset {} out of bounds (string table size: {})",
node.name_offset(),
self.string_table.len()
)
})?;
let c_string = CStr::from_bytes_until_nul(name_buf).map_err(|_| {
format!("FST: name at offset {} not null-terminated", node.name_offset())
})?;
let (decoded, _, errors) = SHIFT_JIS.decode(c_string.to_bytes());
if errors {
return Err(format!("FST: Failed to decode name at offset {}", node.name_offset()));
}
Ok(decoded)
}
/// Finds a particular file or directory by path.
pub fn find(&self, path: &str) -> Option<(usize, &Node)> {
let mut split = path.trim_matches('/').split('/');
let mut current = split.next()?;
let mut idx = 1;
let mut stop_at = None;
while let Some(node) = self.nodes.get(idx) {
if self.get_name(node).as_ref().map_or(false, |name| name.eq_ignore_ascii_case(current))
{
if let Some(next) = split.next() {
current = next;
} else {
return Some((idx, node));
}
// Descend into directory
idx += 1;
stop_at = Some(node.length() as usize);
} else if node.is_dir() {
// Skip directory
idx = node.length() as usize;
} else {
// Skip file
idx += 1;
}
if let Some(stop) = stop_at {
if idx >= stop {
break;
}
}
}
None
}
}
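// Usage sketch (assumes `fst_buf` holds a raw FST and `is_wii` is known from
// the disc header):
//
//     let fst = Fst::new(&fst_buf)?;
//     if let Some((_, node)) = fst.find("files/opening.bnr") {
//         println!("offset {:#x}, size {:#x}", node.offset(is_wii), node.length());
//     }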
/// Iterator over the nodes in an FST.
pub struct FstIter<'a> {
fst: &'a Fst<'a>,
idx: usize,
}
impl<'a> Iterator for FstIter<'a> {
type Item = (usize, &'a Node, Result<Cow<'a, str>, String>);
fn next(&mut self) -> Option<Self::Item> {
let idx = self.idx;
let node = self.fst.nodes.get(idx)?;
let name = self.fst.get_name(node);
self.idx += 1;
Some((idx, node, name))
}
}

nod/src/io/block.rs (new file, 350 lines)
@@ -0,0 +1,350 @@
use std::{cmp::min, fs, fs::File, io, path::Path};
use dyn_clone::DynClone;
use zerocopy::transmute_ref;
use crate::{
array_ref,
disc::{
hashes::HashTable,
wii::{WiiPartitionHeader, HASHES_SIZE, SECTOR_DATA_SIZE},
SECTOR_SIZE,
},
io::{aes_decrypt, aes_encrypt, KeyBytes, MagicBytes},
util::{lfg::LaggedFibonacci, read::read_from},
DiscHeader, DiscMeta, Error, PartitionHeader, PartitionKind, Result, ResultContext,
};
/// Block I/O trait for reading disc images.
pub trait BlockIO: DynClone + Send + Sync {
/// Reads a block from the disc image.
fn read_block_internal(
&mut self,
out: &mut [u8],
block: u32,
partition: Option<&PartitionInfo>,
) -> io::Result<Block>;
/// Reads a full block from the disc image, combining smaller blocks if necessary.
fn read_block(
&mut self,
out: &mut [u8],
block: u32,
partition: Option<&PartitionInfo>,
) -> io::Result<Block> {
let block_size_internal = self.block_size_internal();
let block_size = self.block_size();
if block_size_internal == block_size {
self.read_block_internal(out, block, partition)
} else {
let mut offset = 0usize;
let mut result = None;
let mut block_idx =
((block as u64 * block_size as u64) / block_size_internal as u64) as u32;
while offset < block_size as usize {
let block = self.read_block_internal(
&mut out[offset..offset + block_size_internal as usize],
block_idx,
partition,
)?;
if result.is_none() {
result = Some(block);
} else if result != Some(block) {
if block == Block::Zero {
out[offset..offset + block_size_internal as usize].fill(0);
} else {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Inconsistent block types in split block",
));
}
}
offset += block_size_internal as usize;
block_idx += 1;
}
Ok(result.unwrap_or_default())
}
}
/// The format's block size in bytes. Can be smaller than the sector size (0x8000).
fn block_size_internal(&self) -> u32;
/// The block size used for processing. Must be a multiple of the sector size (0x8000).
fn block_size(&self) -> u32 { self.block_size_internal().max(SECTOR_SIZE as u32) }
/// Returns extra metadata included in the disc file format, if any.
fn meta(&self) -> DiscMeta;
}
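// Example of the combining logic above (hypothetical sizes): with a 0x800-byte
// internal block size and the 0x8000-byte processing size, reading processing
// block 2 issues internal reads for blocks 32..48 (16 reads of 0x800 bytes).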
dyn_clone::clone_trait_object!(BlockIO);
/// Creates a new [`BlockIO`] instance.
pub fn open(filename: &Path) -> Result<Box<dyn BlockIO>> {
let path_result = fs::canonicalize(filename);
if let Err(err) = path_result {
return Err(Error::Io(format!("Failed to open {}", filename.display()), err));
}
let path = path_result.as_ref().unwrap();
let meta = fs::metadata(path);
if let Err(err) = meta {
return Err(Error::Io(format!("Failed to open {}", filename.display()), err));
}
if !meta.unwrap().is_file() {
return Err(Error::DiscFormat(format!("Input is not a file: {}", filename.display())));
}
let magic: MagicBytes = {
let mut file =
File::open(path).with_context(|| format!("Opening file {}", filename.display()))?;
read_from(&mut file)
.with_context(|| format!("Reading magic bytes from {}", filename.display()))?
};
let io: Box<dyn BlockIO> = match magic {
crate::io::ciso::CISO_MAGIC => crate::io::ciso::DiscIOCISO::new(path)?,
#[cfg(feature = "compress-zlib")]
crate::io::gcz::GCZ_MAGIC => crate::io::gcz::DiscIOGCZ::new(path)?,
crate::io::nfs::NFS_MAGIC => match path.parent() {
Some(parent) if parent.is_dir() => crate::io::nfs::DiscIONFS::new(parent)?,
_ => {
return Err(Error::DiscFormat("Failed to locate NFS parent directory".to_string()));
}
},
crate::io::wbfs::WBFS_MAGIC => crate::io::wbfs::DiscIOWBFS::new(path)?,
crate::io::wia::WIA_MAGIC | crate::io::wia::RVZ_MAGIC => {
crate::io::wia::DiscIOWIA::new(path)?
}
_ => crate::io::iso::DiscIOISO::new(path)?,
};
if io.block_size_internal() < SECTOR_SIZE as u32
&& SECTOR_SIZE as u32 % io.block_size_internal() != 0
{
return Err(Error::DiscFormat(format!(
"Sector size {} is not divisible by block size {}",
SECTOR_SIZE,
io.block_size_internal(),
)));
}
if io.block_size() % SECTOR_SIZE as u32 != 0 {
return Err(Error::DiscFormat(format!(
"Block size {} is not a multiple of sector size {}",
io.block_size(),
SECTOR_SIZE
)));
}
Ok(io)
}
/// Wii partition information.
#[derive(Debug, Clone)]
pub struct PartitionInfo {
/// The partition index.
pub index: usize,
/// The kind of disc partition.
pub kind: PartitionKind,
/// The start sector of the partition.
pub start_sector: u32,
/// The start sector of the partition's (encrypted) data.
pub data_start_sector: u32,
/// The end sector of the partition's (encrypted) data.
pub data_end_sector: u32,
/// The AES key for the partition, also known as the "title key".
pub key: KeyBytes,
/// The Wii partition header.
pub header: Box<WiiPartitionHeader>,
/// The disc header within the partition.
pub disc_header: Box<DiscHeader>,
/// The partition header within the partition.
pub partition_header: Box<PartitionHeader>,
/// The hash table for the partition, if rebuilt.
pub hash_table: Option<HashTable>,
}
/// The block kind returned by [`BlockIO::read_block`].
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum Block {
/// Raw data or encrypted Wii partition data
Raw,
/// Decrypted Wii partition data
PartDecrypted {
/// Whether the sector has its hash block intact
has_hashes: bool,
},
/// Wii partition junk data
Junk,
/// All zeroes
#[default]
Zero,
}
impl Block {
/// Decrypts the block's data (if necessary) and writes it to the output buffer.
pub(crate) fn decrypt(
self,
out: &mut [u8; SECTOR_SIZE],
data: &[u8],
abs_sector: u32,
partition: &PartitionInfo,
) -> io::Result<()> {
let part_sector = abs_sector - partition.data_start_sector;
match self {
Block::Raw => {
out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
decrypt_sector(out, partition);
}
Block::PartDecrypted { has_hashes } => {
out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
if !has_hashes {
rebuild_hash_block(out, part_sector, partition);
}
}
Block::Junk => {
generate_junk(out, part_sector, Some(partition), &partition.disc_header);
rebuild_hash_block(out, part_sector, partition);
}
Block::Zero => {
out.fill(0);
rebuild_hash_block(out, part_sector, partition);
}
}
Ok(())
}
/// Encrypts the block's data (if necessary) and writes it to the output buffer.
pub(crate) fn encrypt(
self,
out: &mut [u8; SECTOR_SIZE],
data: &[u8],
abs_sector: u32,
partition: &PartitionInfo,
) -> io::Result<()> {
let part_sector = abs_sector - partition.data_start_sector;
match self {
Block::Raw => {
out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
}
Block::PartDecrypted { has_hashes } => {
out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
if !has_hashes {
rebuild_hash_block(out, part_sector, partition);
}
encrypt_sector(out, partition);
}
Block::Junk => {
generate_junk(out, part_sector, Some(partition), &partition.disc_header);
rebuild_hash_block(out, part_sector, partition);
encrypt_sector(out, partition);
}
Block::Zero => {
out.fill(0);
rebuild_hash_block(out, part_sector, partition);
encrypt_sector(out, partition);
}
}
Ok(())
}
/// Copies the block's raw data to the output buffer.
pub(crate) fn copy_raw(
self,
out: &mut [u8; SECTOR_SIZE],
data: &[u8],
abs_sector: u32,
disc_header: &DiscHeader,
) -> io::Result<()> {
match self {
Block::Raw => {
out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
}
Block::PartDecrypted { .. } => {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Cannot copy decrypted data as raw",
));
}
Block::Junk => generate_junk(out, abs_sector, None, disc_header),
Block::Zero => out.fill(0),
}
Ok(())
}
}
#[inline(always)]
fn block_sector<const N: usize>(data: &[u8], sector_idx: u32) -> io::Result<&[u8; N]> {
if data.len() % N != 0 {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Expected block size {} to be a multiple of {}", data.len(), N),
));
}
let rel_sector = sector_idx % (data.len() / N) as u32;
let offset = rel_sector as usize * N;
data.get(offset..offset + N)
.ok_or_else(|| {
io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Sector {} out of range (block size {}, sector size {})",
rel_sector,
data.len(),
N
),
)
})
.map(|v| unsafe { &*(v as *const [u8] as *const [u8; N]) })
}
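#[cfg(test)]
mod block_sector_tests {
    use super::*;

    // Sketch of how `block_sector` indexes a sector within a multi-sector
    // block, wrapping by the number of sectors the block holds.
    #[test]
    fn wraps_within_block() {
        let data = [0u8, 1, 2, 3, 4, 5, 6, 7]; // two 4-byte "sectors"
        let sector = block_sector::<4>(&data, 5).unwrap(); // 5 % 2 == 1
        assert_eq!(sector, &[4, 5, 6, 7]);
    }
}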
fn generate_junk(
out: &mut [u8; SECTOR_SIZE],
sector: u32,
partition: Option<&PartitionInfo>,
disc_header: &DiscHeader,
) {
let (mut pos, mut offset) = if partition.is_some() {
(sector as u64 * SECTOR_DATA_SIZE as u64, HASHES_SIZE)
} else {
(sector as u64 * SECTOR_SIZE as u64, 0)
};
out[..offset].fill(0);
while offset < SECTOR_SIZE {
// The LFG spans a single sector of the decrypted data,
// so we may need to initialize it multiple times
let mut lfg = LaggedFibonacci::default();
lfg.init_with_seed(*array_ref![disc_header.game_id, 0, 4], disc_header.disc_num, pos);
let sector_end = (pos + SECTOR_SIZE as u64) & !(SECTOR_SIZE as u64 - 1);
let len = min(SECTOR_SIZE - offset, (sector_end - pos) as usize);
lfg.fill(&mut out[offset..offset + len]);
pos += len as u64;
offset += len;
}
}
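// Worked example of the boundary math above: for partition sector 1,
// pos starts at 0x7C00, so sector_end = 0x8000 and only 0x400 bytes are
// filled before the LFG is re-seeded for the next sector of decrypted data.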
fn rebuild_hash_block(out: &mut [u8; SECTOR_SIZE], part_sector: u32, partition: &PartitionInfo) {
let Some(hash_table) = partition.hash_table.as_ref() else {
return;
};
let sector_idx = part_sector as usize;
let h0_hashes: &[u8; 0x26C] =
transmute_ref!(array_ref![hash_table.h0_hashes, sector_idx * 31, 31]);
out[0..0x26C].copy_from_slice(h0_hashes);
let h1_hashes: &[u8; 0xA0] =
transmute_ref!(array_ref![hash_table.h1_hashes, sector_idx & !7, 8]);
out[0x280..0x320].copy_from_slice(h1_hashes);
let h2_hashes: &[u8; 0xA0] =
transmute_ref!(array_ref![hash_table.h2_hashes, (sector_idx / 8) & !7, 8]);
out[0x340..0x3E0].copy_from_slice(h2_hashes);
}
fn encrypt_sector(out: &mut [u8; SECTOR_SIZE], partition: &PartitionInfo) {
aes_encrypt(&partition.key, [0u8; 16], &mut out[..HASHES_SIZE]);
// Data IV from encrypted hash block
let iv = *array_ref![out, 0x3D0, 16];
aes_encrypt(&partition.key, iv, &mut out[HASHES_SIZE..]);
}
fn decrypt_sector(out: &mut [u8; SECTOR_SIZE], partition: &PartitionInfo) {
// Data IV from encrypted hash block
let iv = *array_ref![out, 0x3D0, 16];
aes_decrypt(&partition.key, [0u8; 16], &mut out[..HASHES_SIZE]);
aes_decrypt(&partition.key, iv, &mut out[HASHES_SIZE..]);
}

nod/src/io/ciso.rs (new file, 132 lines)
@@ -0,0 +1,132 @@
use std::{
io,
io::{Read, Seek, SeekFrom},
mem::size_of,
path::Path,
};
use zerocopy::{little_endian::*, AsBytes, FromBytes, FromZeroes};
use crate::{
disc::SECTOR_SIZE,
io::{
block::{Block, BlockIO, PartitionInfo},
nkit::NKitHeader,
split::SplitFileReader,
Format, MagicBytes,
},
static_assert,
util::read::read_from,
DiscMeta, Error, Result, ResultContext,
};
pub const CISO_MAGIC: MagicBytes = *b"CISO";
pub const CISO_MAP_SIZE: usize = SECTOR_SIZE - 8;
/// CISO header (little endian)
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
struct CISOHeader {
magic: MagicBytes,
block_size: U32,
block_present: [u8; CISO_MAP_SIZE],
}
static_assert!(size_of::<CISOHeader>() == SECTOR_SIZE);
#[derive(Clone)]
pub struct DiscIOCISO {
inner: SplitFileReader,
header: CISOHeader,
block_map: [u16; CISO_MAP_SIZE],
nkit_header: Option<NKitHeader>,
}
impl DiscIOCISO {
pub fn new(filename: &Path) -> Result<Box<Self>> {
let mut inner = SplitFileReader::new(filename)?;
// Read header
let header: CISOHeader = read_from(&mut inner).context("Reading CISO header")?;
if header.magic != CISO_MAGIC {
return Err(Error::DiscFormat("Invalid CISO magic".to_string()));
}
// Build block map
let mut block_map = [0u16; CISO_MAP_SIZE];
let mut block = 0u16;
for (presence, out) in header.block_present.iter().zip(block_map.iter_mut()) {
if *presence == 1 {
*out = block;
block += 1;
} else {
*out = u16::MAX;
}
}
let file_size = SECTOR_SIZE as u64 + block as u64 * header.block_size.get() as u64;
if file_size > inner.len() {
return Err(Error::DiscFormat(format!(
"CISO file size mismatch: expected at least {} bytes, got {}",
file_size,
inner.len()
)));
}
// Read NKit header if present (after CISO data)
let nkit_header = if inner.len() > file_size + 4 {
inner.seek(SeekFrom::Start(file_size)).context("Seeking to NKit header")?;
NKitHeader::try_read_from(&mut inner, header.block_size.get(), true)
} else {
None
};
Ok(Box::new(Self { inner, header, block_map, nkit_header }))
}
}
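// Example of the map above: a `block_present` prefix of [1, 0, 1] yields a
// `block_map` prefix of [0, u16::MAX, 1], so logical block 2 reads physical
// block 1 while logical block 1 is zero-filled (or junk, per the NKit header).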
impl BlockIO for DiscIOCISO {
fn read_block_internal(
&mut self,
out: &mut [u8],
block: u32,
_partition: Option<&PartitionInfo>,
) -> io::Result<Block> {
if block >= CISO_MAP_SIZE as u32 {
// Out of bounds
return Ok(Block::Zero);
}
// Find the block in the map
let phys_block = self.block_map[block as usize];
if phys_block == u16::MAX {
// Check if block is junk data
if self.nkit_header.as_ref().and_then(|h| h.is_junk_block(block)).unwrap_or(false) {
return Ok(Block::Junk);
};
// Otherwise, read zeroes
return Ok(Block::Zero);
}
// Read block
let file_offset = size_of::<CISOHeader>() as u64
+ phys_block as u64 * self.header.block_size.get() as u64;
self.inner.seek(SeekFrom::Start(file_offset))?;
self.inner.read_exact(out)?;
Ok(Block::Raw)
}
fn block_size_internal(&self) -> u32 { self.header.block_size.get() }
fn meta(&self) -> DiscMeta {
let mut result = DiscMeta {
format: Format::Ciso,
block_size: Some(self.header.block_size.get()),
..Default::default()
};
if let Some(nkit_header) = &self.nkit_header {
nkit_header.apply(&mut result);
}
result
}
}

nod/src/io/gcz.rs (new file, 189 lines)
@@ -0,0 +1,189 @@
use std::{
io,
io::{Read, Seek, SeekFrom},
mem::size_of,
path::Path,
};
use adler::adler32_slice;
use miniz_oxide::{inflate, inflate::core::inflate_flags};
use zerocopy::{little_endian::*, AsBytes, FromBytes, FromZeroes};
use zstd::zstd_safe::WriteBuf;
use crate::{
io::{
block::{Block, BlockIO},
split::SplitFileReader,
MagicBytes,
},
static_assert,
util::read::{read_box_slice, read_from},
Compression, DiscMeta, Error, Format, PartitionInfo, Result, ResultContext,
};
pub const GCZ_MAGIC: MagicBytes = [0x01, 0xC0, 0x0B, 0xB1];
/// GCZ header (little endian)
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
struct GCZHeader {
magic: MagicBytes,
disc_type: U32,
compressed_size: U64,
disc_size: U64,
block_size: U32,
block_count: U32,
}
static_assert!(size_of::<GCZHeader>() == 32);
pub struct DiscIOGCZ {
inner: SplitFileReader,
header: GCZHeader,
block_map: Box<[U64]>,
block_hashes: Box<[U32]>,
block_buf: Box<[u8]>,
data_offset: u64,
}
impl Clone for DiscIOGCZ {
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
header: self.header.clone(),
block_map: self.block_map.clone(),
block_hashes: self.block_hashes.clone(),
block_buf: <u8>::new_box_slice_zeroed(self.block_buf.len()),
data_offset: self.data_offset,
}
}
}
impl DiscIOGCZ {
pub fn new(filename: &Path) -> Result<Box<Self>> {
let mut inner = SplitFileReader::new(filename)?;
// Read header
let header: GCZHeader = read_from(&mut inner).context("Reading GCZ header")?;
if header.magic != GCZ_MAGIC {
return Err(Error::DiscFormat("Invalid GCZ magic".to_string()));
}
// Read block map and hashes
let block_count = header.block_count.get();
let block_map =
read_box_slice(&mut inner, block_count as usize).context("Reading GCZ block map")?;
let block_hashes =
read_box_slice(&mut inner, block_count as usize).context("Reading GCZ block hashes")?;
// header + block_count * (u64 + u32)
let data_offset = size_of::<GCZHeader>() as u64 + block_count as u64 * 12;
let block_buf = <u8>::new_box_slice_zeroed(header.block_size.get() as usize);
Ok(Box::new(Self { inner, header, block_map, block_hashes, block_buf, data_offset }))
}
}
impl BlockIO for DiscIOGCZ {
fn read_block_internal(
&mut self,
out: &mut [u8],
block: u32,
_partition: Option<&PartitionInfo>,
) -> io::Result<Block> {
if block >= self.header.block_count.get() {
// Out of bounds
return Ok(Block::Zero);
}
// Find block offset and size
let mut file_offset = self.block_map[block as usize].get();
let mut compressed = true;
if file_offset & (1 << 63) != 0 {
file_offset &= !(1 << 63);
compressed = false;
}
let compressed_size =
((self.block_map.get(block as usize + 1).unwrap_or(&self.header.compressed_size).get()
& !(1 << 63))
- file_offset) as usize;
if compressed_size > self.block_buf.len() {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Compressed block size exceeds block size: {} > {}",
compressed_size,
self.block_buf.len()
),
));
} else if !compressed && compressed_size != self.block_buf.len() {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Uncompressed block size does not match block size: {} != {}",
compressed_size,
self.block_buf.len()
),
));
}
// Read block
self.inner.seek(SeekFrom::Start(self.data_offset + file_offset))?;
self.inner.read_exact(&mut self.block_buf[..compressed_size])?;
// Verify block checksum
let checksum = adler32_slice(&self.block_buf[..compressed_size]);
let expected_checksum = self.block_hashes[block as usize].get();
if checksum != expected_checksum {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Block checksum mismatch: {:#010x} != {:#010x}",
checksum, expected_checksum
),
));
}
if compressed {
// Decompress block
let mut decompressor = inflate::core::DecompressorOxide::new();
let input = &self.block_buf[..compressed_size];
let (status, in_size, out_size) = inflate::core::decompress(
&mut decompressor,
input,
out,
0,
inflate_flags::TINFL_FLAG_PARSE_ZLIB_HEADER
| inflate_flags::TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF,
);
if status != inflate::TINFLStatus::Done
|| in_size != compressed_size
|| out_size != self.block_buf.len()
{
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Deflate decompression failed: {:?} (in: {}, out: {})",
status, in_size, out_size
),
));
}
} else {
// Copy uncompressed block
out.copy_from_slice(self.block_buf.as_slice());
}
Ok(Block::Raw)
}
fn block_size_internal(&self) -> u32 { self.header.block_size.get() }
fn meta(&self) -> DiscMeta {
DiscMeta {
format: Format::Gcz,
compression: Compression::Deflate,
block_size: Some(self.header.block_size.get()),
lossless: true,
disc_size: Some(self.header.disc_size.get()),
..Default::default()
}
}
}
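// Example of the offset encoding above (hypothetical entry): a block map value
// of 0x8000_0000_0000_4000 has bit 63 set, so the block is stored uncompressed
// at 0x4000 bytes past `data_offset`.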

nod/src/io/iso.rs (new file, 65 lines)
@@ -0,0 +1,65 @@
use std::{
io,
io::{Read, Seek, SeekFrom},
path::Path,
};
use crate::{
disc::SECTOR_SIZE,
io::{
block::{Block, BlockIO, PartitionInfo},
split::SplitFileReader,
Format,
},
DiscMeta, Result,
};
#[derive(Clone)]
pub struct DiscIOISO {
inner: SplitFileReader,
}
impl DiscIOISO {
pub fn new(filename: &Path) -> Result<Box<Self>> {
let inner = SplitFileReader::new(filename)?;
Ok(Box::new(Self { inner }))
}
}
impl BlockIO for DiscIOISO {
fn read_block_internal(
&mut self,
out: &mut [u8],
block: u32,
_partition: Option<&PartitionInfo>,
) -> io::Result<Block> {
let offset = block as u64 * SECTOR_SIZE as u64;
let total_size = self.inner.len();
if offset >= total_size {
// End of file
return Ok(Block::Zero);
}
self.inner.seek(SeekFrom::Start(offset))?;
if offset + SECTOR_SIZE as u64 > total_size {
// If the last block is not a full sector, fill the rest with zeroes
let read = (total_size - offset) as usize;
self.inner.read_exact(&mut out[..read])?;
out[read..].fill(0);
} else {
self.inner.read_exact(out)?;
}
Ok(Block::Raw)
}
fn block_size_internal(&self) -> u32 { SECTOR_SIZE as u32 }
fn meta(&self) -> DiscMeta {
DiscMeta {
format: Format::Iso,
lossless: true,
disc_size: Some(self.inner.len()),
..Default::default()
}
}
}

nod/src/io/mod.rs (new file, 136 lines)
@@ -0,0 +1,136 @@
//! Disc file format related logic (CISO, NFS, WBFS, WIA, etc.)
use std::fmt;
pub(crate) mod block;
pub(crate) mod ciso;
#[cfg(feature = "compress-zlib")]
pub(crate) mod gcz;
pub(crate) mod iso;
pub(crate) mod nfs;
pub(crate) mod nkit;
pub(crate) mod split;
pub(crate) mod wbfs;
pub(crate) mod wia;
/// SHA-1 hash bytes
pub(crate) type HashBytes = [u8; 20];
/// AES key bytes
pub(crate) type KeyBytes = [u8; 16];
/// Magic bytes
pub(crate) type MagicBytes = [u8; 4];
/// The disc file format.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum Format {
/// ISO / GCM (GameCube master disc)
#[default]
Iso,
/// CISO (Compact ISO)
Ciso,
/// GCZ
Gcz,
/// NFS (Wii U VC)
Nfs,
/// RVZ
Rvz,
/// WBFS
Wbfs,
/// WIA
Wia,
}
impl fmt::Display for Format {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Format::Iso => write!(f, "ISO"),
Format::Ciso => write!(f, "CISO"),
Format::Gcz => write!(f, "GCZ"),
Format::Nfs => write!(f, "NFS"),
Format::Rvz => write!(f, "RVZ"),
Format::Wbfs => write!(f, "WBFS"),
Format::Wia => write!(f, "WIA"),
}
}
}
/// The disc file format's compression algorithm.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum Compression {
/// No compression
#[default]
None,
/// BZIP2
Bzip2,
/// Deflate (GCZ only)
Deflate,
/// LZMA
Lzma,
/// LZMA2
Lzma2,
/// Purge (WIA only)
Purge,
/// Zstandard
Zstandard,
}
impl fmt::Display for Compression {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Compression::None => write!(f, "None"),
Compression::Bzip2 => write!(f, "BZIP2"),
Compression::Deflate => write!(f, "Deflate"),
Compression::Lzma => write!(f, "LZMA"),
Compression::Lzma2 => write!(f, "LZMA2"),
Compression::Purge => write!(f, "Purge"),
Compression::Zstandard => write!(f, "Zstandard"),
}
}
}
/// Extra metadata about the underlying disc file format.
#[derive(Debug, Clone, Default)]
pub struct DiscMeta {
/// The disc file format.
pub format: Format,
/// The format's compression algorithm.
pub compression: Compression,
/// If the format uses blocks, the block size in bytes.
pub block_size: Option<u32>,
/// Whether Wii partitions are stored decrypted in the format.
pub decrypted: bool,
/// Whether the format omits Wii partition data hashes.
pub needs_hash_recovery: bool,
/// Whether the format supports recovering the original disc data losslessly.
pub lossless: bool,
/// The original disc's size in bytes, if stored by the format.
pub disc_size: Option<u64>,
/// The original disc's CRC32 hash, if stored by the format.
pub crc32: Option<u32>,
/// The original disc's MD5 hash, if stored by the format.
pub md5: Option<[u8; 16]>,
/// The original disc's SHA-1 hash, if stored by the format.
pub sha1: Option<[u8; 20]>,
/// The original disc's XXH64 hash, if stored by the format.
pub xxhash64: Option<u64>,
}
/// Encrypts data in-place using AES-128-CBC with the given key and IV.
#[inline(always)]
pub(crate) fn aes_encrypt(key: &KeyBytes, iv: KeyBytes, data: &mut [u8]) {
use aes::cipher::{block_padding::NoPadding, BlockEncryptMut, KeyIvInit};
<cbc::Encryptor<aes::Aes128>>::new(key.into(), &aes::Block::from(iv))
.encrypt_padded_mut::<NoPadding>(data, data.len())
.unwrap(); // Safe: using NoPadding
}
/// Decrypts data in-place using AES-128-CBC with the given key and IV.
#[inline(always)]
pub(crate) fn aes_decrypt(key: &KeyBytes, iv: KeyBytes, data: &mut [u8]) {
use aes::cipher::{block_padding::NoPadding, BlockDecryptMut, KeyIvInit};
<cbc::Decryptor<aes::Aes128>>::new(key.into(), &aes::Block::from(iv))
.decrypt_padded_mut::<NoPadding>(data)
.unwrap(); // Safe: using NoPadding
}

nod/src/io/nfs.rs (new file, 242 lines)
@@ -0,0 +1,242 @@
use std::{
fs::File,
io,
io::{BufReader, Read, Seek, SeekFrom},
mem::size_of,
path::{Component, Path, PathBuf},
};
use zerocopy::{big_endian::U32, AsBytes, FromBytes, FromZeroes};
use crate::{
disc::SECTOR_SIZE,
io::{
aes_decrypt,
block::{Block, BlockIO, PartitionInfo},
split::SplitFileReader,
Format, KeyBytes, MagicBytes,
},
static_assert,
util::read::read_from,
DiscMeta, Error, Result, ResultContext,
};
pub const NFS_MAGIC: MagicBytes = *b"EGGS";
pub const NFS_END_MAGIC: MagicBytes = *b"SGGE";
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
struct LBARange {
start_sector: U32,
num_sectors: U32,
}
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
struct NFSHeader {
magic: MagicBytes,
version: U32,
unk1: U32,
unk2: U32,
num_lba_ranges: U32,
lba_ranges: [LBARange; 61],
end_magic: MagicBytes,
}
static_assert!(size_of::<NFSHeader>() == 0x200);
impl NFSHeader {
fn validate(&self) -> Result<()> {
if self.magic != NFS_MAGIC {
return Err(Error::DiscFormat("Invalid NFS magic".to_string()));
}
if self.num_lba_ranges.get() > 61 {
return Err(Error::DiscFormat("Invalid NFS LBA range count".to_string()));
}
if self.end_magic != NFS_END_MAGIC {
return Err(Error::DiscFormat("Invalid NFS end magic".to_string()));
}
Ok(())
}
fn lba_ranges(&self) -> &[LBARange] { &self.lba_ranges[..self.num_lba_ranges.get() as usize] }
fn calculate_num_files(&self) -> u32 {
let sector_count =
self.lba_ranges().iter().fold(0u32, |acc, range| acc + range.num_sectors.get());
(((sector_count as u64) * (SECTOR_SIZE as u64)
+ (size_of::<NFSHeader>() as u64 + 0xF9FFFFFu64))
/ 0xFA00000u64) as u32
}
fn phys_sector(&self, sector: u32) -> u32 {
let mut cur_sector = 0u32;
for range in self.lba_ranges().iter() {
if sector >= range.start_sector.get()
&& sector - range.start_sector.get() < range.num_sectors.get()
{
return cur_sector + (sector - range.start_sector.get());
}
cur_sector += range.num_sectors.get();
}
u32::MAX
}
}
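// Example of the mapping above (hypothetical ranges): given LBA ranges
// [{start 0, num 16}, {start 64, num 8}], logical sector 65 falls in the
// second range and maps to physical sector 16 + (65 - 64) = 17, while any
// sector outside both ranges returns u32::MAX and is read as zeroes.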
#[derive(Clone)]
pub struct DiscIONFS {
inner: SplitFileReader,
header: NFSHeader,
raw_size: u64,
disc_size: u64,
key: KeyBytes,
}
impl DiscIONFS {
pub fn new(directory: &Path) -> Result<Box<Self>> {
let mut disc_io = Box::new(Self {
inner: SplitFileReader::empty(),
header: NFSHeader::new_zeroed(),
raw_size: 0,
disc_size: 0,
key: [0; 16],
});
disc_io.load_files(directory)?;
Ok(disc_io)
}
}
impl BlockIO for DiscIONFS {
fn read_block_internal(
&mut self,
out: &mut [u8],
sector: u32,
partition: Option<&PartitionInfo>,
) -> io::Result<Block> {
// Calculate physical sector
let phys_sector = self.header.phys_sector(sector);
if phys_sector == u32::MAX {
// Logical zero sector
return Ok(Block::Zero);
}
// Read sector
let offset = size_of::<NFSHeader>() as u64 + phys_sector as u64 * SECTOR_SIZE as u64;
self.inner.seek(SeekFrom::Start(offset))?;
self.inner.read_exact(out)?;
// Decrypt
let iv_bytes = sector.to_be_bytes();
#[rustfmt::skip]
let iv: KeyBytes = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
iv_bytes[0], iv_bytes[1], iv_bytes[2], iv_bytes[3],
];
aes_decrypt(&self.key, iv, out);
if partition.is_some() {
Ok(Block::PartDecrypted { has_hashes: true })
} else {
Ok(Block::Raw)
}
}
fn block_size_internal(&self) -> u32 { SECTOR_SIZE as u32 }
fn meta(&self) -> DiscMeta {
DiscMeta { format: Format::Nfs, decrypted: true, ..Default::default() }
}
}
fn get_path<P>(directory: &Path, path: P) -> PathBuf
where P: AsRef<Path> {
let mut buf = directory.to_path_buf();
for component in path.as_ref().components() {
match component {
Component::ParentDir => {
buf.pop();
}
_ => buf.push(component),
}
}
buf
}
fn get_nfs(directory: &Path, num: u32) -> Result<PathBuf> {
let path = get_path(directory, format!("hif_{:06}.nfs", num));
if path.exists() {
Ok(path)
} else {
Err(Error::DiscFormat(format!("Failed to locate {}", path.display())))
}
}
impl DiscIONFS {
pub fn load_files(&mut self, directory: &Path) -> Result<()> {
{
// Load key file
let primary_key_path =
get_path(directory, ["..", "code", "htk.bin"].iter().collect::<PathBuf>());
let secondary_key_path = get_path(directory, "htk.bin");
let mut key_path = primary_key_path.canonicalize();
if key_path.is_err() {
key_path = secondary_key_path.canonicalize();
}
if key_path.is_err() {
return Err(Error::DiscFormat(format!(
"Failed to locate {} or {}",
primary_key_path.display(),
secondary_key_path.display()
)));
}
let resolved_path = key_path.unwrap();
File::open(resolved_path.as_path())
.map_err(|v| Error::Io(format!("Failed to open {}", resolved_path.display()), v))?
.read(&mut self.key)
.map_err(|v| Error::Io(format!("Failed to read {}", resolved_path.display()), v))?;
}
{
// Load header from first file
let path = get_nfs(directory, 0)?;
self.inner.add(&path)?;
let mut file = BufReader::new(
File::open(&path).with_context(|| format!("Opening file {}", path.display()))?,
);
let header: NFSHeader = read_from(&mut file)
.with_context(|| format!("Reading NFS header from file {}", path.display()))?;
header.validate()?;
// log::debug!("{:?}", header);
// Ensure remaining files exist
for i in 1..header.calculate_num_files() {
self.inner.add(&get_nfs(directory, i)?)?;
}
// Calculate sizes
let num_sectors =
header.lba_ranges().iter().map(|range| range.num_sectors.get()).sum::<u32>();
let max_sector = header
.lba_ranges()
.iter()
.map(|range| range.start_sector.get() + range.num_sectors.get())
.max()
.unwrap();
let raw_size = size_of::<NFSHeader>() + (num_sectors as usize * SECTOR_SIZE);
let data_size = max_sector as usize * SECTOR_SIZE;
if raw_size > self.inner.len() as usize {
return Err(Error::DiscFormat(format!(
"NFS raw size mismatch: expected at least {}, got {}",
raw_size,
self.inner.len()
)));
}
self.header = header;
self.raw_size = raw_size as u64;
self.disc_size = data_size as u64;
}
Ok(())
}
}

nod/src/io/nkit.rs (new file, 169 lines)
@@ -0,0 +1,169 @@
use std::{
io,
io::{Read, Seek, SeekFrom},
};
use crate::{
disc::DL_DVD_SIZE,
io::MagicBytes,
util::read::{read_from, read_u16_be, read_u32_be, read_u64_be, read_vec},
DiscMeta,
};
#[allow(unused)]
#[repr(u16)]
enum NKitHeaderFlags {
Size = 0x1,
Crc32 = 0x2,
Md5 = 0x4,
Sha1 = 0x8,
Xxhash64 = 0x10,
Key = 0x20,
Encrypted = 0x40,
ExtraData = 0x80,
IndexFile = 0x100,
}
const NKIT_HEADER_V1_FLAGS: u16 = NKitHeaderFlags::Crc32 as u16
| NKitHeaderFlags::Md5 as u16
| NKitHeaderFlags::Sha1 as u16
| NKitHeaderFlags::Xxhash64 as u16;
const fn calc_header_size(version: u8, flags: u16, key_len: u32) -> usize {
let mut size = 8;
if version >= 2 {
// header size + flags
size += 4;
}
if flags & NKitHeaderFlags::Size as u16 != 0 {
size += 8;
}
if flags & NKitHeaderFlags::Crc32 as u16 != 0 {
size += 4;
}
if flags & NKitHeaderFlags::Md5 as u16 != 0 {
size += 16;
}
if flags & NKitHeaderFlags::Sha1 as u16 != 0 {
size += 20;
}
if flags & NKitHeaderFlags::Xxhash64 as u16 != 0 {
size += 8;
}
if flags & NKitHeaderFlags::Key as u16 != 0 {
size += key_len as usize + 2;
}
size
}
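// Example: a v2 header with only the Size and Crc32 flags set occupies
// 8 (version string) + 4 (header size + flags) + 8 (size) + 4 (CRC32)
// = 24 bytes.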
#[allow(unused)]
#[derive(Debug, Clone)]
pub struct NKitHeader {
pub version: u8,
pub flags: u16,
pub size: Option<u64>,
pub crc32: Option<u32>,
pub md5: Option<[u8; 16]>,
pub sha1: Option<[u8; 20]>,
pub xxhash64: Option<u64>,
/// Bitstream of blocks that are junk data
pub junk_bits: Option<Vec<u8>>,
pub block_size: u32,
}
const VERSION_PREFIX: [u8; 7] = *b"NKIT v";
impl NKitHeader {
pub fn try_read_from<R>(reader: &mut R, block_size: u32, has_junk_bits: bool) -> Option<Self>
where R: Read + Seek + ?Sized {
let magic: MagicBytes = read_from(reader).ok()?;
if magic == *b"NKIT" {
reader.seek(SeekFrom::Current(-4)).ok()?;
match NKitHeader::read_from(reader, block_size, has_junk_bits) {
Ok(header) => Some(header),
Err(e) => {
log::warn!("Failed to read NKit header: {}", e);
None
}
}
} else {
None
}
}
pub fn read_from<R>(reader: &mut R, block_size: u32, has_junk_bits: bool) -> io::Result<Self>
where R: Read + ?Sized {
let version_string: [u8; 8] = read_from(reader)?;
if version_string[0..7] != VERSION_PREFIX
|| version_string[7] < b'1'
|| version_string[7] > b'9'
{
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Invalid NKit header version string",
));
}
let version = version_string[7] - b'0';
let header_size = match version {
1 => calc_header_size(version, NKIT_HEADER_V1_FLAGS, 0) as u16,
2 => read_u16_be(reader)?,
_ => {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Unsupported NKit header version: {}", version),
));
}
};
let mut remaining_header_size = header_size as usize - 8;
if version >= 2 {
// We read the header size already
remaining_header_size -= 2;
}
let header_bytes = read_vec(reader, remaining_header_size)?;
let mut inner = &header_bytes[..];
let flags = if version == 1 { NKIT_HEADER_V1_FLAGS } else { read_u16_be(&mut inner)? };
let size = (flags & NKitHeaderFlags::Size as u16 != 0)
.then(|| read_u64_be(&mut inner))
.transpose()?;
let crc32 = (flags & NKitHeaderFlags::Crc32 as u16 != 0)
.then(|| read_u32_be(&mut inner))
.transpose()?;
let md5 = (flags & NKitHeaderFlags::Md5 as u16 != 0)
.then(|| read_from::<[u8; 16], _>(&mut inner))
.transpose()?;
let sha1 = (flags & NKitHeaderFlags::Sha1 as u16 != 0)
.then(|| read_from::<[u8; 20], _>(&mut inner))
.transpose()?;
let xxhash64 = (flags & NKitHeaderFlags::Xxhash64 as u16 != 0)
.then(|| read_u64_be(&mut inner))
.transpose()?;
let junk_bits = if has_junk_bits {
let n = DL_DVD_SIZE.div_ceil(block_size as u64).div_ceil(8);
Some(read_vec(reader, n as usize)?)
} else {
None
};
Ok(Self { version, flags, size, crc32, md5, sha1, xxhash64, junk_bits, block_size })
}
pub fn is_junk_block(&self, block: u32) -> Option<bool> {
self.junk_bits
.as_ref()
.and_then(|v| v.get((block / 8) as usize))
.map(|&b| b & (1 << (7 - (block & 7))) != 0)
}
pub fn apply(&self, meta: &mut DiscMeta) {
meta.needs_hash_recovery |= self.junk_bits.is_some();
meta.lossless |= self.size.is_some() && self.junk_bits.is_some();
meta.disc_size = meta.disc_size.or(self.size);
meta.crc32 = self.crc32;
meta.md5 = self.md5;
meta.sha1 = self.sha1;
meta.xxhash64 = self.xxhash64;
}
}
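// Example of the bit addressing in `is_junk_block`: block 10 maps to byte 1,
// bit mask 1 << (7 - 2) = 0x20, i.e. bits are stored most significant first.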

nod/src/io/split.rs (new file, 149 lines)
@@ -0,0 +1,149 @@
use std::{
cmp::min,
fs::File,
io,
io::{BufReader, Read, Seek, SeekFrom},
path::{Path, PathBuf},
};
use crate::{ErrorContext, Result, ResultContext};
#[derive(Debug)]
pub struct SplitFileReader {
files: Vec<Split<PathBuf>>,
open_file: Option<Split<BufReader<File>>>,
pos: u64,
}
#[derive(Debug, Clone)]
struct Split<T> {
inner: T,
begin: u64,
size: u64,
}
impl<T> Split<T> {
fn contains(&self, pos: u64) -> bool { self.begin <= pos && pos < self.begin + self.size }
}
// .iso.1, .iso.2, etc.
fn split_path_1(input: &Path, index: u32) -> PathBuf {
let input_str = input.to_str().unwrap_or("[INVALID]");
let mut out = input_str.to_string();
out.push('.');
out.push(char::from_digit(index, 10).unwrap());
PathBuf::from(out)
}
// .part1.iso, .part2.iso, etc.
fn split_path_2(input: &Path, index: u32) -> PathBuf {
let extension = input.extension().and_then(|s| s.to_str()).unwrap_or("iso");
let input_without_ext = input.with_extension("");
let input_str = input_without_ext.to_str().unwrap_or("[INVALID]");
let mut out = input_str.to_string();
out.push_str(".part");
out.push(char::from_digit(index, 10).unwrap());
out.push('.');
out.push_str(extension);
PathBuf::from(out)
}
// .wbf1, .wbf2, etc.
fn split_path_3(input: &Path, index: u32) -> PathBuf {
let input_str = input.to_str().unwrap_or("[INVALID]");
let mut chars = input_str.chars();
chars.next_back();
let mut out = chars.as_str().to_string();
out.push(char::from_digit(index, 10).unwrap());
PathBuf::from(out)
}
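// Examples of the three conventions above: "game.iso" may continue as
// "game.iso.1" (split_path_1) or "game.part1.iso" (split_path_2), and
// "game.wbfs" as "game.wbf1" (split_path_3).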
impl SplitFileReader {
pub fn empty() -> Self { Self { files: Vec::new(), open_file: None, pos: 0 } }
pub fn new(path: &Path) -> Result<Self> {
let mut files = vec![];
let mut begin = 0;
match path.metadata() {
Ok(metadata) => {
files.push(Split { inner: path.to_path_buf(), begin, size: metadata.len() });
begin += metadata.len();
}
Err(e) => {
return Err(e.context(format!("Failed to stat file {}", path.display())));
}
}
for path_fn in [split_path_1, split_path_2, split_path_3] {
let mut index = 1;
loop {
let path = path_fn(path, index);
if let Ok(metadata) = path.metadata() {
files.push(Split { inner: path, begin, size: metadata.len() });
begin += metadata.len();
index += 1;
} else {
break;
}
}
if index > 1 {
break;
}
}
Ok(Self { files, open_file: None, pos: 0 })
}
pub fn add(&mut self, path: &Path) -> Result<()> {
let begin = self.len();
let metadata =
path.metadata().context(format!("Failed to stat file {}", path.display()))?;
self.files.push(Split { inner: path.to_path_buf(), begin, size: metadata.len() });
Ok(())
}
pub fn len(&self) -> u64 { self.files.last().map_or(0, |f| f.begin + f.size) }
}
impl Read for SplitFileReader {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if self.open_file.is_none() || !self.open_file.as_ref().unwrap().contains(self.pos) {
self.open_file = if let Some(split) = self.files.iter().find(|f| f.contains(self.pos)) {
let mut file = BufReader::new(File::open(&split.inner)?);
// log::info!("Opened file {} at pos {}", split.inner.display(), self.pos);
file.seek(SeekFrom::Start(self.pos - split.begin))?;
Some(Split { inner: file, begin: split.begin, size: split.size })
} else {
None
};
}
let Some(split) = self.open_file.as_mut() else {
return Ok(0);
};
let to_read = min(buf.len(), (split.begin + split.size - self.pos) as usize);
let read = split.inner.read(&mut buf[..to_read])?;
self.pos += read as u64;
Ok(read)
}
}
impl Seek for SplitFileReader {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
self.pos = match pos {
SeekFrom::Start(pos) => pos,
SeekFrom::Current(offset) => self.pos.saturating_add_signed(offset),
SeekFrom::End(offset) => self.len().saturating_add_signed(offset),
};
if let Some(split) = &mut self.open_file {
if split.contains(self.pos) {
// Seek within the open file
split.inner.seek(SeekFrom::Start(self.pos - split.begin))?;
} else {
self.open_file = None;
}
}
Ok(self.pos)
}
}
impl Clone for SplitFileReader {
fn clone(&self) -> Self { Self { files: self.files.clone(), open_file: None, pos: 0 } }
}

nod/src/io/wbfs.rs (new file, 153 lines)
@@ -0,0 +1,153 @@
use std::{
io,
io::{Read, Seek, SeekFrom},
mem::size_of,
path::Path,
};
use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes};
use crate::{
io::{
block::{Block, BlockIO, PartitionInfo},
nkit::NKitHeader,
split::SplitFileReader,
DiscMeta, Format, MagicBytes,
},
util::read::{read_box_slice, read_from},
Error, Result, ResultContext,
};
pub const WBFS_MAGIC: MagicBytes = *b"WBFS";
#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
struct WBFSHeader {
magic: MagicBytes,
num_sectors: U32,
sector_size_shift: u8,
block_size_shift: u8,
_pad: [u8; 2],
}
impl WBFSHeader {
fn sector_size(&self) -> u32 { 1 << self.sector_size_shift }
fn block_size(&self) -> u32 { 1 << self.block_size_shift }
// fn align_lba(&self, x: u32) -> u32 { (x + self.sector_size() - 1) & !(self.sector_size() - 1) }
//
// fn num_wii_sectors(&self) -> u32 {
// (self.num_sectors.get() / SECTOR_SIZE as u32) * self.sector_size()
// }
//
// fn max_wii_sectors(&self) -> u32 { NUM_WII_SECTORS }
//
// fn num_wbfs_sectors(&self) -> u32 {
// self.num_wii_sectors() >> (self.wbfs_sector_size_shift - 15)
// }
fn max_blocks(&self) -> u32 { NUM_WII_SECTORS >> (self.block_size_shift - 15) }
}
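// Example: with block_size_shift = 21 (2 MiB blocks), max_blocks() is
// 286864 >> 6 = 4482 for a double-layer disc.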
const DISC_HEADER_SIZE: usize = 0x100;
const NUM_WII_SECTORS: u32 = 143432 * 2; // Double layer discs
#[derive(Clone)]
pub struct DiscIOWBFS {
inner: SplitFileReader,
/// WBFS header
header: WBFSHeader,
/// Map of Wii LBAs to WBFS LBAs
block_map: Box<[U16]>,
/// Optional NKit header
nkit_header: Option<NKitHeader>,
}
impl DiscIOWBFS {
pub fn new(filename: &Path) -> Result<Box<Self>> {
let mut inner = SplitFileReader::new(filename)?;
let header: WBFSHeader = read_from(&mut inner).context("Reading WBFS header")?;
if header.magic != WBFS_MAGIC {
return Err(Error::DiscFormat("Invalid WBFS magic".to_string()));
}
let file_len = inner.len();
let expected_file_len = header.num_sectors.get() as u64 * header.sector_size() as u64;
if file_len != expected_file_len {
return Err(Error::DiscFormat(format!(
"Invalid WBFS file size: {}, expected {}",
file_len, expected_file_len
)));
}
let disc_table: Box<[u8]> =
read_box_slice(&mut inner, header.sector_size() as usize - size_of::<WBFSHeader>())
.context("Reading WBFS disc table")?;
if disc_table[0] != 1 {
return Err(Error::DiscFormat("WBFS doesn't contain a disc".to_string()));
}
if disc_table[1../*max_disc as usize*/].iter().any(|&x| x != 0) {
return Err(Error::DiscFormat("Only single WBFS discs are supported".to_string()));
}
// Read WBFS LBA map
inner
.seek(SeekFrom::Start(header.sector_size() as u64 + DISC_HEADER_SIZE as u64))
.context("Seeking to WBFS LBA table")?; // Skip header
let block_map: Box<[U16]> = read_box_slice(&mut inner, header.max_blocks() as usize)
.context("Reading WBFS LBA table")?;
// Read NKit header if present (always at 0x10000)
inner.seek(SeekFrom::Start(0x10000)).context("Seeking to NKit header")?;
let nkit_header = NKitHeader::try_read_from(&mut inner, header.block_size(), true);
Ok(Box::new(Self { inner, header, block_map, nkit_header }))
}
}
impl BlockIO for DiscIOWBFS {
fn read_block_internal(
&mut self,
out: &mut [u8],
block: u32,
_partition: Option<&PartitionInfo>,
) -> io::Result<Block> {
let block_size = self.header.block_size();
if block >= self.header.max_blocks() {
return Ok(Block::Zero);
}
// Find the block in the map
let phys_block = self.block_map[block as usize].get();
if phys_block == 0 {
// Check if block is junk data
if self.nkit_header.as_ref().and_then(|h| h.is_junk_block(block)).unwrap_or(false) {
return Ok(Block::Junk);
}
// Otherwise, read zeroes
return Ok(Block::Zero);
}
// Read block
let block_start = block_size as u64 * phys_block as u64;
self.inner.seek(SeekFrom::Start(block_start))?;
self.inner.read_exact(out)?;
Ok(Block::Raw)
}
fn block_size_internal(&self) -> u32 { self.header.block_size() }
fn meta(&self) -> DiscMeta {
let mut result = DiscMeta {
format: Format::Wbfs,
block_size: Some(self.header.block_size()),
..Default::default()
};
if let Some(nkit_header) = &self.nkit_header {
nkit_header.apply(&mut result);
}
result
}
}

nod/src/io/wia.rs (new file, 913 lines)
@@ -0,0 +1,913 @@
use std::{
io,
io::{Read, Seek, SeekFrom},
mem::size_of,
path::Path,
};
use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes};
use crate::{
disc::{
hashes::hash_bytes,
wii::{HASHES_SIZE, SECTOR_DATA_SIZE},
SECTOR_SIZE,
},
io::{
block::{Block, BlockIO, PartitionInfo},
nkit::NKitHeader,
split::SplitFileReader,
Compression, Format, HashBytes, KeyBytes, MagicBytes,
},
static_assert,
util::{
compress::{lzma2_props_decode, lzma_props_decode, new_lzma2_decoder, new_lzma_decoder},
lfg::LaggedFibonacci,
read::{read_box_slice, read_from, read_u16_be, read_vec},
take_seek::TakeSeekExt,
},
DiscMeta, Error, Result, ResultContext,
};
pub const WIA_MAGIC: MagicBytes = *b"WIA\x01";
pub const RVZ_MAGIC: MagicBytes = *b"RVZ\x01";
/// This struct is stored at offset 0x0 and is 0x48 bytes long. The wit source code says its format
/// will never be changed.
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct WIAFileHeader {
pub magic: MagicBytes,
/// The WIA format version.
///
/// A short note from the wit source code about how version numbers are encoded:
///
/// ```c
/// //-----------------------------------------------------
/// // Format of version number: AABBCCDD = A.BB | A.BB.CC
/// // If D != 0x00 && D != 0xff => append: 'beta' D
/// //-----------------------------------------------------
/// ```
pub version: U32,
/// If the reading program supports the version of WIA indicated here, it can read the file.
///
/// [version](Self::version) can be higher than `version_compatible`.
pub version_compatible: U32,
/// The size of the [WIADisc] struct.
pub disc_size: U32,
/// The SHA-1 hash of the [WIADisc] struct.
///
/// The number of bytes to hash is determined by [disc_size](Self::disc_size).
pub disc_hash: HashBytes,
/// The original size of the ISO.
pub iso_file_size: U64,
/// The size of this file.
pub wia_file_size: U64,
/// The SHA-1 hash of this struct, up to but not including `file_head_hash` itself.
pub file_head_hash: HashBytes,
}
static_assert!(size_of::<WIAFileHeader>() == 0x48);
impl WIAFileHeader {
pub fn validate(&self) -> Result<()> {
// Check magic
if self.magic != WIA_MAGIC && self.magic != RVZ_MAGIC {
return Err(Error::DiscFormat(format!("Invalid WIA/RVZ magic: {:#X?}", self.magic)));
}
// Check file head hash
let bytes = self.as_bytes();
verify_hash(&bytes[..bytes.len() - size_of::<HashBytes>()], &self.file_head_hash)?;
// Check version compatibility
if self.version_compatible.get() < 0x30000 {
return Err(Error::DiscFormat(format!(
"WIA/RVZ version {:#X} is not supported",
self.version_compatible
)));
}
Ok(())
}
pub fn is_rvz(&self) -> bool { self.magic == RVZ_MAGIC }
}
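// Hedged sketch (not part of the original file): unpacking the AABBCCDD
// version encoding described in the header comment above. The field names
// are illustrative only.
#[allow(dead_code)]
fn decode_wia_version(v: u32) -> (u8, u8, u8, u8) {
    // A.BB.CC, with DD appended as "beta DD" when it is neither 0x00 nor 0xFF.
    ((v >> 24) as u8, (v >> 16) as u8, (v >> 8) as u8, v as u8)
}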
/// Disc kind
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DiscKind {
/// GameCube disc
GameCube,
/// Wii disc
Wii,
}
impl TryFrom<u32> for DiscKind {
type Error = Error;
fn try_from(value: u32) -> Result<Self> {
match value {
1 => Ok(Self::GameCube),
2 => Ok(Self::Wii),
v => Err(Error::DiscFormat(format!("Invalid disc type {}", v))),
}
}
}
/// Compression type
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum WIACompression {
/// No compression.
None,
/// (WIA only) See [WIASegment]
Purge,
/// BZIP2 compression
Bzip2,
/// LZMA compression
Lzma,
/// LZMA2 compression
Lzma2,
/// (RVZ only) Zstandard compression
Zstandard,
}
impl TryFrom<u32> for WIACompression {
type Error = Error;
fn try_from(value: u32) -> Result<Self> {
match value {
0 => Ok(Self::None),
1 => Ok(Self::Purge),
2 => Ok(Self::Bzip2),
3 => Ok(Self::Lzma),
4 => Ok(Self::Lzma2),
5 => Ok(Self::Zstandard),
v => Err(Error::DiscFormat(format!("Invalid compression type {}", v))),
}
}
}
const DISC_HEAD_SIZE: usize = 0x80;
/// This struct is stored at offset 0x48, immediately after [WIAFileHeader].
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct WIADisc {
/// The disc type. (1 = GameCube, 2 = Wii)
pub disc_type: U32,
/// The compression type.
pub compression: U32,
/// The compression level used by the compressor.
///
/// The possible values are compressor-specific.
///
/// RVZ only:
/// > This is signed (instead of unsigned) to support negative compression levels in
/// [Zstandard](WIACompression::Zstandard) (RVZ only).
pub compression_level: I32,
/// The size of the chunks that data is divided into.
///
/// WIA only:
/// > Must be a multiple of 2 MiB.
///
/// RVZ only:
/// > Chunk sizes smaller than 2 MiB are supported. The following applies when using a chunk size
/// smaller than 2 MiB:
/// > - The chunk size must be at least 32 KiB and must be a power of two. (Just like with WIA,
/// sizes larger than 2 MiB do not have to be a power of two, they just have to be an integer
/// multiple of 2 MiB.)
/// > - For Wii partition data, each chunk contains one [WIAExceptionList] which contains
/// exceptions for that chunk (and no other chunks). Offset 0 refers to the first hash of the
/// current chunk, not the first hash of the full 2 MiB of data.
pub chunk_size: U32,
/// The first 0x80 bytes of the disc image.
pub disc_head: [u8; DISC_HEAD_SIZE],
/// The number of [WIAPartition] structs.
pub num_partitions: U32,
/// The size of one [WIAPartition] struct.
///
/// If this is smaller than the size of [WIAPartition], fill the missing bytes with 0x00.
pub partition_type_size: U32,
/// The offset in the file where the [WIAPartition] structs are stored (uncompressed).
pub partition_offset: U64,
/// The SHA-1 hash of the [WIAPartition] structs.
///
/// The number of bytes to hash is determined by `num_partitions * partition_type_size`.
pub partition_hash: HashBytes,
/// The number of [WIARawData] structs.
pub num_raw_data: U32,
/// The offset in the file where the [WIARawData] structs are stored (compressed).
pub raw_data_offset: U64,
/// The total compressed size of the [WIARawData] structs.
pub raw_data_size: U32,
/// The number of [WIAGroup] structs.
pub num_groups: U32,
/// The offset in the file where the [WIAGroup] structs are stored (compressed).
pub group_offset: U64,
/// The total compressed size of the [WIAGroup] structs.
pub group_size: U32,
/// The number of used bytes in the [compr_data](Self::compr_data) array.
pub compr_data_len: u8,
/// Compressor specific data.
///
/// If the compression method is [None](WIACompression::None), [Purge](WIACompression::Purge),
/// [Bzip2](WIACompression::Bzip2), or [Zstandard](WIACompression::Zstandard) (RVZ only),
/// [compr_data_len](Self::compr_data_len) is 0. If the compression method is
/// [Lzma](WIACompression::Lzma) or [Lzma2](WIACompression::Lzma2), the compressor specific data is
/// stored in the format used by the 7-Zip SDK. It needs to be converted if you are using e.g.
/// liblzma.
///
/// For [Lzma](WIACompression::Lzma), the data is 5 bytes long. The first byte encodes the `lc`,
/// `pb`, and `lp` parameters, and the four other bytes encode the dictionary size in little
/// endian.
pub compr_data: [u8; 7],
}
static_assert!(size_of::<WIADisc>() == 0xDC);
impl WIADisc {
pub fn validate(&self) -> Result<()> {
DiscKind::try_from(self.disc_type.get())?;
WIACompression::try_from(self.compression.get())?;
if self.partition_type_size.get() != size_of::<WIAPartition>() as u32 {
return Err(Error::DiscFormat(format!(
"WIA/RVZ partition type size is {}, expected {}",
self.partition_type_size.get(),
size_of::<WIAPartition>()
)));
}
Ok(())
}
pub fn compression(&self) -> WIACompression {
WIACompression::try_from(self.compression.get()).unwrap()
}
}
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct WIAPartitionData {
/// The sector on the disc at which this data starts.
/// One sector is 32 KiB (or 31 KiB excluding hashes).
pub first_sector: U32,
/// The number of sectors on the disc covered by this struct.
/// One sector is 32 KiB (or 31 KiB excluding hashes).
pub num_sectors: U32,
/// The index of the first [WIAGroup] struct that points to the data covered by this struct.
/// The other [WIAGroup] indices follow sequentially.
pub group_index: U32,
/// The number of [WIAGroup] structs used for this data.
pub num_groups: U32,
}
static_assert!(size_of::<WIAPartitionData>() == 0x10);
impl WIAPartitionData {
pub fn contains(&self, sector: u32) -> bool {
let start = self.first_sector.get();
sector >= start && sector < start + self.num_sectors.get()
}
}
/// This struct is used for keeping track of Wii partition data that on the actual disc is encrypted
/// and hashed. This does not include the unencrypted area at the beginning of partitions that
/// contains the ticket, TMD, certificate chain, and H3 table. So for a typical game partition,
/// `pd[0].first_sector * 0x8000` would be 0x0F820000, not 0x0F800000.
///
/// Wii partition data is stored decrypted and with hashes removed. For each 0x8000 bytes on the
/// disc, 0x7C00 bytes are stored in the WIA file (prior to compression). If the hashes are desired,
/// the reading program must first recalculate the hashes as done when creating a Wii disc image
/// from scratch (see <https://wiibrew.org/wiki/Wii_Disc>), and must then apply the hash exceptions
/// which are stored along with the data (see the [WIAExceptionList] section).
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct WIAPartition {
/// The title key for this partition (128-bit AES), which can be used for re-encrypting the
/// partition data.
///
/// This key can be used directly, without decrypting it using the Wii common key.
pub partition_key: KeyBytes,
/// To quote the wit source code: `segment 0 is small and defined for management data (boot ..
/// fst). segment 1 takes the remaining data.`
///
/// The point at which wit splits the two segments is the FST end offset rounded up to the next
/// 2 MiB. Giving the first segment a size which is not a multiple of 2 MiB is likely a bad idea
/// (unless the second segment has a size of 0).
pub partition_data: [WIAPartitionData; 2],
}
static_assert!(size_of::<WIAPartition>() == 0x30);
/// This struct is used for keeping track of disc data that is not stored as [WIAPartition].
/// The data is stored as is (other than compression being applied).
///
/// The first [WIARawData] has `raw_data_offset` set to 0x80 and `raw_data_size` set to 0x4FF80,
/// but despite this, it actually contains 0x50000 bytes of data. (However, the first 0x80 bytes
/// should be read from [WIADisc] instead.) This should be handled by rounding the offset down to
/// the previous multiple of 0x8000 (and adding the equivalent amount to the size so that the end
/// offset stays the same), not by special casing the first [WIARawData].
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct WIARawData {
/// The offset on the disc at which this data starts.
pub raw_data_offset: U64,
/// The number of bytes on the disc covered by this struct.
pub raw_data_size: U64,
/// The index of the first [WIAGroup] struct that points to the data covered by this struct.
/// The other [WIAGroup] indices follow sequentially.
pub group_index: U32,
/// The number of [WIAGroup] structs used for this data.
pub num_groups: U32,
}
impl WIARawData {
pub fn start_offset(&self) -> u64 { self.raw_data_offset.get() & !(SECTOR_SIZE as u64 - 1) }
pub fn start_sector(&self) -> u32 { (self.start_offset() / SECTOR_SIZE as u64) as u32 }
pub fn end_offset(&self) -> u64 { self.raw_data_offset.get() + self.raw_data_size.get() }
pub fn end_sector(&self) -> u32 { (self.end_offset() / SECTOR_SIZE as u64) as u32 }
pub fn contains(&self, sector: u32) -> bool {
sector >= self.start_sector() && sector < self.end_sector()
}
}
/// This struct points directly to the actual disc data, stored compressed.
///
/// The data is interpreted differently depending on whether the [WIAGroup] is referenced by a
/// [WIAPartitionData] or a [WIARawData] (see the [WIAPartition] section for details).
///
/// A [WIAGroup] normally contains `chunk_size` bytes of decompressed data
/// (or `chunk_size / 0x8000 * 0x7C00` for Wii partition data when not counting hashes), not
/// counting any [WIAExceptionList] structs. However, the last [WIAGroup] of a [WIAPartitionData]
/// or [WIARawData] contains less data than that if `num_sectors * 0x8000` (for [WIAPartitionData])
/// or `raw_data_size` (for [WIARawData]) is not evenly divisible by `chunk_size`.
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct WIAGroup {
/// The offset in the file where the compressed data is stored.
///
/// Stored as a `u32`, divided by 4.
pub data_offset: U32,
/// The size of the compressed data, including any [WIAExceptionList] structs. 0 is a special
/// case meaning that every byte of the decompressed data is 0x00 and the [WIAExceptionList]
/// structs (if there are supposed to be any) contain 0 exceptions.
pub data_size: U32,
}
/// Compared to [WIAGroup], [RVZGroup] changes the meaning of the most significant bit of
/// [data_size](Self::data_size) and adds one additional attribute.
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct RVZGroup {
/// The offset in the file where the compressed data is stored, divided by 4.
pub data_offset: U32,
/// The most significant bit is 1 if the data is compressed using the compression method
/// indicated in [WIADisc], and 0 if it is not compressed. The lower 31 bits are the size of
/// the compressed data, including any [WIAExceptionList] structs. The lower 31 bits being 0 is
/// a special case meaning that every byte of the decompressed and unpacked data is 0x00 and
/// the [WIAExceptionList] structs (if there are supposed to be any) contain 0 exceptions.
pub data_size_and_flag: U32,
/// The size after decompressing but before decoding the RVZ packing.
/// If this is 0, RVZ packing is not used for this group.
pub rvz_packed_size: U32,
}
impl RVZGroup {
pub fn data_size(&self) -> u32 { self.data_size_and_flag.get() & 0x7FFFFFFF }
pub fn is_compressed(&self) -> bool { self.data_size_and_flag.get() & 0x80000000 != 0 }
}
impl From<&WIAGroup> for RVZGroup {
fn from(value: &WIAGroup) -> Self {
Self {
data_offset: value.data_offset,
data_size_and_flag: U32::new(value.data_size.get() | 0x80000000),
rvz_packed_size: U32::new(0),
}
}
}
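// Hedged sketch (not part of the original file): WIA groups are always
// compressed with the file-level method, so the conversion above sets the
// "compressed" flag bit and leaves `rvz_packed_size` at 0.
#[cfg(test)]
mod group_tests {
    use super::*;

    #[test]
    fn wia_group_converts_to_compressed_rvz_group() {
        let wia = WIAGroup { data_offset: U32::new(0x100), data_size: U32::new(0x4000) };
        let rvz = RVZGroup::from(&wia);
        assert_eq!(rvz.data_size(), 0x4000);
        assert!(rvz.is_compressed());
        assert_eq!(rvz.rvz_packed_size.get(), 0);
    }
}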
/// This struct represents a 20-byte difference between the recalculated hash data and the original
/// hash data. (See also [WIAExceptionList])
///
/// When recalculating hashes for a [WIAGroup] with a size which is not evenly divisible by 2 MiB
/// (with the size of the hashes included), the missing bytes should be treated as zeroes for the
/// purpose of hashing. (wit's writing code seems to act as if the reading code does not assume that
/// these missing bytes are zero, but both wit's and Dolphin's reading code treat them as zero.
/// Dolphin's writing code assumes that the reading code treats them as zero.)
///
/// wit's writing code only outputs [WIAException] structs for mismatches in the actual hash
/// data, not in the padding data (which normally only contains zeroes). Dolphin's writing code
/// outputs [WIAException] structs for both hash data and padding data. When Dolphin needs to
/// write [WIAException] structs for a padding area which is 32 bytes long, it writes one which
/// covers the first 20 bytes of the padding area and one which covers the last 20 bytes of the
/// padding area, generating 12 bytes of overlap between the [WIAException] structs.
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(2))]
pub struct WIAException {
/// The offset among the hashes. The offsets 0x0000-0x0400 here map to the offsets 0x0000-0x0400
/// in the full 2 MiB of data, the offsets 0x0400-0x0800 here map to the offsets 0x8000-0x8400
/// in the full 2 MiB of data, and so on.
///
/// The offsets start over at 0 for each new [WIAExceptionList].
pub offset: U16,
/// The hash that the automatically generated hash at the given offset needs to be replaced
/// with.
///
/// The replacement should happen after calculating all hashes for the current 2 MiB of data
/// but before encrypting the hashes.
pub hash: HashBytes,
}
/// Each [WIAGroup] of Wii partition data contains one or more [WIAExceptionList] structs before
/// the actual data, one for each 2 MiB of data in the [WIAGroup]. The number of [WIAExceptionList]
/// structs per [WIAGroup] is always `chunk_size / 0x200000`, even for a [WIAGroup] which contains
/// less data than normal due to it being at the end of a partition.
///
/// For memory management reasons, programs which read WIA files might place a limit on how many
/// exceptions there can be in a [WIAExceptionList]. Dolphin's reading code has a limit of
/// `52 × 64 = 3328` (unless the compression method is [None](WIACompression::None) or
/// [Purge](WIACompression::Purge), in which case there is no limit), which is enough to cover all
/// hashes and all padding. wit's reading code seems to be written as if `47 × 64 = 3008` is the
/// maximum it needs to be able to handle, which is enough to cover all hashes but not any padding.
/// However, because wit allocates more memory than needed, it seems to be possible to exceed 3008
/// by some amount without problems. It should be safe for writing code to assume that reading code
/// can handle at least 3328 exceptions per [WIAExceptionList].
///
/// Somewhat ironically, there are exceptions to how [WIAExceptionList] structs are handled:
///
/// For the compression method [Purge](WIACompression::Purge), the [WIAExceptionList] structs are
/// stored uncompressed (in other words, before the first [WIASegment]). For
/// [Bzip2](WIACompression::Bzip2), [Lzma](WIACompression::Lzma) and [Lzma2](WIACompression::Lzma2), they are
/// compressed along with the rest of the data.
///
/// For the compression methods [None](WIACompression::None) and [Purge](WIACompression::Purge), if the
/// end offset of the last [WIAExceptionList] is not evenly divisible by 4, padding is inserted
/// after it so that the data afterwards will start at a 4 byte boundary. This padding is not
/// inserted for the other compression methods.
type WIAExceptionList = Box<[WIAException]>;
#[derive(Clone)]
pub enum Decompressor {
None,
#[cfg(feature = "compress-bzip2")]
Bzip2,
#[cfg(feature = "compress-lzma")]
Lzma(Box<[u8]>),
#[cfg(feature = "compress-lzma")]
Lzma2(Box<[u8]>),
#[cfg(feature = "compress-zstd")]
Zstandard,
}
impl Decompressor {
pub fn new(disc: &WIADisc) -> Result<Self> {
let data = &disc.compr_data[..disc.compr_data_len as usize];
match disc.compression() {
WIACompression::None => Ok(Self::None),
#[cfg(feature = "compress-bzip2")]
WIACompression::Bzip2 => Ok(Self::Bzip2),
#[cfg(feature = "compress-lzma")]
WIACompression::Lzma => Ok(Self::Lzma(Box::from(data))),
#[cfg(feature = "compress-lzma")]
WIACompression::Lzma2 => Ok(Self::Lzma2(Box::from(data))),
#[cfg(feature = "compress-zstd")]
WIACompression::Zstandard => Ok(Self::Zstandard),
comp => Err(Error::DiscFormat(format!("Unsupported WIA/RVZ compression: {:?}", comp))),
}
}
pub fn wrap<'a, R>(&mut self, reader: R) -> io::Result<Box<dyn Read + 'a>>
where R: Read + 'a {
Ok(match self {
Decompressor::None => Box::new(reader),
#[cfg(feature = "compress-bzip2")]
Decompressor::Bzip2 => Box::new(bzip2::read::BzDecoder::new(reader)),
#[cfg(feature = "compress-lzma")]
Decompressor::Lzma(data) => {
let options = lzma_props_decode(data)?;
Box::new(new_lzma_decoder(reader, &options)?)
}
#[cfg(feature = "compress-lzma")]
Decompressor::Lzma2(data) => {
let options = lzma2_props_decode(data)?;
Box::new(new_lzma2_decoder(reader, &options)?)
}
#[cfg(feature = "compress-zstd")]
Decompressor::Zstandard => Box::new(zstd::stream::Decoder::new(reader)?),
})
}
}
pub struct DiscIOWIA {
inner: SplitFileReader,
header: WIAFileHeader,
disc: WIADisc,
partitions: Box<[WIAPartition]>,
raw_data: Box<[WIARawData]>,
groups: Box<[RVZGroup]>,
nkit_header: Option<NKitHeader>,
decompressor: Decompressor,
group: u32,
group_data: Vec<u8>,
exception_lists: Vec<WIAExceptionList>,
}
impl Clone for DiscIOWIA {
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
header: self.header.clone(),
disc: self.disc.clone(),
partitions: self.partitions.clone(),
raw_data: self.raw_data.clone(),
groups: self.groups.clone(),
nkit_header: self.nkit_header.clone(),
decompressor: self.decompressor.clone(),
group: u32::MAX,
group_data: Vec::new(),
exception_lists: Vec::new(),
}
}
}
fn verify_hash(buf: &[u8], expected: &HashBytes) -> Result<()> {
let out = hash_bytes(buf);
if out != *expected {
let mut got_bytes = [0u8; 40];
let got = base16ct::lower::encode_str(&out, &mut got_bytes).unwrap(); // Safe: fixed buffer size
let mut expected_bytes = [0u8; 40];
let expected = base16ct::lower::encode_str(expected, &mut expected_bytes).unwrap(); // Safe: fixed buffer size
return Err(Error::DiscFormat(format!(
"WIA/RVZ hash mismatch: {}, expected {}",
got, expected
)));
}
Ok(())
}
impl DiscIOWIA {
pub fn new(filename: &Path) -> Result<Box<Self>> {
let mut inner = SplitFileReader::new(filename)?;
// Load & verify file header
let header: WIAFileHeader = read_from(&mut inner).context("Reading WIA/RVZ file header")?;
header.validate()?;
let is_rvz = header.is_rvz();
// log::debug!("Header: {:?}", header);
// Load & verify disc header
let mut disc_buf: Vec<u8> = read_vec(&mut inner, header.disc_size.get() as usize)
.context("Reading WIA/RVZ disc header")?;
verify_hash(&disc_buf, &header.disc_hash)?;
disc_buf.resize(size_of::<WIADisc>(), 0);
let disc = WIADisc::read_from(disc_buf.as_slice()).unwrap();
disc.validate()?;
// if !options.rebuild_hashes {
// // If we're not rebuilding hashes, disable partition hashes in disc header
// disc.disc_head[0x60] = 1;
// }
// if !options.rebuild_encryption {
// // If we're not re-encrypting, disable partition encryption in disc header
// disc.disc_head[0x61] = 1;
// }
// log::debug!("Disc: {:?}", disc);
// Read NKit header if present (after disc header)
let nkit_header = NKitHeader::try_read_from(&mut inner, disc.chunk_size.get(), false);
// Load & verify partition headers
inner
.seek(SeekFrom::Start(disc.partition_offset.get()))
.context("Seeking to WIA/RVZ partition headers")?;
let partitions: Box<[WIAPartition]> =
read_box_slice(&mut inner, disc.num_partitions.get() as usize)
.context("Reading WIA/RVZ partition headers")?;
verify_hash(partitions.as_ref().as_bytes(), &disc.partition_hash)?;
// log::debug!("Partitions: {:?}", partitions);
// Create decompressor
let mut decompressor = Decompressor::new(&disc)?;
// Load raw data headers
let raw_data: Box<[WIARawData]> = {
inner
.seek(SeekFrom::Start(disc.raw_data_offset.get()))
.context("Seeking to WIA/RVZ raw data headers")?;
let mut reader = decompressor
.wrap((&mut inner).take(disc.raw_data_size.get() as u64))
.context("Creating WIA/RVZ decompressor")?;
read_box_slice(&mut reader, disc.num_raw_data.get() as usize)
.context("Reading WIA/RVZ raw data headers")?
};
// Validate raw data alignment
for (idx, rd) in raw_data.iter().enumerate() {
let start_offset = rd.start_offset();
let end_offset = rd.end_offset();
if (start_offset % SECTOR_SIZE as u64) != 0 || (end_offset % SECTOR_SIZE as u64) != 0 {
return Err(Error::DiscFormat(format!(
"WIA/RVZ raw data {} not aligned to sector: {:#X}..{:#X}",
idx, start_offset, end_offset
)));
}
}
// log::debug!("Raw data: {:?}", raw_data);
// Load group headers
let groups = {
inner
.seek(SeekFrom::Start(disc.group_offset.get()))
.context("Seeking to WIA/RVZ group headers")?;
let mut reader = decompressor
.wrap((&mut inner).take(disc.group_size.get() as u64))
.context("Creating WIA/RVZ decompressor")?;
if is_rvz {
read_box_slice(&mut reader, disc.num_groups.get() as usize)
.context("Reading WIA/RVZ group headers")?
} else {
let wia_groups: Box<[WIAGroup]> =
read_box_slice(&mut reader, disc.num_groups.get() as usize)
.context("Reading WIA/RVZ group headers")?;
wia_groups.iter().map(RVZGroup::from).collect()
}
// log::debug!("Groups: {:?}", groups);
};
Ok(Box::new(Self {
header,
disc,
partitions,
raw_data,
groups,
inner,
nkit_header,
decompressor,
group: u32::MAX,
group_data: vec![],
exception_lists: vec![],
}))
}
}
fn read_exception_lists<R>(
reader: &mut R,
in_partition: bool,
chunk_size: u32,
) -> io::Result<Vec<WIAExceptionList>>
where
R: Read + ?Sized,
{
if !in_partition {
return Ok(vec![]);
}
// One exception list for each 2 MiB of data
let num_exception_list = (chunk_size as usize).div_ceil(0x200000);
// log::debug!("Num exception list: {:?}", num_exception_list);
let mut exception_lists = Vec::with_capacity(num_exception_list);
for i in 0..num_exception_list {
let num_exceptions = read_u16_be(reader)?;
let exceptions: Box<[WIAException]> = read_box_slice(reader, num_exceptions as usize)?;
if !exceptions.is_empty() {
log::debug!("Exception list {}: {:?}", i, exceptions);
}
exception_lists.push(exceptions);
}
Ok(exception_lists)
}
impl BlockIO for DiscIOWIA {
fn read_block_internal(
&mut self,
out: &mut [u8],
sector: u32,
partition: Option<&PartitionInfo>,
) -> io::Result<Block> {
let chunk_size = self.disc.chunk_size.get();
let sectors_per_chunk = chunk_size / SECTOR_SIZE as u32;
let (group_index, group_sector, partition_offset) = if let Some(partition) = partition {
// Find the partition
let Some(wia_part) = self.partitions.get(partition.index) else {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
format!("Couldn't find WIA/RVZ partition index {}", partition.index),
));
};
// Sanity check partition sector ranges
let wia_part_start = wia_part.partition_data[0].first_sector.get();
let wia_part_end = wia_part.partition_data[1].first_sector.get()
+ wia_part.partition_data[1].num_sectors.get();
if partition.data_start_sector != wia_part_start
|| partition.data_end_sector != wia_part_end
{
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
format!(
"WIA/RVZ partition sector mismatch: {}..{} != {}..{}",
wia_part_start,
wia_part_end,
partition.data_start_sector,
partition.data_end_sector
),
));
}
// Find the partition data for the sector
let Some(pd) = wia_part.partition_data.iter().find(|pd| pd.contains(sector)) else {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
format!("Couldn't find WIA/RVZ partition data for sector {}", sector),
));
};
// Find the group index for the sector
let part_data_sector = sector - pd.first_sector.get();
let part_group_index = part_data_sector / sectors_per_chunk;
let part_group_sector = part_data_sector % sectors_per_chunk;
if part_group_index >= pd.num_groups.get() {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
format!(
"WIA/RVZ partition group index out of range: {} >= {}",
part_group_index,
pd.num_groups.get()
),
));
}
// Calculate the group offset within the partition
let part_group_offset =
(((part_group_index * sectors_per_chunk) + pd.first_sector.get())
- wia_part.partition_data[0].first_sector.get()) as u64
* SECTOR_DATA_SIZE as u64;
(pd.group_index.get() + part_group_index, part_group_sector, part_group_offset)
} else {
let Some(rd) = self.raw_data.iter().find(|d| d.contains(sector)) else {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
format!("Couldn't find WIA/RVZ raw data for sector {}", sector),
));
};
// Find the group index for the sector
let data_sector = sector - (rd.raw_data_offset.get() / SECTOR_SIZE as u64) as u32;
let group_index = data_sector / sectors_per_chunk;
let group_sector = data_sector % sectors_per_chunk;
if group_index >= rd.num_groups.get() {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
format!(
"WIA/RVZ raw data group index out of range: {} >= {}",
group_index,
rd.num_groups.get()
),
));
}
(rd.group_index.get() + group_index, group_sector, 0)
};
// Fetch the group
let Some(group) = self.groups.get(group_index as usize) else {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
format!("Couldn't find WIA/RVZ group index {}", group_index),
));
};
// Special case for all-zero data
if group.data_size() == 0 {
self.exception_lists.clear();
return Ok(Block::Zero);
}
// Read group data if necessary
if group_index != self.group {
let group_data_size = if partition.is_some() {
// Within a partition, hashes are excluded from the data size
(sectors_per_chunk * SECTOR_DATA_SIZE as u32) as usize
} else {
chunk_size as usize
};
self.group_data = Vec::with_capacity(group_data_size);
let group_data_start = group.data_offset.get() as u64 * 4;
self.inner.seek(SeekFrom::Start(group_data_start))?;
let mut reader = (&mut self.inner).take_seek(group.data_size() as u64);
let uncompressed_exception_lists =
matches!(self.disc.compression(), WIACompression::None | WIACompression::Purge)
|| !group.is_compressed();
if uncompressed_exception_lists {
self.exception_lists = read_exception_lists(
&mut reader,
partition.is_some(),
self.disc.chunk_size.get(),
)?;
// Align to 4
let rem = reader.stream_position()? % 4;
if rem != 0 {
reader.seek(SeekFrom::Current((4 - rem) as i64))?;
}
}
let mut reader: Box<dyn Read> = if group.is_compressed() {
self.decompressor.wrap(reader)?
} else {
Box::new(reader)
};
if !uncompressed_exception_lists {
self.exception_lists = read_exception_lists(
reader.as_mut(),
partition.is_some(),
self.disc.chunk_size.get(),
)?;
}
if group.rvz_packed_size.get() > 0 {
// Decode RVZ packed data
let mut lfg = LaggedFibonacci::default();
loop {
let mut size_bytes = [0u8; 4];
match reader.read_exact(&mut size_bytes) {
Ok(_) => {}
Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => break,
Err(e) => {
return Err(io::Error::new(e.kind(), "Failed to read RVZ packed size"));
}
}
let size = u32::from_be_bytes(size_bytes);
let cur_data_len = self.group_data.len();
if size & 0x80000000 != 0 {
// Junk data
let size = size & 0x7FFFFFFF;
lfg.init_with_reader(reader.as_mut())?;
lfg.skip(
((partition_offset + cur_data_len as u64) % SECTOR_SIZE as u64)
as usize,
);
self.group_data.resize(cur_data_len + size as usize, 0);
lfg.fill(&mut self.group_data[cur_data_len..]);
} else {
// Real data
self.group_data.resize(cur_data_len + size as usize, 0);
reader.read_exact(&mut self.group_data[cur_data_len..])?;
}
}
} else {
// Read and decompress data
reader.read_to_end(&mut self.group_data)?;
}
self.group = group_index;
}
// Read sector from cached group data
if partition.is_some() {
let sector_data_start = group_sector as usize * SECTOR_DATA_SIZE;
out[..HASHES_SIZE].fill(0);
out[HASHES_SIZE..SECTOR_SIZE].copy_from_slice(
&self.group_data[sector_data_start..sector_data_start + SECTOR_DATA_SIZE],
);
Ok(Block::PartDecrypted { has_hashes: false })
} else {
let sector_data_start = group_sector as usize * SECTOR_SIZE;
out.copy_from_slice(
&self.group_data[sector_data_start..sector_data_start + SECTOR_SIZE],
);
Ok(Block::Raw)
}
}
fn block_size_internal(&self) -> u32 {
// WIA/RVZ chunks aren't always the full size, so we'll consider the
// block size to be one sector, and handle the complexity ourselves.
SECTOR_SIZE as u32
}
fn meta(&self) -> DiscMeta {
let mut result = DiscMeta {
format: if self.header.is_rvz() { Format::Rvz } else { Format::Wia },
block_size: Some(self.disc.chunk_size.get()),
compression: match self.disc.compression() {
WIACompression::None => Compression::None,
WIACompression::Purge => Compression::Purge,
WIACompression::Bzip2 => Compression::Bzip2,
WIACompression::Lzma => Compression::Lzma,
WIACompression::Lzma2 => Compression::Lzma2,
WIACompression::Zstandard => Compression::Zstandard,
},
decrypted: true,
needs_hash_recovery: true,
lossless: true,
disc_size: Some(self.header.iso_file_size.get()),
..Default::default()
};
if let Some(nkit_header) = &self.nkit_header {
nkit_header.apply(&mut result);
}
result
}
}

205
nod/src/lib.rs Normal file

@ -0,0 +1,205 @@
#![warn(missing_docs)]
//! Library for traversing & reading Nintendo Optical Disc (GameCube and Wii) images.
//!
//! Originally based on the C++ library [nod](https://github.com/AxioDL/nod),
//! but does not currently support authoring.
//!
//! Currently supported file formats:
//! - ISO (GCM)
//! - WIA / RVZ
//! - WBFS (+ NKit 2 lossless)
//! - CISO (+ NKit 2 lossless)
//! - NFS (Wii U VC)
//! - GCZ
//!
//! # Examples
//!
//! Opening a disc image and reading a file:
//!
//! ```no_run
//! use std::io::Read;
//!
//! // Open a disc image and the first data partition.
//! let disc = nod::Disc::new("path/to/file.iso")
//! .expect("Failed to open disc");
//! let mut partition = disc.open_partition_kind(nod::PartitionKind::Data)
//! .expect("Failed to open data partition");
//!
//! // Read partition metadata and the file system table.
//! let meta = partition.meta()
//! .expect("Failed to read partition metadata");
//! let fst = meta.fst()
//! .expect("File system table is invalid");
//!
//! // Find a file by path and read it into a string.
//! if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
//! let mut s = String::new();
//! partition
//! .open_file(node)
//! .expect("Failed to open file stream")
//! .read_to_string(&mut s)
//! .expect("Failed to read file");
//! println!("{}", s);
//! }
//! ```
//!
//! Converting a disc image to raw ISO:
//!
//! ```no_run
//! // Enable `rebuild_encryption` to ensure the output is a valid ISO.
//! let options = nod::OpenOptions { rebuild_encryption: true, ..Default::default() };
//! let mut disc = nod::Disc::new_with_options("path/to/file.rvz", &options)
//! .expect("Failed to open disc");
//!
//! // Read directly from the open disc and write to the output file.
//! let mut out = std::fs::File::create("output.iso")
//! .expect("Failed to create output file");
//! std::io::copy(&mut disc, &mut out)
//! .expect("Failed to write data");
//! ```
use std::{
io::{Read, Seek},
path::Path,
};
pub use disc::{
ApploaderHeader, DiscHeader, DolHeader, PartitionBase, PartitionHeader, PartitionKind,
PartitionMeta, BI2_SIZE, BOOT_SIZE, SECTOR_SIZE,
};
pub use fst::{Fst, Node, NodeKind};
pub use io::{block::PartitionInfo, Compression, DiscMeta, Format};
pub use streams::ReadStream;
mod disc;
mod fst;
mod io;
mod streams;
mod util;
/// Error types for nod.
#[derive(thiserror::Error, Debug)]
pub enum Error {
/// An error for disc format related issues.
#[error("disc format error: {0}")]
DiscFormat(String),
/// A general I/O error.
#[error("I/O error: {0}")]
Io(String, #[source] std::io::Error),
/// An unknown error.
#[error("error: {0}")]
Other(String),
}
impl From<&str> for Error {
fn from(s: &str) -> Error { Error::Other(s.to_string()) }
}
impl From<String> for Error {
fn from(s: String) -> Error { Error::Other(s) }
}
/// Helper result type for [`Error`].
pub type Result<T, E = Error> = core::result::Result<T, E>;
/// Helper trait for adding context to errors.
pub trait ErrorContext {
/// Adds context to an error.
fn context(self, context: impl Into<String>) -> Error;
}
impl ErrorContext for std::io::Error {
fn context(self, context: impl Into<String>) -> Error { Error::Io(context.into(), self) }
}
/// Helper trait for adding context to result errors.
pub trait ResultContext<T> {
/// Adds context to a result error.
fn context(self, context: impl Into<String>) -> Result<T>;
/// Adds context to a result error using a closure.
fn with_context<F>(self, f: F) -> Result<T>
where F: FnOnce() -> String;
}
impl<T, E> ResultContext<T> for Result<T, E>
where E: ErrorContext
{
fn context(self, context: impl Into<String>) -> Result<T> {
self.map_err(|e| e.context(context))
}
fn with_context<F>(self, f: F) -> Result<T>
where F: FnOnce() -> String {
self.map_err(|e| e.context(f()))
}
}
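// Hedged usage sketch (not part of the original file): wrapping an I/O error
// with `ResultContext`, the same pattern the crate uses internally.
#[allow(dead_code)]
fn read_image_sketch(path: &Path) -> Result<Vec<u8>> {
    std::fs::read(path).with_context(|| format!("Reading {}", path.display()))
}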
/// Options for opening a disc image.
#[derive(Default, Debug, Clone)]
pub struct OpenOptions {
/// Wii: Rebuild partition data encryption and hashes if the underlying format stores data
/// decrypted or with hashes removed. (e.g. WIA/RVZ, NFS)
pub rebuild_encryption: bool,
/// Wii: Validate partition data hashes while reading the disc image.
pub validate_hashes: bool,
}
/// An open disc image and read stream.
///
/// This is the primary entry point for reading disc images.
pub struct Disc {
reader: disc::reader::DiscReader,
options: OpenOptions,
}
impl Disc {
/// Opens a disc image from a file path.
pub fn new<P: AsRef<Path>>(path: P) -> Result<Disc> {
Disc::new_with_options(path, &OpenOptions::default())
}
/// Opens a disc image from a file path with custom options.
pub fn new_with_options<P: AsRef<Path>>(path: P, options: &OpenOptions) -> Result<Disc> {
let io = io::block::open(path.as_ref())?;
let reader = disc::reader::DiscReader::new(io, options)?;
Ok(Disc { reader, options: options.clone() })
}
/// The disc's primary header.
pub fn header(&self) -> &DiscHeader { self.reader.header() }
/// Returns extra metadata included in the disc file format, if any.
pub fn meta(&self) -> DiscMeta { self.reader.meta() }
/// The disc's size in bytes, or an estimate if not stored by the format.
pub fn disc_size(&self) -> u64 { self.reader.disc_size() }
/// A list of Wii partitions on the disc.
///
/// **GameCube**: This will return an empty slice.
pub fn partitions(&self) -> &[PartitionInfo] { self.reader.partitions() }
/// Opens a decrypted partition read stream for the specified partition index.
///
/// **GameCube**: `index` must always be 0.
pub fn open_partition(&self, index: usize) -> Result<Box<dyn PartitionBase>> {
self.reader.open_partition(index, &self.options)
}
/// Opens a decrypted partition read stream for the first partition matching
/// the specified kind.
///
/// **GameCube**: `kind` must always be [`PartitionKind::Data`].
pub fn open_partition_kind(&self, kind: PartitionKind) -> Result<Box<dyn PartitionBase>> {
self.reader.open_partition_kind(kind, &self.options)
}
}
impl Read for Disc {
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { self.reader.read(buf) }
}
impl Seek for Disc {
fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result<u64> { self.reader.seek(pos) }
}

80
nod/src/streams.rs Normal file

@ -0,0 +1,80 @@
//! Common stream types
use std::{
io,
io::{Read, Seek, SeekFrom},
};
/// A helper trait for seekable read streams.
pub trait ReadStream: Read + Seek {
/// Creates a windowed read sub-stream with offset and size.
///
/// Seeks underlying stream immediately.
fn new_window(&mut self, offset: u64, size: u64) -> io::Result<SharedWindowedReadStream> {
self.seek(SeekFrom::Start(offset))?;
Ok(SharedWindowedReadStream { base: self.as_dyn(), begin: offset, end: offset + size })
}
/// Retrieves a type-erased reference to the stream.
fn as_dyn(&mut self) -> &mut dyn ReadStream;
}
impl<T> ReadStream for T
where T: Read + Seek
{
fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
}
/// A non-owning window into an existing [`ReadStream`].
pub struct SharedWindowedReadStream<'a> {
/// A reference to the base stream.
pub base: &'a mut dyn ReadStream,
/// The beginning of the window in bytes.
pub begin: u64,
/// The end of the window in bytes.
pub end: u64,
}
impl<'a> SharedWindowedReadStream<'a> {
/// Modifies the current window & seeks to the beginning of the window.
pub fn set_window(&mut self, begin: u64, end: u64) -> io::Result<()> {
self.base.seek(SeekFrom::Start(begin))?;
self.begin = begin;
self.end = end;
Ok(())
}
}
impl<'a> Read for SharedWindowedReadStream<'a> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let pos = self.stream_position()?;
let size = self.end - self.begin;
        if pos >= size {
return Ok(0);
}
self.base.read(if pos + buf.len() as u64 > size {
&mut buf[..(size - pos) as usize]
} else {
buf
})
}
}
impl<'a> Seek for SharedWindowedReadStream<'a> {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
let result = self.base.seek(match pos {
SeekFrom::Start(p) => SeekFrom::Start(self.begin + p),
            // Seek relative to the end of the window, not the end of the
            // underlying stream.
            SeekFrom::End(p) => SeekFrom::Start(self.end.saturating_add_signed(p)),
SeekFrom::Current(_) => pos,
})?;
if result < self.begin || result > self.end {
Err(io::Error::from(io::ErrorKind::UnexpectedEof))
} else {
Ok(result - self.begin)
}
}
fn stream_position(&mut self) -> io::Result<u64> {
Ok(self.base.stream_position()? - self.begin)
}
}
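// Hedged usage sketch (not part of the original file): windowing into an
// in-memory stream. `std::io::Cursor` implements `Read + Seek`, so it gets
// `ReadStream` through the blanket impl above.
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Cursor;

    #[test]
    fn window_reads_only_its_range() {
        let mut base = Cursor::new(b"0123456789".to_vec());
        let mut window = base.new_window(2, 4).unwrap();
        let mut buf = Vec::new();
        window.read_to_end(&mut buf).unwrap();
        assert_eq!(buf, b"2345");
    }
}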

92
nod/src/util/compress.rs Normal file

@ -0,0 +1,92 @@
use std::{io, io::Read};
/// Decodes the LZMA Properties byte (lc/lp/pb).
/// See `lzma_lzma_lclppb_decode` in `liblzma/lzma/lzma_decoder.c`.
#[cfg(feature = "compress-lzma")]
pub fn lzma_lclppb_decode(options: &mut liblzma::stream::LzmaOptions, byte: u8) -> io::Result<()> {
let mut d = byte as u32;
if d >= (9 * 5 * 5) {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Invalid LZMA props byte: {}", d),
));
}
options.literal_context_bits(d % 9);
d /= 9;
options.position_bits(d / 5);
options.literal_position_bits(d % 5);
Ok(())
}
/// Decodes LZMA properties.
/// See `lzma_lzma_props_decode` in `liblzma/lzma/lzma_decoder.c`.
#[cfg(feature = "compress-lzma")]
pub fn lzma_props_decode(props: &[u8]) -> io::Result<liblzma::stream::LzmaOptions> {
use crate::array_ref;
if props.len() != 5 {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Invalid LZMA props length: {}", props.len()),
));
}
let mut options = liblzma::stream::LzmaOptions::new();
lzma_lclppb_decode(&mut options, props[0])?;
options.dict_size(u32::from_le_bytes(*array_ref!(props, 1, 4)));
Ok(options)
}
/// Decodes LZMA2 properties.
/// See `lzma_lzma2_props_decode` in `liblzma/lzma/lzma2_decoder.c`.
#[cfg(feature = "compress-lzma")]
pub fn lzma2_props_decode(props: &[u8]) -> io::Result<liblzma::stream::LzmaOptions> {
use std::cmp::Ordering;
if props.len() != 1 {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Invalid LZMA2 props length: {}", props.len()),
));
}
let d = props[0] as u32;
let mut options = liblzma::stream::LzmaOptions::new();
options.dict_size(match d.cmp(&40) {
Ordering::Greater => {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Invalid LZMA2 props byte: {}", d),
));
}
Ordering::Equal => u32::MAX,
Ordering::Less => (2 | (d & 1)) << (d / 2 + 11),
});
Ok(options)
}
/// Creates a new raw LZMA decoder with the given options.
#[cfg(feature = "compress-lzma")]
pub fn new_lzma_decoder<R>(
reader: R,
options: &liblzma::stream::LzmaOptions,
) -> io::Result<liblzma::read::XzDecoder<R>>
where
R: Read,
{
let mut filters = liblzma::stream::Filters::new();
filters.lzma1(options);
let stream = liblzma::stream::Stream::new_raw_decoder(&filters).map_err(io::Error::from)?;
Ok(liblzma::read::XzDecoder::new_stream(reader, stream))
}
/// Creates a new raw LZMA2 decoder with the given options.
#[cfg(feature = "compress-lzma")]
pub fn new_lzma2_decoder<R>(
reader: R,
options: &liblzma::stream::LzmaOptions,
) -> io::Result<liblzma::read::XzDecoder<R>>
where
R: Read,
{
let mut filters = liblzma::stream::Filters::new();
filters.lzma2(options);
let stream = liblzma::stream::Stream::new_raw_decoder(&filters).map_err(io::Error::from)?;
Ok(liblzma::read::XzDecoder::new_stream(reader, stream))
}
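// Hedged sketch (not part of the original file): the inverse of
// `lzma_lclppb_decode`, documenting the 7-Zip SDK packing formula
// `byte = (pb * 5 + lp) * 9 + lc`.
#[allow(dead_code)]
pub fn lzma_lclppb_encode(lc: u32, lp: u32, pb: u32) -> u8 {
    debug_assert!(lc < 9 && lp < 5 && pb < 5);
    ((pb * 5 + lp) * 9 + lc) as u8
}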

135
nod/src/util/lfg.rs Normal file

@ -0,0 +1,135 @@
use std::{cmp::min, io, io::Read};
use zerocopy::{transmute_ref, AsBytes};
use crate::disc::SECTOR_SIZE;
pub const LFG_K: usize = 521;
pub const LFG_J: usize = 32;
pub const SEED_SIZE: usize = 17;
/// Lagged Fibonacci generator for Wii partition junk data.
///
/// References (license CC0-1.0):
/// https://github.com/dolphin-emu/dolphin/blob/a0f555648c27ec0c928f6b1e1fcad5e2d7c4d0c4/docs/WiaAndRvz.md
/// https://github.com/dolphin-emu/dolphin/blob/a0f555648c27ec0c928f6b1e1fcad5e2d7c4d0c4/Source/Core/DiscIO/LaggedFibonacciGenerator.cpp
pub struct LaggedFibonacci {
buffer: [u32; LFG_K],
position: usize,
}
impl Default for LaggedFibonacci {
fn default() -> Self { Self { buffer: [0u32; LFG_K], position: 0 } }
}
impl LaggedFibonacci {
fn init(&mut self) {
for i in SEED_SIZE..LFG_K {
self.buffer[i] =
(self.buffer[i - 17] << 23) ^ (self.buffer[i - 16] >> 9) ^ self.buffer[i - 1];
}
// Instead of doing the "shift by 18 instead of 16" oddity when actually outputting the data,
// we can do the shifting (and byteswapping) at this point to make the output code simpler.
for x in self.buffer.iter_mut() {
*x = ((*x & 0xFF00FFFF) | (*x >> 2 & 0x00FF0000)).swap_bytes();
}
for _ in 0..4 {
self.forward();
}
}
pub fn init_with_seed(&mut self, init: [u8; 4], disc_num: u8, partition_offset: u64) {
let seed = u32::from_be_bytes([
init[2],
init[1],
init[3].wrapping_add(init[2]),
init[0].wrapping_add(init[1]),
]) ^ disc_num as u32;
let sector = (partition_offset / SECTOR_SIZE as u64) as u32;
let sector_offset = partition_offset % SECTOR_SIZE as u64;
let mut n = seed.wrapping_mul(0x260BCD5) ^ sector.wrapping_mul(0x1EF29123);
for i in 0..SEED_SIZE {
let mut v = 0u32;
for _ in 0..LFG_J {
n = n.wrapping_mul(0x5D588B65).wrapping_add(1);
v = (v >> 1) | (n & 0x80000000);
}
self.buffer[i] = v;
}
self.buffer[16] ^= self.buffer[0] >> 9 ^ self.buffer[16] << 23;
self.position = 0;
self.init();
self.skip(sector_offset as usize);
}
pub fn init_with_reader<R>(&mut self, reader: &mut R) -> io::Result<()>
where R: Read + ?Sized {
reader.read_exact(self.buffer[..SEED_SIZE].as_bytes_mut())?;
for x in self.buffer[..SEED_SIZE].iter_mut() {
*x = u32::from_be(*x);
}
self.position = 0;
self.init();
Ok(())
}
pub fn forward(&mut self) {
for i in 0..LFG_J {
self.buffer[i] ^= self.buffer[i + LFG_K - LFG_J];
}
for i in LFG_J..LFG_K {
self.buffer[i] ^= self.buffer[i - LFG_J];
}
}
pub fn skip(&mut self, n: usize) {
self.position += n;
while self.position >= LFG_K * 4 {
self.forward();
self.position -= LFG_K * 4;
}
}
pub fn fill(&mut self, mut buf: &mut [u8]) {
while !buf.is_empty() {
let len = min(buf.len(), LFG_K * 4 - self.position);
let bytes: &[u8; LFG_K * 4] = transmute_ref!(&self.buffer);
buf[..len].copy_from_slice(&bytes[self.position..self.position + len]);
self.position += len;
buf = &mut buf[len..];
if self.position == LFG_K * 4 {
self.forward();
self.position = 0;
}
}
}
}
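// Hedged usage sketch (not part of the original file): generating junk bytes
// for an arbitrary partition offset. Assumes `game_id` holds the first four
// bytes of the disc's game ID, per the Dolphin docs linked above.
#[allow(dead_code)]
fn junk_bytes_at(game_id: [u8; 4], disc_num: u8, partition_offset: u64, len: usize) -> Vec<u8> {
    let mut lfg = LaggedFibonacci::default();
    lfg.init_with_seed(game_id, disc_num, partition_offset);
    let mut out = vec![0u8; len];
    lfg.fill(&mut out);
    out
}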
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_init_with_seed_1() {
let mut lfg = LaggedFibonacci::default();
lfg.init_with_seed([0x47, 0x41, 0x4c, 0x45], 0, 0x600000);
let mut buf = [0u8; 16];
lfg.fill(&mut buf);
assert_eq!(buf, [
0xE9, 0x47, 0x67, 0xBD, 0x41, 0x50, 0x4D, 0x5D, 0x61, 0x48, 0xB1, 0x99, 0xA0, 0x12,
0x0C, 0xBA
]);
}
#[test]
fn test_init_with_seed_2() {
let mut lfg = LaggedFibonacci::default();
lfg.init_with_seed([0x47, 0x41, 0x4c, 0x45], 0, 0x608000);
let mut buf = [0u8; 16];
lfg.fill(&mut buf);
assert_eq!(buf, [
0xE2, 0xBB, 0xBD, 0x77, 0xDA, 0xB2, 0x22, 0x42, 0x1C, 0x0C, 0x0B, 0xFC, 0xAC, 0x06,
0xEA, 0xD0
]);
}
}

46
nod/src/util/mod.rs Normal file

@ -0,0 +1,46 @@
use std::ops::{Div, Rem};
pub(crate) mod compress;
pub(crate) mod lfg;
pub(crate) mod read;
pub(crate) mod take_seek;
#[inline(always)]
pub(crate) fn div_rem<T>(x: T, y: T) -> (T, T)
where T: Div<Output = T> + Rem<Output = T> + Copy {
let quot = x / y;
let rem = x % y;
(quot, rem)
}
/// Creates a fixed-size array reference from a slice.
#[macro_export]
macro_rules! array_ref {
($slice:expr, $offset:expr, $size:expr) => {{
#[inline(always)]
fn to_array<T>(slice: &[T]) -> &[T; $size] {
unsafe { &*(slice.as_ptr() as *const [_; $size]) }
}
to_array(&$slice[$offset..$offset + $size])
}};
}
/// Creates a mutable fixed-size array reference from a slice.
#[macro_export]
macro_rules! array_ref_mut {
($slice:expr, $offset:expr, $size:expr) => {{
#[inline(always)]
fn to_array<T>(slice: &mut [T]) -> &mut [T; $size] {
            unsafe { &mut *(slice.as_mut_ptr() as *mut [_; $size]) }
}
to_array(&mut $slice[$offset..$offset + $size])
}};
}
/// Compile-time assertion.
#[macro_export]
macro_rules! static_assert {
($condition:expr) => {
const _: () = core::assert!($condition);
};
}
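// Hedged sketch (not part of the original file): exercising the helpers above.
#[cfg(test)]
mod tests {
    use super::div_rem;

    #[test]
    fn div_rem_splits_quotient_and_remainder() {
        assert_eq!(div_rem(10u32, 3), (3, 1));
    }

    #[test]
    fn array_ref_borrows_fixed_window() {
        let data = [1u8, 2, 3, 4, 5];
        let window: &[u8; 2] = crate::array_ref!(data, 1, 2);
        assert_eq!(window, &[2, 3]);
    }
}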

71
nod/src/util/read.rs Normal file

@ -0,0 +1,71 @@
use std::{io, io::Read};
use zerocopy::{AsBytes, FromBytes, FromZeroes};
#[inline(always)]
pub fn read_from<T, R>(reader: &mut R) -> io::Result<T>
where
T: FromBytes + FromZeroes + AsBytes,
R: Read + ?Sized,
{
let mut ret = <T>::new_zeroed();
reader.read_exact(ret.as_bytes_mut())?;
Ok(ret)
}
#[inline(always)]
pub fn read_vec<T, R>(reader: &mut R, count: usize) -> io::Result<Vec<T>>
where
T: FromBytes + FromZeroes + AsBytes,
R: Read + ?Sized,
{
let mut ret = <T>::new_vec_zeroed(count);
reader.read_exact(ret.as_mut_slice().as_bytes_mut())?;
Ok(ret)
}
#[inline(always)]
pub fn read_box<T, R>(reader: &mut R) -> io::Result<Box<T>>
where
T: FromBytes + FromZeroes + AsBytes,
R: Read + ?Sized,
{
let mut ret = <T>::new_box_zeroed();
reader.read_exact(ret.as_mut().as_bytes_mut())?;
Ok(ret)
}
#[inline(always)]
pub fn read_box_slice<T, R>(reader: &mut R, count: usize) -> io::Result<Box<[T]>>
where
T: FromBytes + FromZeroes + AsBytes,
R: Read + ?Sized,
{
let mut ret = <T>::new_box_slice_zeroed(count);
reader.read_exact(ret.as_mut().as_bytes_mut())?;
Ok(ret)
}
#[inline(always)]
pub fn read_u16_be<R>(reader: &mut R) -> io::Result<u16>
where R: Read + ?Sized {
let mut buf = [0u8; 2];
reader.read_exact(&mut buf)?;
Ok(u16::from_be_bytes(buf))
}
#[inline(always)]
pub fn read_u32_be<R>(reader: &mut R) -> io::Result<u32>
where R: Read + ?Sized {
let mut buf = [0u8; 4];
reader.read_exact(&mut buf)?;
Ok(u32::from_be_bytes(buf))
}
#[inline(always)]
pub fn read_u64_be<R>(reader: &mut R) -> io::Result<u64>
where R: Read + ?Sized {
let mut buf = [0u8; 8];
reader.read_exact(&mut buf)?;
Ok(u64::from_be_bytes(buf))
}
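// Hedged usage sketch (not part of the original file): reading a packed
// big-endian struct with `read_from` from an in-memory buffer.
#[cfg(test)]
mod tests {
    use super::*;
    use zerocopy::big_endian::U32;

    #[derive(FromBytes, FromZeroes, AsBytes)]
    #[repr(C)]
    struct Pair {
        a: U32,
        b: U32,
    }

    #[test]
    fn read_pair_from_slice() {
        let buf = [0u8, 0, 0, 1, 0, 0, 0, 2];
        let pair: Pair = read_from(&mut buf.as_slice()).unwrap();
        assert_eq!(pair.a.get(), 1);
        assert_eq!(pair.b.get(), 2);
    }
}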

127
nod/src/util/take_seek.rs Normal file

@ -0,0 +1,127 @@
// From https://github.com/jam1garner/binrw/blob/e96a1320287ec83d1f471525ffa380800ec9e124/binrw/src/io/take_seek.rs
// MIT License
//
// Copyright (c) jam1garner and other contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#![allow(dead_code)]
//! Types for seekable reader adapters which limit the number of bytes read from
//! the underlying reader.
use std::io::{Read, Result, Seek, SeekFrom};
/// Read adapter which limits the bytes read from an underlying reader, with
/// seek support.
///
/// This struct is generally created by importing the [`TakeSeekExt`] extension
/// and calling [`take_seek`] on a reader.
///
/// [`take_seek`]: TakeSeekExt::take_seek
#[derive(Debug)]
pub struct TakeSeek<T> {
inner: T,
pos: u64,
end: u64,
}
impl<T> TakeSeek<T> {
/// Gets a reference to the underlying reader.
pub fn get_ref(&self) -> &T { &self.inner }
/// Gets a mutable reference to the underlying reader.
///
/// Care should be taken to avoid modifying the internal I/O state of the
/// underlying reader as doing so may corrupt the internal limit of this
/// `TakeSeek`.
pub fn get_mut(&mut self) -> &mut T { &mut self.inner }
/// Consumes this wrapper, returning the wrapped value.
pub fn into_inner(self) -> T { self.inner }
/// Returns the number of bytes that can be read before this instance will
/// return EOF.
///
/// # Note
///
/// This instance may reach EOF after reading fewer bytes than indicated by
/// this method if the underlying [`Read`] instance reaches EOF.
pub fn limit(&self) -> u64 { self.end.saturating_sub(self.pos) }
}
impl<T: Seek> TakeSeek<T> {
/// Sets the number of bytes that can be read before this instance will
/// return EOF. This is the same as constructing a new `TakeSeek` instance,
    /// so the amount of bytes read and the previous limit value don't matter
/// when calling this method.
///
/// # Panics
///
/// Panics if the inner stream returns an error from `stream_position`.
pub fn set_limit(&mut self, limit: u64) {
let pos = self.inner.stream_position().expect("cannot get position for `set_limit`");
self.pos = pos;
self.end = pos + limit;
}
}
impl<T: Read> Read for TakeSeek<T> {
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
let limit = self.limit();
// Don't call into inner reader at all at EOF because it may still block
if limit == 0 {
return Ok(0);
}
// Lint: It is impossible for this cast to truncate because the value
// being cast is the minimum of two values, and one of the value types
// is already `usize`.
#[allow(clippy::cast_possible_truncation)]
let max = (buf.len() as u64).min(limit) as usize;
let n = self.inner.read(&mut buf[0..max])?;
self.pos += n as u64;
Ok(n)
}
}
impl<T: Seek> Seek for TakeSeek<T> {
fn seek(&mut self, pos: SeekFrom) -> Result<u64> {
self.pos = self.inner.seek(pos)?;
Ok(self.pos)
}
fn stream_position(&mut self) -> Result<u64> { Ok(self.pos) }
}
/// An extension trait that implements `take_seek()` for compatible streams.
pub trait TakeSeekExt {
/// Creates an adapter which will read at most `limit` bytes from the
/// wrapped stream.
fn take_seek(self, limit: u64) -> TakeSeek<Self>
where Self: Sized;
}
impl<T: Read + Seek> TakeSeekExt for T {
fn take_seek(mut self, limit: u64) -> TakeSeek<Self>
where Self: Sized {
let pos = self.stream_position().expect("cannot get position for `take_seek`");
TakeSeek { inner: self, pos, end: pos + limit }
}
}
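// Hedged usage sketch (not part of the original file): limiting reads with
// `take_seek` while retaining Seek support.
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Cursor;

    #[test]
    fn take_seek_limits_reads() {
        let cursor = Cursor::new(b"abcdef".to_vec());
        let mut taken = cursor.take_seek(4);
        let mut buf = Vec::new();
        taken.read_to_end(&mut buf).unwrap();
        assert_eq!(buf, b"abcd");
    }
}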

46
nodtool/Cargo.toml Normal file

@ -0,0 +1,46 @@
[package]
name = "nodtool"
version = "1.2.0"
edition = "2021"
rust-version = "1.73.0"
authors = ["Luke Street <luke@street.dev>"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/encounter/nod-rs"
documentation = "https://docs.rs/nod"
readme = "../README.md"
description = """
CLI tool for verifying and converting GameCube and Wii disc images.
"""
keywords = ["gamecube", "wii", "iso", "wbfs", "rvz"]
categories = ["command-line-utilities", "parser-implementations"]
build = "build.rs"
[features]
asm = ["md-5/asm", "nod/asm", "sha1/asm"]
nightly = ["crc32fast/nightly"]
[dependencies]
argp = "0.3.0"
base16ct = "0.2.0"
crc32fast = "1.4.0"
digest = "0.10.7"
enable-ansi-support = "0.2.1"
indicatif = "0.17.8"
itertools = "0.12.1"
log = "0.4.20"
md-5 = "0.10.6"
nod = { path = "../nod" }
sha1 = "0.10.6"
size = "0.4.1"
supports-color = "3.0.0"
tracing = "0.1.40"
tracing-attributes = "0.1.27"
tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
xxhash-rust = { version = "0.8.10", features = ["xxh64"] }
zerocopy = { version = "0.7.32", features = ["alloc", "derive"] }
[build-dependencies]
hex = { version = "0.4.3", features = ["serde"] }
quick-xml = { version = "0.31.0", features = ["serialize"] }
serde = { version = "1.0.197", features = ["derive"] }
zerocopy = { version = "0.7.32", features = ["alloc", "derive"] }

9958
nodtool/assets/redump-gc.dat Normal file

File diff suppressed because it is too large

18863
nodtool/assets/redump-wii.dat Normal file

File diff suppressed because it is too large

110
nodtool/build.rs Normal file

@ -0,0 +1,110 @@
use std::{
env,
fs::File,
io::{BufReader, BufWriter, Write},
mem::size_of,
path::Path,
};
use hex::deserialize as deserialize_hex;
use serde::Deserialize;
use zerocopy::AsBytes;
// Keep in sync with build.rs
#[derive(Clone, Debug, AsBytes)]
#[repr(C, align(4))]
struct Header {
entry_count: u32,
entry_size: u32,
}
// Keep in sync with redump.rs
#[derive(Clone, Debug, AsBytes)]
#[repr(C, align(4))]
struct GameEntry {
crc32: u32,
string_table_offset: u32,
sectors: u32,
md5: [u8; 16],
sha1: [u8; 20],
}
fn main() {
let output = std::process::Command::new("git")
.args(["rev-parse", "HEAD"])
.output()
.expect("Failed to execute git");
let rev = String::from_utf8(output.stdout).expect("Failed to parse git output");
println!("cargo:rustc-env=GIT_COMMIT_SHA={rev}");
println!("cargo:rustc-rerun-if-changed=.git/HEAD");
let out_dir = env::var("OUT_DIR").unwrap();
let dest_path = Path::new(&out_dir).join("parsed-dats.bin");
let mut f = BufWriter::new(File::create(dest_path).unwrap());
// Parse dat files
let mut entries = Vec::<(GameEntry, String)>::new();
for path in ["assets/redump-gc.dat", "assets/redump-wii.dat"] {
println!("cargo:rustc-rerun-if-changed={}", path);
let file = BufReader::new(File::open(path).expect("Failed to open dat file"));
let dat: DatFile = quick_xml::de::from_reader(file).expect("Failed to parse dat file");
entries.extend(dat.games.into_iter().map(|game| {
(
GameEntry {
string_table_offset: 0,
crc32: u32::from_be_bytes(game.rom.crc32),
md5: game.rom.md5,
sha1: game.rom.sha1,
sectors: game.rom.size.div_ceil(0x8000) as u32,
},
game.name,
)
}));
}
// Sort by CRC32
entries.sort_by_key(|(entry, _)| entry.crc32);
// Write game entries
let header =
Header { entry_count: entries.len() as u32, entry_size: size_of::<GameEntry>() as u32 };
f.write_all(header.as_bytes()).unwrap();
let mut string_table_offset = 0u32;
for (entry, name) in &mut entries {
entry.string_table_offset = string_table_offset;
f.write_all(entry.as_bytes()).unwrap();
string_table_offset += name.as_bytes().len() as u32 + 4;
}
// Write string table
for (_, name) in &entries {
f.write_all(&(name.len() as u32).to_le_bytes()).unwrap();
f.write_all(name.as_bytes()).unwrap();
}
f.flush().unwrap();
}
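// Layout sketch of the generated parsed-dats.bin (derived from the code
// above; consumed at runtime by the redump lookup):
//   Header { entry_count: u32, entry_size: u32 }
//   [GameEntry; entry_count]  // sorted by crc32 to allow binary search
//   string table: repeated (u32 little-endian length, UTF-8 name bytes)
// Each entry's `string_table_offset` is the byte offset of its length prefix
// within the string table.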
#[derive(Clone, Debug, Deserialize)]
struct DatFile {
#[serde(rename = "game")]
games: Vec<DatGame>,
}
#[derive(Clone, Debug, Deserialize)]
struct DatGame {
#[serde(rename = "@name")]
name: String,
rom: DatGameRom,
}
#[derive(Clone, Debug, Deserialize)]
struct DatGameRom {
#[serde(rename = "@size")]
size: u64,
#[serde(rename = "@crc", deserialize_with = "deserialize_hex")]
crc32: [u8; 4],
#[serde(rename = "@md5", deserialize_with = "deserialize_hex")]
md5: [u8; 16],
#[serde(rename = "@sha1", deserialize_with = "deserialize_hex")]
sha1: [u8; 20],
}

64
nodtool/src/argp_version.rs Normal file

@ -0,0 +1,64 @@
// Originally from https://gist.github.com/suluke/e0c672492126be0a4f3b4f0e1115d77c
//! Extend `argp` to be better integrated with the `cargo` ecosystem
//!
//! For now, this only adds a --version/-V option which causes early-exit.
use std::ffi::OsStr;
use argp::{parser::ParseGlobalOptions, EarlyExit, FromArgs, TopLevelCommand};
struct ArgsOrVersion<T>(T)
where T: FromArgs;
impl<T> TopLevelCommand for ArgsOrVersion<T> where T: FromArgs {}
impl<T> FromArgs for ArgsOrVersion<T>
where T: FromArgs
{
fn _from_args(
command_name: &[&str],
args: &[&OsStr],
parent: Option<&mut dyn ParseGlobalOptions>,
) -> Result<Self, EarlyExit> {
/// Also use argp for catching `--version`-only invocations
#[derive(FromArgs)]
struct Version {
/// Print version information and exit.
#[argp(switch, short = 'V')]
pub version: bool,
}
match Version::from_args(command_name, args) {
Ok(v) => {
if v.version {
println!(
"{} {} {}",
command_name.first().unwrap_or(&""),
env!("CARGO_PKG_VERSION"),
env!("GIT_COMMIT_SHA"),
);
std::process::exit(0);
} else {
// Pass through empty arguments
T::_from_args(command_name, args, parent).map(Self)
}
}
Err(exit) => match exit {
EarlyExit::Help(_help) => {
// TODO: Chain help info from Version
// For now, we just put the switch on T as well
T::from_args(command_name, &["--help"]).map(Self)
}
EarlyExit::Err(_) => T::_from_args(command_name, args, parent).map(Self),
},
}
}
}
/// Create a `FromArgs` type from the current process's `env::args`.
///
/// This function will exit early from the current process if argument parsing was unsuccessful or if information like `--help` was requested.
/// Error messages will be printed to stderr, and `--help` output to stdout.
pub fn from_env<T>() -> T
where T: TopLevelCommand {
argp::parse_args_or_exit::<ArgsOrVersion<T>>(argp::DEFAULT).0
}

nodtool/src/cmd/convert.rs Normal file

@ -0,0 +1,24 @@
use std::path::PathBuf;
use argp::FromArgs;
use crate::util::shared::convert_and_verify;
#[derive(FromArgs, Debug)]
/// Converts a disc image to ISO.
#[argp(subcommand, name = "convert")]
pub struct ConvertArgs {
#[argp(positional)]
/// path to disc image
file: PathBuf,
#[argp(positional)]
/// output ISO file
out: PathBuf,
#[argp(switch)]
/// enable MD5 hashing (slower)
md5: bool,
}
pub fn convert(args: ConvertArgs) -> nod::Result<()> {
convert_and_verify(&args.file, Some(&args.out), args.md5)
}

nodtool/src/cmd/extract.rs Normal file

@ -0,0 +1,214 @@
use std::{
borrow::Cow,
fs,
fs::File,
io,
io::{BufWriter, Write},
path::{Path, PathBuf},
};
use argp::FromArgs;
use itertools::Itertools;
use nod::{
Disc, DiscHeader, Fst, Node, OpenOptions, PartitionBase, PartitionKind, PartitionMeta,
ResultContext,
};
use size::{Base, Size};
use zerocopy::AsBytes;
use crate::util::{display, has_extension};
#[derive(FromArgs, Debug)]
/// Extract a disc image.
#[argp(subcommand, name = "extract")]
pub struct ExtractArgs {
#[argp(positional)]
/// Path to disc image
file: PathBuf,
#[argp(positional)]
/// Output directory (optional)
out: Option<PathBuf>,
#[argp(switch, short = 'q')]
/// Quiet output
quiet: bool,
#[argp(switch, short = 'h')]
/// Validate data hashes (Wii only)
validate: bool,
#[argp(option, short = 'p')]
/// Partition to extract (default: data)
/// Options: all, data, update, channel, or a partition index
partition: Option<String>,
}
pub fn extract(args: ExtractArgs) -> nod::Result<()> {
let output_dir: PathBuf;
if let Some(dir) = args.out {
output_dir = dir;
} else if has_extension(&args.file, "nfs") {
// Special logic to extract from content/hif_*.nfs to extracted/..
if let Some(parent) = args.file.parent() {
output_dir = parent.with_file_name("extracted");
} else {
output_dir = args.file.with_extension("");
}
} else {
output_dir = args.file.with_extension("");
}
let disc = Disc::new_with_options(&args.file, &OpenOptions {
rebuild_encryption: false,
validate_hashes: args.validate,
})?;
let header = disc.header();
let is_wii = header.is_wii();
if let Some(partition) = args.partition {
if partition.eq_ignore_ascii_case("all") {
for info in disc.partitions() {
let mut out_dir = output_dir.clone();
out_dir.push(info.kind.dir_name().as_ref());
let mut partition = disc.open_partition(info.index)?;
extract_partition(header, partition.as_mut(), &out_dir, is_wii, args.quiet)?;
}
} else if partition.eq_ignore_ascii_case("data") {
let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
extract_partition(header, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
} else if partition.eq_ignore_ascii_case("update") {
let mut partition = disc.open_partition_kind(PartitionKind::Update)?;
extract_partition(header, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
} else if partition.eq_ignore_ascii_case("channel") {
let mut partition = disc.open_partition_kind(PartitionKind::Channel)?;
extract_partition(header, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
} else {
let idx = partition.parse::<usize>().map_err(|_| "Invalid partition index")?;
let mut partition = disc.open_partition(idx)?;
extract_partition(header, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
}
} else {
let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
extract_partition(header, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
}
Ok(())
}
fn extract_partition(
header: &DiscHeader,
partition: &mut dyn PartitionBase,
out_dir: &Path,
is_wii: bool,
quiet: bool,
) -> nod::Result<()> {
let meta = partition.meta()?;
extract_sys_files(header, meta.as_ref(), out_dir, quiet)?;
// Extract FST
let files_dir = out_dir.join("files");
fs::create_dir_all(&files_dir)
.with_context(|| format!("Creating directory {}", display(&files_dir)))?;
let fst = Fst::new(&meta.raw_fst)?;
let mut path_segments = Vec::<(Cow<str>, usize)>::new();
for (idx, node, name) in fst.iter() {
// Remove ended path segments
let mut new_size = 0;
for (_, end) in path_segments.iter() {
if *end == idx {
break;
}
new_size += 1;
}
path_segments.truncate(new_size);
// Add the new path segment
let end = if node.is_dir() { node.length() as usize } else { idx + 1 };
path_segments.push((name?, end));
let path = path_segments.iter().map(|(name, _)| name.as_ref()).join("/");
if node.is_dir() {
fs::create_dir_all(files_dir.join(&path))
.with_context(|| format!("Creating directory {}", path))?;
} else {
extract_node(node, partition, &files_dir, &path, is_wii, quiet)?;
}
}
Ok(())
}
fn extract_sys_files(
header: &DiscHeader,
data: &PartitionMeta,
out_dir: &Path,
quiet: bool,
) -> nod::Result<()> {
let sys_dir = out_dir.join("sys");
fs::create_dir_all(&sys_dir)
.with_context(|| format!("Creating directory {}", display(&sys_dir)))?;
extract_file(data.raw_boot.as_ref(), &sys_dir.join("boot.bin"), quiet)?;
extract_file(data.raw_bi2.as_ref(), &sys_dir.join("bi2.bin"), quiet)?;
extract_file(data.raw_apploader.as_ref(), &sys_dir.join("apploader.img"), quiet)?;
extract_file(data.raw_fst.as_ref(), &sys_dir.join("fst.bin"), quiet)?;
extract_file(data.raw_dol.as_ref(), &sys_dir.join("main.dol"), quiet)?;
// Wii files
if header.is_wii() {
let disc_dir = out_dir.join("disc");
fs::create_dir_all(&disc_dir)
.with_context(|| format!("Creating directory {}", display(&disc_dir)))?;
extract_file(&header.as_bytes()[..0x100], &disc_dir.join("header.bin"), quiet)?;
}
if let Some(ticket) = data.raw_ticket.as_deref() {
extract_file(ticket, &out_dir.join("ticket.bin"), quiet)?;
}
if let Some(tmd) = data.raw_tmd.as_deref() {
extract_file(tmd, &out_dir.join("tmd.bin"), quiet)?;
}
if let Some(cert_chain) = data.raw_cert_chain.as_deref() {
extract_file(cert_chain, &out_dir.join("cert.bin"), quiet)?;
}
if let Some(h3_table) = data.raw_h3_table.as_deref() {
extract_file(h3_table, &out_dir.join("h3.bin"), quiet)?;
}
Ok(())
}
fn extract_file(bytes: &[u8], out_path: &Path, quiet: bool) -> nod::Result<()> {
if !quiet {
println!(
"Extracting {} (size: {})",
display(out_path),
Size::from_bytes(bytes.len()).format().with_base(Base::Base10)
);
}
fs::write(out_path, bytes).with_context(|| format!("Writing file {}", display(out_path)))?;
Ok(())
}
fn extract_node(
node: &Node,
partition: &mut dyn PartitionBase,
base_path: &Path,
name: &str,
is_wii: bool,
quiet: bool,
) -> nod::Result<()> {
let file_path = base_path.join(name);
if !quiet {
println!(
"Extracting {} (size: {})",
display(&file_path),
Size::from_bytes(node.length()).format().with_base(Base::Base10)
);
}
let file = File::create(&file_path)
.with_context(|| format!("Creating file {}", display(&file_path)))?;
let mut w = BufWriter::with_capacity(partition.ideal_buffer_size(), file);
let mut r = partition.open_file(node).with_context(|| {
format!(
"Opening file {} on disc for reading (offset {}, size {})",
name,
node.offset(is_wii),
node.length()
)
})?;
io::copy(&mut r, &mut w).with_context(|| format!("Extracting file {}", display(&file_path)))?;
w.flush().with_context(|| format!("Flushing file {}", display(&file_path)))?;
Ok(())
}
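
Editor's note: the segment stack in extract_partition relies on an FST invariant, namely that a directory node's length() is the node index one past its last descendant, so a segment is popped once iteration reaches its recorded end. A condensed toy model of that walk (toy_paths is hypothetical; it assumes fst.iter() starts past the root node, with retain standing in for the truncation loop):

fn toy_paths() -> Vec<String> {
    // (index, is_dir, end_index, name): "sys" contains "boot.bin";
    // "game.dol" is a sibling of "sys".
    let nodes =
        [(1usize, true, 3usize, "sys"), (2, false, 3, "boot.bin"), (3, false, 4, "game.dol")];
    let mut segments: Vec<(&str, usize)> = Vec::new();
    let mut paths = Vec::new();
    for (idx, is_dir, end, name) in nodes {
        // Drop every segment that has already ended, then push the new one.
        segments.retain(|&(_, seg_end)| seg_end > idx);
        segments.push((name, if is_dir { end } else { idx + 1 }));
        paths.push(segments.iter().map(|&(n, _)| n).collect::<Vec<_>>().join("/"));
    }
    paths // ["sys", "sys/boot.bin", "game.dol"]
}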

nodtool/src/cmd/info.rs Normal file

@ -0,0 +1,103 @@
use std::path::{Path, PathBuf};
use argp::FromArgs;
use nod::{Disc, OpenOptions, SECTOR_SIZE};
use size::Size;
use crate::util::{display, shared::print_header};
#[derive(FromArgs, Debug)]
/// Displays information about disc images.
#[argp(subcommand, name = "info")]
pub struct InfoArgs {
#[argp(positional)]
/// Path to disc image(s)
file: Vec<PathBuf>,
}
pub fn info(args: InfoArgs) -> nod::Result<()> {
for file in &args.file {
info_file(file)?;
}
Ok(())
}
fn info_file(path: &Path) -> nod::Result<()> {
log::info!("Loading {}", display(path));
let disc = Disc::new_with_options(path, &OpenOptions {
rebuild_encryption: false,
validate_hashes: false,
})?;
let header = disc.header();
let meta = disc.meta();
print_header(header, &meta);
if header.is_wii() {
for (idx, info) in disc.partitions().iter().enumerate() {
println!();
println!("Partition {}", idx);
println!("\tType: {}", info.kind);
let offset = info.start_sector as u64 * SECTOR_SIZE as u64;
println!("\tStart sector: {} (offset {:#X})", info.start_sector, offset);
let data_size =
(info.data_end_sector - info.data_start_sector) as u64 * SECTOR_SIZE as u64;
println!(
"\tData offset / size: {:#X} / {:#X} ({})",
info.data_start_sector as u64 * SECTOR_SIZE as u64,
data_size,
Size::from_bytes(data_size)
);
println!(
"\tTMD offset / size: {:#X} / {:#X}",
offset + info.header.tmd_off(),
info.header.tmd_size()
);
println!(
"\tCert offset / size: {:#X} / {:#X}",
offset + info.header.cert_chain_off(),
info.header.cert_chain_size()
);
println!(
"\tH3 offset / size: {:#X} / {:#X}",
offset + info.header.h3_table_off(),
info.header.h3_table_size()
);
let mut partition = disc.open_partition(idx)?;
let meta = partition.meta()?;
let tmd = meta.tmd_header();
let title_id_str = if let Some(tmd) = tmd {
format!(
"{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}",
tmd.title_id[0],
tmd.title_id[1],
tmd.title_id[2],
tmd.title_id[3],
tmd.title_id[4],
tmd.title_id[5],
tmd.title_id[6],
tmd.title_id[7]
)
} else {
"N/A".to_string()
};
println!("\tTitle: {}", info.disc_header.game_title_str());
println!("\tGame ID: {} ({})", info.disc_header.game_id_str(), title_id_str);
println!(
"\tDisc {}, Revision {}",
info.disc_header.disc_num + 1,
info.disc_header.disc_version
);
}
} else if header.is_gamecube() {
// TODO
} else {
println!(
"Invalid GC/Wii magic: {:#010X}/{:#010X}",
header.gcn_magic.get(),
header.wii_magic.get()
);
}
println!();
Ok(())
}

nodtool/src/cmd/mod.rs Normal file

@ -0,0 +1,4 @@
pub mod convert;
pub mod extract;
pub mod info;
pub mod verify;

nodtool/src/cmd/verify.rs Normal file

@ -0,0 +1,25 @@
use std::path::PathBuf;
use argp::FromArgs;
use crate::util::shared::convert_and_verify;
#[derive(FromArgs, Debug)]
/// Verifies disc images.
#[argp(subcommand, name = "verify")]
pub struct VerifyArgs {
#[argp(positional)]
/// path to disc image(s)
file: Vec<PathBuf>,
#[argp(switch)]
/// enable MD5 hashing (slower)
md5: bool,
}
pub fn verify(args: VerifyArgs) -> nod::Result<()> {
for file in &args.file {
convert_and_verify(file, None, args.md5)?;
println!();
}
Ok(())
}

nodtool/src/lib.rs Normal file

@ -0,0 +1,22 @@
use argp::FromArgs;
pub mod cmd;
pub(crate) mod util;
#[derive(FromArgs, Debug)]
#[argp(subcommand)]
pub enum SubCommand {
Info(cmd::info::InfoArgs),
Extract(cmd::extract::ExtractArgs),
Convert(cmd::convert::ConvertArgs),
Verify(cmd::verify::VerifyArgs),
}
pub fn run(command: SubCommand) -> nod::Result<()> {
match command {
SubCommand::Info(c_args) => cmd::info::info(c_args),
SubCommand::Convert(c_args) => cmd::convert::convert(c_args),
SubCommand::Extract(c_args) => cmd::extract::extract(c_args),
SubCommand::Verify(c_args) => cmd::verify::verify(c_args),
}
}

nodtool/src/main.rs Normal file

@ -0,0 +1,133 @@
mod argp_version;
use std::{env, error::Error, ffi::OsStr, fmt, path::PathBuf, str::FromStr};
use argp::{FromArgValue, FromArgs};
use enable_ansi_support::enable_ansi_support;
use nodtool::{run, SubCommand};
use supports_color::Stream;
use tracing::level_filters::LevelFilter;
use tracing_subscriber::EnvFilter;
#[derive(FromArgs, Debug)]
/// Tool for reading GameCube and Wii disc images.
struct TopLevel {
#[argp(subcommand)]
command: SubCommand,
#[argp(option, short = 'C')]
/// Change working directory.
chdir: Option<PathBuf>,
#[argp(option, short = 'L')]
/// Minimum logging level. (Default: info)
/// Possible values: error, warn, info, debug, trace
log_level: Option<LogLevel>,
#[allow(unused)]
#[argp(switch, short = 'V')]
/// Print version information and exit.
version: bool,
#[argp(switch)]
/// Disable color output. (env: NO_COLOR)
no_color: bool,
}
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
enum LogLevel {
Error,
Warn,
Info,
Debug,
Trace,
}
impl FromStr for LogLevel {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(match s {
"error" => Self::Error,
"warn" => Self::Warn,
"info" => Self::Info,
"debug" => Self::Debug,
"trace" => Self::Trace,
_ => return Err(()),
})
}
}
impl fmt::Display for LogLevel {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", match self {
LogLevel::Error => "error",
LogLevel::Warn => "warn",
LogLevel::Info => "info",
LogLevel::Debug => "debug",
LogLevel::Trace => "trace",
})
}
}
impl FromArgValue for LogLevel {
fn from_arg_value(value: &OsStr) -> Result<Self, String> {
String::from_arg_value(value)
.and_then(|s| Self::from_str(&s).map_err(|_| "Invalid log level".to_string()))
}
}
// Duplicated from supports-color so we can check early.
fn env_no_color() -> bool {
match env::var("NO_COLOR").as_deref() {
Ok("") | Ok("0") | Err(_) => false,
Ok(_) => true,
}
}
fn main() {
let args: TopLevel = argp_version::from_env();
let use_colors = if args.no_color || env_no_color() {
false
} else {
// Try to enable ANSI support on Windows.
let _ = enable_ansi_support();
// Disable isatty check for supports-color. (e.g. when used with ninja)
env::set_var("IGNORE_IS_TERMINAL", "1");
supports_color::on(Stream::Stdout).is_some_and(|c| c.has_basic)
};
let format =
tracing_subscriber::fmt::format().with_ansi(use_colors).with_target(false).without_time();
let builder = tracing_subscriber::fmt().event_format(format);
if let Some(level) = args.log_level {
builder
.with_max_level(match level {
LogLevel::Error => LevelFilter::ERROR,
LogLevel::Warn => LevelFilter::WARN,
LogLevel::Info => LevelFilter::INFO,
LogLevel::Debug => LevelFilter::DEBUG,
LogLevel::Trace => LevelFilter::TRACE,
})
.init();
} else {
builder
.with_env_filter(
EnvFilter::builder()
.with_default_directive(LevelFilter::INFO.into())
.from_env_lossy(),
)
.init();
}
let mut result = Ok(());
if let Some(dir) = &args.chdir {
result = env::set_current_dir(dir).map_err(|e| {
nod::Error::Io(format!("Failed to change working directory to '{}'", dir.display()), e)
});
}
result = result.and_then(|_| run(args.command));
if let Err(e) = result {
eprintln!("Failed: {}", e);
if let Some(source) = e.source() {
eprintln!("Caused by: {}", source);
}
std::process::exit(1);
}
}
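
Editor's note: putting the pieces together, hypothetical invocations of the finished binary (flag names follow the argp attributes declared above; file names are placeholders):

nodtool verify --md5 game.iso
nodtool -L debug extract game.rvz out/
nodtool -V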

nodtool/src/util/digest.rs Normal file

@ -0,0 +1,96 @@
use std::{
fmt,
sync::{
mpsc::{sync_channel, SyncSender},
Arc,
},
thread,
thread::JoinHandle,
};
use digest::{Digest, Output};
pub type DigestThread = (SyncSender<Arc<[u8]>>, JoinHandle<DigestResult>);
pub fn digest_thread<H>() -> DigestThread
where H: Hasher + Send + 'static {
let (tx, rx) = sync_channel::<Arc<[u8]>>(1);
let handle = thread::spawn(move || {
let mut hasher = H::new();
while let Ok(data) = rx.recv() {
hasher.update(data.as_ref());
}
hasher.finalize()
});
(tx, handle)
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DigestResult {
Crc32(u32),
Md5([u8; 16]),
Sha1([u8; 20]),
Xxh64(u64),
}
impl DigestResult {
pub fn name(&self) -> &'static str {
match self {
DigestResult::Crc32(_) => "CRC32",
DigestResult::Md5(_) => "MD5",
DigestResult::Sha1(_) => "SHA-1",
DigestResult::Xxh64(_) => "XXH64",
}
}
}
impl fmt::Display for DigestResult {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
DigestResult::Crc32(crc) => write!(f, "{:08x}", crc),
DigestResult::Md5(md5) => write!(f, "{:032x}", <Output<md5::Md5>>::from(*md5)),
DigestResult::Sha1(sha1) => write!(f, "{:040x}", <Output<sha1::Sha1>>::from(*sha1)),
DigestResult::Xxh64(xxh64) => write!(f, "{:016x}", xxh64),
}
}
}
pub trait Hasher {
fn new() -> Self;
fn finalize(self) -> DigestResult;
fn update(&mut self, data: &[u8]);
}
impl Hasher for md5::Md5 {
fn new() -> Self { Digest::new() }
fn finalize(self) -> DigestResult { DigestResult::Md5(Digest::finalize(self).into()) }
fn update(&mut self, data: &[u8]) { Digest::update(self, data) }
}
impl Hasher for sha1::Sha1 {
fn new() -> Self { Digest::new() }
fn finalize(self) -> DigestResult { DigestResult::Sha1(Digest::finalize(self).into()) }
fn update(&mut self, data: &[u8]) { Digest::update(self, data) }
}
impl Hasher for crc32fast::Hasher {
fn new() -> Self { crc32fast::Hasher::new() }
fn finalize(self) -> DigestResult { DigestResult::Crc32(crc32fast::Hasher::finalize(self)) }
fn update(&mut self, data: &[u8]) { crc32fast::Hasher::update(self, data) }
}
impl Hasher for xxhash_rust::xxh64::Xxh64 {
fn new() -> Self { xxhash_rust::xxh64::Xxh64::new(0) }
fn finalize(self) -> DigestResult {
DigestResult::Xxh64(xxhash_rust::xxh64::Xxh64::digest(&self))
}
fn update(&mut self, data: &[u8]) { xxhash_rust::xxh64::Xxh64::update(self, data) }
}
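
Editor's note: a minimal sketch of the fan-out pattern this module enables, assuming its items are in scope; hash_chunks is a hypothetical helper, and shared.rs below applies the same shape to disc reads:

use std::sync::Arc;

fn hash_chunks(chunks: Vec<Vec<u8>>) -> Vec<DigestResult> {
    let threads = vec![digest_thread::<crc32fast::Hasher>(), digest_thread::<sha1::Sha1>()];
    for chunk in chunks {
        // One allocation per chunk, shared by every hasher.
        let arc: Arc<[u8]> = Arc::from(chunk);
        for (tx, _) in &threads {
            tx.send(arc.clone()).expect("hash thread exited early");
        }
    }
    threads
        .into_iter()
        .map(|(tx, handle)| {
            drop(tx); // Closing the channel lets the thread finalize.
            handle.join().expect("hash thread panicked")
        })
        .collect()
}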

nodtool/src/util/mod.rs Normal file

@ -0,0 +1,41 @@
pub mod digest;
pub mod redump;
pub mod shared;
use std::{
fmt,
fmt::Write,
path::{Path, MAIN_SEPARATOR},
};
pub fn display(path: &Path) -> PathDisplay { PathDisplay { path } }
pub struct PathDisplay<'a> {
path: &'a Path,
}
impl<'a> fmt::Display for PathDisplay<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut first = true;
for segment in self.path.iter() {
let segment_str = segment.to_string_lossy();
if segment_str == "." {
continue;
}
if first {
first = false;
} else {
f.write_char(MAIN_SEPARATOR)?;
}
f.write_str(&segment_str)?;
}
Ok(())
}
}
pub fn has_extension(filename: &Path, extension: &str) -> bool {
match filename.extension() {
Some(ext) => ext.eq_ignore_ascii_case(extension),
None => false,
}
}

nodtool/src/util/redump.rs Normal file

@ -0,0 +1,58 @@
use std::{mem::size_of, str};
use nod::array_ref;
use zerocopy::{FromBytes, FromZeroes};
#[derive(Clone, Debug)]
pub struct GameResult {
pub name: &'static str,
pub crc32: u32,
pub md5: [u8; 16],
pub sha1: [u8; 20],
}
pub fn find_by_crc32(crc32: u32) -> Option<GameResult> {
let header: &Header = Header::ref_from_prefix(&DATA.0).unwrap();
assert_eq!(header.entry_size as usize, size_of::<GameEntry>());
let entries_size = header.entry_count as usize * size_of::<GameEntry>();
let entries: &[GameEntry] =
GameEntry::slice_from(&DATA.0[size_of::<Header>()..size_of::<Header>() + entries_size])
.unwrap();
let string_table: &[u8] = &DATA.0[size_of::<Header>() + entries_size..];
// Binary search by CRC32
let index = entries.binary_search_by_key(&crc32, |entry| entry.crc32).ok()?;
// Parse the entry
let entry = &entries[index];
let offset = entry.string_table_offset as usize;
let name_size = u32::from_ne_bytes(*array_ref![string_table, offset, 4]) as usize;
let name = str::from_utf8(&string_table[offset + 4..offset + 4 + name_size]).unwrap();
Some(GameResult { name, crc32: entry.crc32, md5: entry.md5, sha1: entry.sha1 })
}
#[repr(C, align(4))]
struct Aligned<T: ?Sized>(T);
const DATA: &Aligned<[u8]> =
&Aligned(*include_bytes!(concat!(env!("OUT_DIR"), "/parsed-dats.bin")));
// Keep in sync with build.rs
#[derive(Clone, Debug, FromBytes, FromZeroes)]
#[repr(C, align(4))]
struct Header {
entry_count: u32,
entry_size: u32,
}
// Keep in sync with build.rs
#[derive(Clone, Debug, FromBytes, FromZeroes)]
#[repr(C, align(4))]
struct GameEntry {
crc32: u32,
string_table_offset: u32,
sectors: u32,
md5: [u8; 16],
sha1: [u8; 20],
}
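
Editor's note: a hypothetical lookup against the embedded table; the CRC32 value is a placeholder, not a real disc checksum:

if let Some(game) = find_by_crc32(0x1234ABCD) {
    println!("Matched {} ({:08x})", game.name, game.crc32);
}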

nodtool/src/util/shared.rs Normal file

@ -0,0 +1,210 @@
use std::{
cmp::min,
fmt,
fs::File,
io::{Read, Write},
path::Path,
sync::{mpsc::sync_channel, Arc},
thread,
};
use indicatif::{ProgressBar, ProgressState, ProgressStyle};
use nod::{Compression, Disc, DiscHeader, DiscMeta, OpenOptions, Result, ResultContext};
use size::Size;
use zerocopy::FromZeroes;
use crate::util::{
digest::{digest_thread, DigestResult},
display, redump,
};
pub fn print_header(header: &DiscHeader, meta: &DiscMeta) {
println!("Format: {}", meta.format);
if meta.compression != Compression::None {
println!("Compression: {}", meta.compression);
}
if let Some(block_size) = meta.block_size {
println!("Block size: {}", Size::from_bytes(block_size));
}
println!("Lossless: {}", meta.lossless);
println!(
"Verification data: {}",
meta.crc32.is_some()
|| meta.md5.is_some()
|| meta.sha1.is_some()
|| meta.xxhash64.is_some()
);
println!();
println!("Title: {}", header.game_title_str());
println!("Game ID: {}", header.game_id_str());
println!("Disc {}, Revision {}", header.disc_num + 1, header.disc_version);
if header.no_partition_hashes != 0 {
println!("[!] Disc has no hashes");
}
if header.no_partition_encryption != 0 {
println!("[!] Disc is not encrypted");
}
}
pub fn convert_and_verify(in_file: &Path, out_file: Option<&Path>, md5: bool) -> Result<()> {
println!("Loading {}", display(in_file));
let mut disc = Disc::new_with_options(in_file, &OpenOptions {
rebuild_encryption: true,
validate_hashes: false,
})?;
let header = disc.header();
let meta = disc.meta();
print_header(header, &meta);
let disc_size = disc.disc_size();
let mut file = if let Some(out_file) = out_file {
Some(
File::create(out_file)
.with_context(|| format!("Creating file {}", display(out_file)))?,
)
} else {
None
};
if out_file.is_some() {
println!("\nConverting...");
} else {
println!("\nVerifying...");
}
let pb = ProgressBar::new(disc_size);
pb.set_style(ProgressStyle::with_template("{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {bytes}/{total_bytes} ({bytes_per_sec}, {eta})")
.unwrap()
.with_key("eta", |state: &ProgressState, w: &mut dyn fmt::Write| {
write!(w, "{:.1}s", state.eta().as_secs_f64()).unwrap()
})
.progress_chars("#>-"));
const BUFFER_SIZE: usize = 1015808; // LCM(0x8000, 0x7C00)
let digest_threads = if md5 {
vec![
digest_thread::<crc32fast::Hasher>(),
digest_thread::<md5::Md5>(),
digest_thread::<sha1::Sha1>(),
digest_thread::<xxhash_rust::xxh64::Xxh64>(),
]
} else {
vec![
digest_thread::<crc32fast::Hasher>(),
digest_thread::<sha1::Sha1>(),
digest_thread::<xxhash_rust::xxh64::Xxh64>(),
]
};
let (w_tx, w_rx) = sync_channel::<Arc<[u8]>>(1);
let w_thread = thread::spawn(move || {
let mut total_written = 0u64;
while let Ok(data) = w_rx.recv() {
if let Some(file) = &mut file {
file.write_all(data.as_ref())
.with_context(|| {
format!("Writing {} bytes at offset {}", data.len(), total_written)
})
.unwrap();
}
total_written += data.len() as u64;
pb.set_position(total_written);
}
if let Some(mut file) = file {
file.flush().context("Flushing output file").unwrap();
}
pb.finish();
});
let mut total_read = 0u64;
let mut buf = <u8>::new_box_slice_zeroed(BUFFER_SIZE);
while total_read < disc_size {
let read = min(BUFFER_SIZE as u64, disc_size - total_read) as usize;
disc.read_exact(&mut buf[..read]).with_context(|| {
format!("Reading {} bytes at disc offset {}", BUFFER_SIZE, total_read)
})?;
let arc = Arc::<[u8]>::from(&buf[..read]);
for (tx, _) in &digest_threads {
tx.send(arc.clone()).map_err(|_| "Sending data to hash thread")?;
}
w_tx.send(arc).map_err(|_| "Sending data to write thread")?;
total_read += read as u64;
}
drop(w_tx); // Close channel
w_thread.join().unwrap();
println!();
if let Some(path) = out_file {
println!("Wrote {} to {}", Size::from_bytes(total_read), display(path));
}
println!();
let mut crc32 = None;
let mut md5 = None;
let mut sha1 = None;
let mut xxh64 = None;
for (tx, handle) in digest_threads {
drop(tx); // Close channel
match handle.join().unwrap() {
DigestResult::Crc32(v) => crc32 = Some(v),
DigestResult::Md5(v) => md5 = Some(v),
DigestResult::Sha1(v) => sha1 = Some(v),
DigestResult::Xxh64(v) => xxh64 = Some(v),
}
}
let redump_entry = crc32.and_then(redump::find_by_crc32);
let expected_crc32 = meta.crc32.or(redump_entry.as_ref().map(|e| e.crc32));
let expected_md5 = meta.md5.or(redump_entry.as_ref().map(|e| e.md5));
let expected_sha1 = meta.sha1.or(redump_entry.as_ref().map(|e| e.sha1));
let expected_xxh64 = meta.xxhash64;
fn print_digest(value: DigestResult, expected: Option<DigestResult>) {
print!("{:<6}: ", value.name());
if let Some(expected) = expected {
if expected != value {
print!("{} ❌ (expected: {})", value, expected);
} else {
print!("{}", value);
}
} else {
print!("{}", value);
}
println!();
}
if let Some(entry) = &redump_entry {
let mut full_match = true;
if let Some(md5) = md5 {
if entry.md5 != md5 {
full_match = false;
}
}
if let Some(sha1) = sha1 {
if entry.sha1 != sha1 {
full_match = false;
}
}
if full_match {
println!("Redump: {}", entry.name);
} else {
println!("Redump: {} ❓ (partial match)", entry.name);
}
} else {
println!("Redump: Not found ❌");
}
if let Some(crc32) = crc32 {
print_digest(DigestResult::Crc32(crc32), expected_crc32.map(DigestResult::Crc32));
}
if let Some(md5) = md5 {
print_digest(DigestResult::Md5(md5), expected_md5.map(DigestResult::Md5));
}
if let Some(sha1) = sha1 {
print_digest(DigestResult::Sha1(sha1), expected_sha1.map(DigestResult::Sha1));
}
if let Some(xxh64) = xxh64 {
print_digest(DigestResult::Xxh64(xxh64), expected_xxh64.map(DigestResult::Xxh64));
}
Ok(())
}
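
Editor's note on BUFFER_SIZE: 0x8000 = 2^15 and 0x7C00 = 2^10 * 31, so their least common multiple is 2^15 * 31 = 1,015,808; every full buffer is therefore a whole number of blocks for both GameCube (0x8000) and Wii (0x7C00) data. A compile-time check of that arithmetic:

const _: () = assert!(1_015_808 % 0x8000 == 0 && 1_015_808 % 0x7C00 == 0);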

rustfmt.toml Normal file

@ -0,0 +1,8 @@
fn_single_line = true
group_imports = "StdExternalCrate"
imports_granularity = "Crate"
overflow_delimited_expr = true
reorder_impl_items = true
use_field_init_shorthand = true
use_small_heuristics = "Max"
where_single_line = true


@ -1,91 +0,0 @@
use std::{env, fs, io};
use std::io::BufWriter;
use std::path::{Path, PathBuf};
use clap::{AppSettings, clap_app};
use file_size;
use nod::disc::{new_disc_base, PartReadStream};
use nod::fst::NodeType;
use nod::io::{has_extension, new_disc_io};
use nod::Result;
fn main() -> Result<()> {
let matches = clap_app!(nodtool =>
(settings: &[
AppSettings::SubcommandRequiredElseHelp,
AppSettings::GlobalVersion,
AppSettings::DeriveDisplayOrder,
AppSettings::VersionlessSubcommands,
])
(global_settings: &[
AppSettings::ColoredHelp,
AppSettings::UnifiedHelpMessage,
])
(version: env!("CARGO_PKG_VERSION"))
(author: "Luke Street <luke@street.dev>")
(about: "Tool for reading GameCube and Wii disc images.")
(long_about: "Tool for reading GameCube and Wii disc images.
Based on <https://github.com/AxioDL/nod>, original authors:
Jack Andersen (jackoalan)
Phillip Stephens (Antidote)")
(@subcommand extract =>
(about: "Extract GameCube & Wii disc images")
(@arg FILE: +required "Path to disc image (ISO or NFS)")
(@arg DIR: "Output directory (optional)")
(@arg quiet: -q "Quiet output")
)
).get_matches();
if let Some(matches) = matches.subcommand_matches("extract") {
let file: PathBuf = PathBuf::from(matches.value_of("FILE").unwrap());
let output_dir: PathBuf;
if let Some(dir) = matches.value_of("DIR") {
output_dir = PathBuf::from(dir);
} else if has_extension(file.as_path(), "nfs") {
// Special logic to extract from content/hif_*.nfs to extracted/..
if let Some(parent) = file.parent() {
output_dir = parent.with_file_name("extracted");
} else {
output_dir = file.with_extension("");
}
} else {
output_dir = file.with_extension("");
}
let mut disc_io = new_disc_io(file.as_path())?;
let disc_base = new_disc_base(disc_io.as_mut())?;
let mut partition = disc_base.get_data_partition(disc_io.as_mut())?;
let header = partition.read_header()?;
extract_node(header.root_node(), partition.as_mut(), output_dir.as_path())?;
}
Result::Ok(())
}
fn extract_node(node: &NodeType, partition: &mut dyn PartReadStream, base_path: &Path) -> io::Result<()> {
match node {
NodeType::File(v) => {
let mut file_path = base_path.to_owned();
file_path.push(v.name.as_ref());
println!("Extracting {} (size: {})", file_path.to_string_lossy(), file_size::fit_4(v.length as u64));
let file = fs::File::create(file_path)?;
let mut buf_writer = BufWriter::with_capacity(partition.ideal_buffer_size(), file);
io::copy(&mut partition.begin_file_stream(v)?, &mut buf_writer)?;
}
NodeType::Directory(v, c) => {
if v.name.is_empty() {
fs::create_dir_all(base_path)?;
for x in c {
extract_node(x, partition, base_path)?;
}
} else {
let mut new_base = base_path.to_owned();
new_base.push(v.name.as_ref());
fs::create_dir_all(&new_base)?;
for x in c {
extract_node(x, partition, new_base.as_path())?;
}
}
}
}
io::Result::Ok(())
}

src/disc/gcn.rs

@ -1,133 +0,0 @@
use std::io;
use std::io::{Read, Seek, SeekFrom};
use binread::prelude::*;
use crate::{div_rem, Result};
use crate::disc::{BI2Header, BUFFER_SIZE, DiscBase, DiscIO, Header, PartHeader, PartReadStream};
use crate::fst::{find_node, Node, node_parser, NodeKind, NodeType};
use crate::streams::{ReadStream, SharedWindowedReadStream};
pub(crate) struct DiscGCN {
pub(crate) header: Header,
}
pub(crate) fn new_disc_gcn(header: Header) -> Result<DiscGCN> {
Result::Ok(DiscGCN {
header
})
}
impl DiscBase for DiscGCN {
fn get_header(&self) -> &Header {
&self.header
}
fn get_data_partition<'a>(&self, disc_io: &'a mut dyn DiscIO) -> Result<Box<dyn PartReadStream + 'a>> {
Result::Ok(Box::from(GCPartReadStream {
stream: disc_io.begin_read_stream(0)?,
offset: 0,
cur_block: u64::MAX,
buf: [0; BUFFER_SIZE],
}))
}
}
struct GCPartReadStream<'a> {
stream: Box<dyn ReadStream + 'a>,
offset: u64,
cur_block: u64,
buf: [u8; BUFFER_SIZE],
}
impl<'a> Read for GCPartReadStream<'a> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let (mut block, mut block_offset) = div_rem(self.offset as usize, BUFFER_SIZE);
let mut rem = buf.len();
let mut read: usize = 0;
while rem > 0 {
if block != self.cur_block as usize {
self.stream.read(&mut self.buf)?;
self.cur_block = block as u64;
}
let mut cache_size = rem;
if cache_size + block_offset > BUFFER_SIZE {
cache_size = BUFFER_SIZE - block_offset;
}
buf[read..read + cache_size]
.copy_from_slice(&self.buf[block_offset..block_offset + cache_size]);
read += cache_size;
rem -= cache_size;
block_offset = 0;
block += 1;
}
self.offset += buf.len() as u64;
io::Result::Ok(buf.len())
}
}
impl<'a> Seek for GCPartReadStream<'a> {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
self.offset = match pos {
SeekFrom::Start(v) => v,
SeekFrom::End(v) => (self.stable_stream_len()? as i64 + v) as u64,
SeekFrom::Current(v) => (self.offset as i64 + v) as u64,
};
let block = self.offset / BUFFER_SIZE as u64;
if block != self.cur_block {
self.stream.seek(SeekFrom::Start(block * BUFFER_SIZE as u64))?;
self.cur_block = u64::MAX;
}
io::Result::Ok(self.offset)
}
fn stream_position(&mut self) -> io::Result<u64> {
io::Result::Ok(self.offset)
}
}
impl<'a> ReadStream for GCPartReadStream<'a> {
fn stable_stream_len(&mut self) -> io::Result<u64> {
self.stream.stable_stream_len()
}
}
impl<'a> PartReadStream for GCPartReadStream<'a> {
fn begin_file_stream(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream> {
assert_eq!(node.kind, NodeKind::File);
io::Result::Ok(self.new_window(node.offset as u64, node.length as u64)?)
}
fn read_header(&mut self) -> Result<Box<dyn PartHeader>> {
self.seek(SeekFrom::Start(0))?;
Result::Ok(Box::from(self.read_be::<GCPartition>()?))
}
fn ideal_buffer_size(&self) -> usize {
BUFFER_SIZE
}
}
#[derive(Clone, Debug, PartialEq, BinRead)]
pub(crate) struct GCPartition {
header: Header,
bi2_header: BI2Header,
#[br(seek_before = SeekFrom::Start(header.fst_off as u64))]
#[br(parse_with = node_parser)]
root_node: NodeType,
}
impl PartHeader for GCPartition {
fn root_node(&self) -> &NodeType {
&self.root_node
}
fn find_node(&self, path: &str) -> Option<&NodeType> {
find_node(&self.root_node, path)
}
}

src/disc/mod.rs

@ -1,182 +0,0 @@
//! Disc type related logic (GameCube, Wii)
use std::fmt::Debug;
use std::io;
use binread::{BinReaderExt, NullString, prelude::*};
use crate::{Error, Result};
use crate::disc::{gcn::new_disc_gcn, wii::new_disc_wii};
use crate::fst::{Node, NodeType};
use crate::io::DiscIO;
use crate::streams::{ReadStream, SharedWindowedReadStream};
pub(crate) mod gcn;
pub(crate) mod wii;
/// Shared GameCube & Wii disc header
#[derive(Clone, Debug, PartialEq, BinRead)]
pub struct Header {
pub game_id: [u8; 6],
/// Used in multi-disc games
pub disc_num: u8,
pub disc_version: u8,
pub audio_streaming: u8,
pub audio_stream_buf_size: u8,
#[br(pad_before(14))]
/// If this is a Wii disc, this will be 0x5D1C9EA3
pub wii_magic: u32,
/// If this is a GameCube disc, this will be 0xC2339F3D
pub gcn_magic: u32,
#[br(pad_size_to(64), map = NullString::into_string)]
pub game_title: String,
/// Disable hash verification
pub disable_hash_verification: u8,
/// Disable disc encryption and H3 hash table loading and verification
pub disable_disc_enc: u8,
#[br(pad_before(0x39e))]
pub debug_mon_off: u32,
pub debug_load_addr: u32,
#[br(pad_before(0x18))]
/// Offset to main DOL (Wii: >> 2)
pub dol_off: u32,
/// Offset to file system table (Wii: >> 2)
pub fst_off: u32,
/// File system size
pub fst_sz: u32,
/// File system max size
pub fst_max_sz: u32,
pub fst_memory_address: u32,
pub user_position: u32,
#[br(pad_after(4))]
pub user_sz: u32,
}
#[derive(Debug, PartialEq, BinRead, Copy, Clone)]
pub(crate) struct BI2Header {
pub(crate) debug_monitor_size: i32,
pub(crate) sim_mem_size: i32,
pub(crate) arg_offset: u32,
pub(crate) debug_flag: u32,
pub(crate) trk_address: u32,
pub(crate) trk_size: u32,
pub(crate) country_code: u32,
pub(crate) unk1: u32,
pub(crate) unk2: u32,
pub(crate) unk3: u32,
pub(crate) dol_limit: u32,
#[br(pad_after(0x1fd0))]
pub(crate) unk4: u32,
}
pub(crate) const BUFFER_SIZE: usize = 0x8000;
/// Contains a disc's header & partition information.
pub trait DiscBase {
/// Retrieves the disc's header.
fn get_header(&self) -> &Header;
/// Opens a new partition read stream for the first data partition.
///
/// # Examples
///
/// Basic usage:
/// ```
/// use nod::disc::new_disc_base;
/// use nod::io::new_disc_io;
///
/// let mut disc_io = new_disc_io("path/to/file".as_ref())?;
/// let disc_base = new_disc_base(disc_io.as_mut())?;
/// let mut partition = disc_base.get_data_partition(disc_io.as_mut())?;
/// ```
fn get_data_partition<'a>(&self, disc_io: &'a mut dyn DiscIO) -> Result<Box<dyn PartReadStream + 'a>>;
}
/// Creates a new [`DiscBase`] instance.
///
/// # Examples
///
/// Basic usage:
/// ```
/// use nod::io::new_disc_io;
/// use nod::disc::new_disc_base;
///
/// let mut disc_io = new_disc_io("path/to/file".as_ref())?;
/// let disc_base = new_disc_base(disc_io.as_mut())?;
/// disc_base.get_header();
/// ```
pub fn new_disc_base(disc_io: &mut dyn DiscIO) -> Result<Box<dyn DiscBase>> {
let mut stream = disc_io.begin_read_stream(0)?;
let header: Header = stream.read_be()?;
if header.wii_magic == 0x5D1C9EA3 {
Result::Ok(Box::from(new_disc_wii(stream.as_mut(), header)?))
} else if header.gcn_magic == 0xC2339F3D {
Result::Ok(Box::from(new_disc_gcn(header)?))
} else {
Result::Err(Error::DiscFormat("Invalid GC/Wii magic".to_string()))
}
}
/// An open read stream for a disc partition.
pub trait PartReadStream: ReadStream {
/// Seeks the read stream to the specified file system node
/// and returns a windowed stream.
///
/// # Examples
///
/// Basic usage:
/// ```
/// use nod::disc::{new_disc_base, PartHeader};
/// use nod::fst::NodeType;
/// use nod::io::new_disc_io;
/// use std::io::Read;
///
/// let mut disc_io = new_disc_io("path/to/file".as_ref())?;
/// let disc_base = new_disc_base(disc_io.as_mut())?;
/// let mut partition = disc_base.get_data_partition(disc_io.as_mut())?;
/// let header = partition.read_header()?;
/// if let Some(NodeType::File(node)) = header.find_node("/MP3/Worlds.txt") {
/// let mut s = String::new();
/// partition.begin_file_stream(node)?.read_to_string(&mut s);
/// println!(s);
/// }
/// ```
fn begin_file_stream(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream>;
/// Reads the partition header and file system table.
fn read_header(&mut self) -> Result<Box<dyn PartHeader>>;
/// The ideal size for buffered reads from this partition.
/// GameCube discs have a data block size of 0x8000,
/// whereas Wii discs have a data block size of 0x7c00.
fn ideal_buffer_size(&self) -> usize;
}
/// Disc partition header with file system table.
pub trait PartHeader: Debug {
/// The root node for the filesystem.
fn root_node(&self) -> &NodeType;
/// Finds a particular file or directory by path.
///
/// # Examples
///
/// Basic usage:
/// ```
/// use nod::disc::{new_disc_base, PartHeader};
/// use nod::fst::NodeType;
/// use nod::io::new_disc_io;
///
/// let mut disc_io = new_disc_io("path/to/file".as_ref())?;
/// let disc_base = new_disc_base(disc_io.as_mut())?;
/// let mut partition = disc_base.get_data_partition(disc_io.as_mut())?;
/// let header = partition.read_header()?;
/// if let Some(NodeType::File(node)) = header.find_node("/MP1/Metroid1.pak") {
/// println!(node.name);
/// }
/// if let Some(NodeType::Directory(node, children)) = header.find_node("/MP1") {
/// println!("Number of files: {}", children.len());
/// }
/// ```
fn find_node(&self, path: &str) -> Option<&NodeType>;
}

src/disc/wii.rs

@ -1,396 +0,0 @@
use std::{io, io::{Read, Seek, SeekFrom}};
use aes::{Aes128, NewBlockCipher, Block};
use binread::prelude::*;
use block_modes::{block_padding::NoPadding, BlockMode, Cbc};
use sha1::{digest, Digest, Sha1};
use crate::disc::{BI2Header, BUFFER_SIZE, DiscBase, DiscIO, Header, PartHeader, PartReadStream};
use crate::{Error, div_rem, Result, array_ref};
use crate::fst::{find_node, Node, NodeKind, NodeType, node_parser};
use crate::streams::{OwningWindowedReadStream, ReadStream, SharedWindowedReadStream, wrap_windowed};
type Aes128Cbc = Cbc<Aes128, NoPadding>;
const BLOCK_SIZE: usize = 0x7c00;
const BUFFER_OFFSET: usize = BUFFER_SIZE - BLOCK_SIZE;
const COMMON_KEYS: [[u8; 16]; 2] = [
/* Normal */
[0xeb, 0xe4, 0x2a, 0x22, 0x5e, 0x85, 0x93, 0xe4, 0x48, 0xd9, 0xc5, 0x45, 0x73, 0x81, 0xaa, 0xf7],
/* Korean */
[0x63, 0xb8, 0x2b, 0xb4, 0xf4, 0x61, 0x4e, 0x2e, 0x13, 0xf2, 0xfe, 0xfb, 0xba, 0x4c, 0x9b, 0x7e],
];
#[derive(Debug, PartialEq, BinRead)]
#[br(repr = u32)]
enum WiiPartType {
Data,
Update,
Channel,
}
#[derive(Debug, PartialEq, BinRead)]
#[br(repr = u32)]
enum SigType {
Rsa4096 = 0x00010000,
Rsa2048 = 0x00010001,
EllipticalCurve = 0x00010002,
}
#[derive(Debug, PartialEq, BinRead)]
#[br(repr = u32)]
enum KeyType {
Rsa4096 = 0x00000000,
Rsa2048 = 0x00000001,
}
#[derive(Debug, PartialEq, BinRead)]
struct WiiPart {
#[br(map = | x: u32 | (x as u64) << 2)]
part_data_off: u64,
part_type: WiiPartType,
#[br(restore_position, args(part_data_off))]
part_header: WiiPartitionHeader,
}
#[derive(Debug, PartialEq, BinRead)]
struct WiiPartInfo {
#[br(seek_before = SeekFrom::Start(0x40000))]
part_count: u32,
#[br(map = | x: u32 | (x as u64) << 2)]
part_info_off: u64,
#[br(seek_before = SeekFrom::Start(part_info_off), count = part_count)]
parts: Vec<WiiPart>,
}
#[derive(Debug, PartialEq, BinRead)]
struct TicketTimeLimit {
enable_time_limit: u32,
time_limit: u32,
}
#[derive(Debug, PartialEq, BinRead)]
struct Ticket {
sig_type: SigType,
#[br(count = 256)]
sig: Vec<u8>,
#[br(pad_before = 60, count = 64)]
sig_issuer: Vec<u8>,
#[br(count = 60)]
ecdh: Vec<u8>,
#[br(pad_before = 3)]
enc_key: [u8; 16],
#[br(pad_before = 1)]
ticket_id: [u8; 8],
console_id: [u8; 4],
title_id: [u8; 8],
#[br(pad_before = 2)]
ticket_version: u16,
permitted_titles_mask: u32,
permit_mask: u32,
title_export_allowed: u8,
common_key_idx: u8,
#[br(pad_before = 48, count = 64)]
content_access_permissions: Vec<u8>,
#[br(pad_before = 2, count = 8)]
time_limits: Vec<TicketTimeLimit>,
}
#[derive(Debug, PartialEq, BinRead)]
struct TMDContent {
id: u32,
index: u16,
content_type: u16,
size: u64,
hash: [u8; 20],
}
#[derive(Debug, PartialEq, BinRead)]
struct TMD {
sig_type: SigType,
#[br(count = 256)]
sig: Vec<u8>,
#[br(pad_before = 60, count = 64)]
sig_issuer: Vec<u8>,
version: u8,
ca_crl_version: u8,
signer_crl_version: u8,
#[br(pad_before = 1)]
ios_id_major: u32,
ios_id_minor: u32,
title_id_major: u32,
title_id_minor: [char; 4],
title_type: u32,
group_id: u16,
#[br(pad_before = 62)]
access_flags: u32,
title_version: u16,
num_contents: u16,
#[br(pad_after = 2)]
boot_idx: u16,
#[br(count = num_contents)]
contents: Vec<TMDContent>,
}
#[derive(Debug, PartialEq, BinRead)]
struct Certificate {
sig_type: SigType,
#[br(count = if sig_type == SigType::Rsa4096 { 512 }
else if sig_type == SigType::Rsa2048 { 256 }
else if sig_type == SigType::EllipticalCurve { 64 } else { 0 })]
sig: Vec<u8>,
#[br(pad_before = 60, count = 64)]
issuer: Vec<u8>,
key_type: KeyType,
#[br(count = 64)]
subject: Vec<u8>,
#[br(count = if key_type == KeyType::Rsa4096 { 512 } else if key_type == KeyType::Rsa2048 { 256 } else { 0 })]
key: Vec<u8>,
modulus: u32,
#[br(pad_after = 52)]
pub_exp: u32,
}
#[derive(Debug, PartialEq, BinRead)]
#[br(import(partition_off: u64))]
struct WiiPartitionHeader {
#[br(seek_before = SeekFrom::Start(partition_off))]
ticket: Ticket,
tmd_size: u32,
#[br(map = | x: u32 | ((x as u64) << 2) + partition_off)]
tmd_off: u64,
cert_chain_size: u32,
#[br(map = | x: u32 | ((x as u64) << 2) + partition_off)]
cert_chain_off: u64,
#[br(map = | x: u32 | ((x as u64) << 2) + partition_off)]
global_hash_table_off: u64,
#[br(map = | x: u32 | ((x as u64) << 2) + partition_off)]
data_off: u64,
#[br(map = | x: u32 | (x as u64) << 2)]
data_size: u64,
#[br(seek_before = SeekFrom::Start(tmd_off))]
tmd: TMD,
#[br(seek_before = SeekFrom::Start(cert_chain_off))]
ca_cert: Certificate,
tmd_cert: Certificate,
ticket_cert: Certificate,
#[br(seek_before = SeekFrom::Start(global_hash_table_off), count = 0x18000)]
h3_data: Vec<u8>,
}
pub(crate) struct DiscWii {
header: Header,
part_info: WiiPartInfo,
}
pub(crate) fn new_disc_wii(mut stream: &mut dyn ReadStream, header: Header) -> Result<DiscWii> {
let mut disc = DiscWii {
header,
part_info: stream.read_be()?,
};
disc.decrypt_partition_keys()?;
Result::Ok(disc)
}
impl DiscWii {
pub(crate) fn decrypt_partition_keys(&mut self) -> Result<()> {
for part in self.part_info.parts.as_mut_slice() {
let ticket = &mut part.part_header.ticket;
let mut iv: [u8; 16] = [0; 16];
iv[..8].copy_from_slice(&ticket.title_id);
Aes128Cbc::new(
Aes128::new(&COMMON_KEYS[ticket.common_key_idx as usize].into()),
&iv.into(),
).decrypt(&mut ticket.enc_key)?;
}
Result::Ok(())
}
}
impl DiscBase for DiscWii {
fn get_header(&self) -> &Header {
&self.header
}
fn get_data_partition<'a>(&self, disc_io: &'a mut dyn DiscIO) -> Result<Box<dyn PartReadStream + 'a>> {
let part = self.part_info.parts.iter().find(|v| v.part_type == WiiPartType::Data)
.ok_or(Error::DiscFormat("Failed to locate data partition".to_string()))?;
let data_off = part.part_header.data_off;
let result = Box::new(WiiPartReadStream {
stream: wrap_windowed(
disc_io.begin_read_stream(data_off)?,
data_off, part.part_header.data_size,
)?,
crypto: if disc_io.has_wii_crypto() {
Aes128::new(&part.part_header.ticket.enc_key.into()).into()
} else { Option::None },
offset: 0,
cur_block: u64::MAX,
buf: [0; 0x8000],
validate_hashes: false,
});
Result::Ok(result)
}
}
struct WiiPartReadStream<'a> {
stream: OwningWindowedReadStream<'a>,
crypto: Option<Aes128>,
offset: u64,
cur_block: u64,
buf: [u8; BUFFER_SIZE],
validate_hashes: bool,
}
impl<'a> PartReadStream for WiiPartReadStream<'a> {
fn begin_file_stream(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream> {
assert_eq!(node.kind, NodeKind::File);
io::Result::Ok(self.new_window((node.offset as u64) << 2, node.length as u64)?)
}
fn read_header(&mut self) -> Result<Box<dyn PartHeader>> {
self.seek(SeekFrom::Start(0))?;
Result::Ok(Box::from(self.read_be::<WiiPartition>()?))
}
fn ideal_buffer_size(&self) -> usize {
BLOCK_SIZE
}
}
#[inline(always)]
fn as_digest(slice: &[u8; 20]) -> digest::Output<Sha1> { (*slice).into() }
fn decrypt_block(part: &mut WiiPartReadStream, cluster: usize) -> io::Result<()> {
part.stream.read(&mut part.buf)?;
if part.crypto.is_some() {
// Fetch IV before decrypting header
let iv = Block::from(*array_ref![part.buf, 0x3d0, 16]);
// Don't need to decrypt header if we're not validating hashes
if part.validate_hashes {
Aes128Cbc::new(part.crypto.as_ref().unwrap().clone(), &Block::from([0; 16]))
.decrypt(&mut part.buf[..BUFFER_OFFSET])
.expect("Failed to decrypt header");
}
Aes128Cbc::new(part.crypto.as_ref().unwrap().clone(), &iv)
.decrypt(&mut part.buf[BUFFER_OFFSET..])
.expect("Failed to decrypt block");
}
if part.validate_hashes && part.crypto.is_some() /* FIXME NFS validation? */ {
let (mut group, sub_group) = div_rem(cluster, 8);
group %= 8;
// H0 hashes
for i in 0..31 {
let mut hash = Sha1::new();
hash.update(array_ref![part.buf, (i + 1) * 0x400, 0x400]);
let expected = as_digest(array_ref![part.buf, i * 20, 20]);
let output = hash.finalize();
if output != expected {
panic!("Invalid hash! (block {:?}) {:?}\n\texpected {:?}", i, output.as_slice(), expected);
}
}
// H1 hash
{
let mut hash = Sha1::new();
hash.update(array_ref![part.buf, 0, 0x26C]);
let expected = as_digest(array_ref![part.buf, 0x280 + sub_group * 20, 20]);
let output = hash.finalize();
if output != expected {
panic!("Invalid hash! (subgroup {:?}) {:?}\n\texpected {:?}", sub_group, output.as_slice(), expected);
}
}
// H2 hash
{
let mut hash = Sha1::new();
hash.update(array_ref![part.buf, 0x280, 0xA0]);
let expected = as_digest(array_ref![part.buf, 0x340 + group * 20, 20]);
let output = hash.finalize();
if output != expected {
panic!("Invalid hash! (group {:?}) {:?}\n\texpected {:?}", group, output.as_slice(), expected);
}
}
}
io::Result::Ok(())
}
impl<'a> Read for WiiPartReadStream<'a> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let (mut block, mut block_offset) = div_rem(self.offset as usize, BLOCK_SIZE);
let mut rem = buf.len();
let mut read: usize = 0;
while rem > 0 {
if block != self.cur_block as usize {
decrypt_block(self, block)?;
self.cur_block = block as u64;
}
let mut cache_size = rem;
if cache_size + block_offset > BLOCK_SIZE {
cache_size = BLOCK_SIZE - block_offset;
}
buf[read..read + cache_size]
.copy_from_slice(&self.buf[BUFFER_OFFSET + block_offset..
BUFFER_OFFSET + block_offset + cache_size]);
read += cache_size;
rem -= cache_size;
block_offset = 0;
block += 1;
}
self.offset += buf.len() as u64;
io::Result::Ok(buf.len())
}
}
#[inline(always)]
fn to_block_size(v: u64) -> u64 {
(v / BUFFER_SIZE as u64) * BLOCK_SIZE as u64 + (v % BUFFER_SIZE as u64)
}
impl<'a> Seek for WiiPartReadStream<'a> {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
self.offset = match pos {
SeekFrom::Start(v) => v,
SeekFrom::End(v) => (self.stable_stream_len()? as i64 + v) as u64,
SeekFrom::Current(v) => (self.offset as i64 + v) as u64,
};
let block = self.offset / BLOCK_SIZE as u64;
if block != self.cur_block {
self.stream.seek(SeekFrom::Start(block * BUFFER_SIZE as u64))?;
self.cur_block = u64::MAX;
}
io::Result::Ok(self.offset)
}
fn stream_position(&mut self) -> io::Result<u64> {
io::Result::Ok(self.offset)
}
}
impl<'a> ReadStream for WiiPartReadStream<'a> {
fn stable_stream_len(&mut self) -> io::Result<u64> {
io::Result::Ok(to_block_size(self.stream.stable_stream_len()?))
}
}
#[derive(Clone, Debug, PartialEq, BinRead)]
pub(crate) struct WiiPartition {
header: Header,
bi2_header: BI2Header,
#[br(seek_before = SeekFrom::Start((header.fst_off as u64) << 2))]
#[br(parse_with = node_parser)]
root_node: NodeType,
}
impl PartHeader for WiiPartition {
fn root_node(&self) -> &NodeType {
&self.root_node
}
fn find_node(&self, path: &str) -> Option<&NodeType> {
find_node(&self.root_node, path)
}
}
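
Editor's note: the removed decrypt_block above assumes a fixed cluster geometry, reconstructed here from its offsets: each 0x8000-byte cluster opens with 31 H0 SHA-1 hashes at 0x000 (one per 0x400-byte data chunk), H1 hashes at 0x280, H2 hashes at 0x340, and the data-area AES IV at 0x3D0, followed by 0x7C00 bytes of data from 0x400. A compile-time check of that layout:

const _: () = assert!(31 * 20 == 0x26C && 0x8000 - 0x7C00 == 0x400);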

src/fst.rs

@ -1,148 +0,0 @@
//! Disc file system types
use std::io::{Read, Seek, SeekFrom};
use binread::{derive_binread, NullString, prelude::*, ReadOptions};
use encoding_rs::SHIFT_JIS;
/// File system node kind.
#[derive(Clone, Debug, PartialEq)]
pub enum NodeKind {
File,
Directory,
}
/// An individual file system node.
#[derive_binread]
#[derive(Clone, Debug, PartialEq)]
pub struct Node {
#[br(temp)]
type_and_name_offset: u32,
#[br(calc = if (type_and_name_offset >> 24) != 0 { NodeKind::Directory } else { NodeKind::File })]
pub kind: NodeKind,
/// For files, this is the partition offset of the file data. (Wii: >> 2)
///
/// For directories, this is the children start offset in the FST.
pub offset: u32,
/// For files, this is the byte size of the file.
///
/// For directories, this is the children end offset in the FST.
///
/// Number of child files and directories recursively is `length - offset`.
pub length: u32,
#[br(calc = type_and_name_offset & 0xffffff)]
name_offset: u32,
#[br(ignore)]
/// The node name.
pub name: Box<str>,
}
/// Contains a file system node, and if a directory, its children.
#[derive(Clone, Debug, PartialEq)]
pub enum NodeType {
/// A single file node.
File(Node),
/// A directory node with children.
Directory(Node, Vec<NodeType>),
}
fn read_node<R: Read + Seek>(reader: &mut R, ro: &ReadOptions, i: &mut u32) -> BinResult<NodeType> {
let node = reader.read_type::<Node>(ro.endian)?;
*i += 1;
BinResult::Ok(if node.kind == NodeKind::Directory {
let mut children: Vec<NodeType> = Vec::new();
children.reserve((node.length - *i) as usize);
while *i < node.length {
children.push(read_node(reader, ro, i)?);
}
NodeType::Directory(node, children)
} else {
NodeType::File(node)
})
}
fn read_node_name<R: Read + Seek>(reader: &mut R, ro: &ReadOptions, base: u64, node: &mut NodeType, root: bool) -> BinResult<()> {
let mut decode_name = |v: &mut Node| -> BinResult<()> {
if !root {
let offset = base + v.name_offset as u64;
reader.seek(SeekFrom::Start(offset))?;
let null_string = reader.read_type::<NullString>(ro.endian)?;
let (res, _, errors) = SHIFT_JIS.decode(&*null_string.0);
if errors {
return BinResult::Err(binread::Error::Custom {
pos: offset,
err: Box::new("Failed to decode node name"),
});
}
v.name = res.into();
}
BinResult::Ok(())
};
match node {
NodeType::File(v) => { decode_name(v)?; }
NodeType::Directory(v, c) => {
decode_name(v)?;
for x in c {
read_node_name(reader, ro, base, x, false)?;
}
}
}
BinResult::Ok(())
}
pub(crate) fn node_parser<R: Read + Seek>(reader: &mut R, ro: &ReadOptions, _: ()) -> BinResult<NodeType> {
let mut node = read_node(reader, ro, &mut 0)?;
let base = reader.stream_position()?;
read_node_name(reader, ro, base, &mut node, true)?;
BinResult::Ok(node)
}
fn matches_name(node: &NodeType, name: &str) -> bool {
match node {
NodeType::File(v) => v.name.as_ref().eq_ignore_ascii_case(name),
NodeType::Directory(v, _) => v.name.is_empty() /* root */ || v.name.as_ref().eq_ignore_ascii_case(name),
}
}
pub(crate) fn find_node<'a>(mut node: &'a NodeType, path: &str) -> Option<&'a NodeType> {
let mut split = path.split('/');
let mut current = split.next();
while current.is_some() {
if matches_name(node, current.unwrap()) {
match node {
NodeType::File(_) => {
return if split.next().is_none() {
Option::Some(node)
} else {
Option::None
};
}
NodeType::Directory(v, c) => {
// Find child
if !v.name.is_empty() || current.unwrap().is_empty() {
current = split.next();
}
if current.is_none() || current.unwrap().is_empty() {
return if split.next().is_none() {
Option::Some(node)
} else {
Option::None
};
}
for x in c {
if matches_name(x, current.unwrap()) {
node = x;
break;
}
}
}
}
} else {
break;
}
}
Option::None
}

src/io/iso.rs

@ -1,26 +0,0 @@
use std::fs::File;
use std::io::{Seek, SeekFrom};
use std::io;
use std::path::{PathBuf, Path};
use crate::io::DiscIO;
use crate::streams::ReadStream;
use crate::Result;
pub(crate) struct DiscIOISO {
pub(crate) filename: PathBuf,
}
pub(crate) fn new_disc_io_iso(filename: &Path) -> Result<DiscIOISO> {
Result::Ok(DiscIOISO {
filename: filename.to_owned(),
})
}
impl DiscIO for DiscIOISO {
fn begin_read_stream(&self, offset: u64) -> io::Result<Box<dyn ReadStream>> {
let mut file = File::open(&*self.filename)?;
file.seek(SeekFrom::Start(offset))?;
io::Result::Ok(Box::from(file))
}
}

src/io/mod.rs

@ -1,75 +0,0 @@
//! Disc file format related logic (ISO, NFS, etc)
use std::{fs, io};
use std::path::Path;
use crate::{Error, Result};
use crate::io::{iso::new_disc_io_iso, nfs::new_disc_io_nfs};
use crate::streams::ReadStream;
pub(crate) mod iso;
pub(crate) mod nfs;
/// Abstraction over supported disc file types.
pub trait DiscIO {
/// Opens a new read stream for the disc file(s).
/// Generally does _not_ need to be used directly.
fn begin_read_stream(&self, offset: u64) -> io::Result<Box<dyn ReadStream + '_>>;
/// If false, the file format does not use standard Wii partition encryption. (e.g. NFS)
fn has_wii_crypto(&self) -> bool { true }
}
/// Helper function for checking a file extension.
#[inline(always)]
pub fn has_extension(filename: &Path, extension: &str) -> bool {
if let Some(ext) = filename.extension() {
// TODO use with Rust 1.53+
// ext.eq_ignore_ascii_case(extension)
ext.to_str().unwrap_or("").eq_ignore_ascii_case(extension)
} else { false }
}
/// Creates a new [`DiscIO`] instance.
///
/// # Examples
///
/// Basic usage:
/// ```
/// use nod::io::new_disc_io;
///
/// let mut disc_io = new_disc_io("path/to/file".as_ref())?;
/// ```
pub fn new_disc_io(filename: &Path) -> Result<Box<dyn DiscIO>> {
let path_result = fs::canonicalize(filename);
if path_result.is_err() {
return Result::Err(Error::Io(
format!("Failed to open {}", filename.to_string_lossy()),
path_result.unwrap_err(),
));
}
let path = path_result.as_ref().unwrap();
let meta = fs::metadata(path);
if meta.is_err() {
return Result::Err(Error::Io(
format!("Failed to open {}", filename.to_string_lossy()),
meta.unwrap_err(),
));
}
if !meta.unwrap().is_file() {
return Result::Err(Error::DiscFormat(
format!("Input is not a file: {}", filename.to_string_lossy())
));
}
if has_extension(path, "iso") {
Result::Ok(Box::from(new_disc_io_iso(path)?))
} else if has_extension(path, "nfs") {
if matches!(path.parent(), Some(parent) if parent.is_dir()) {
Result::Ok(Box::from(new_disc_io_nfs(path.parent().unwrap())?))
} else {
Result::Err(Error::DiscFormat("Failed to locate NFS parent directory".to_string()))
}
} else {
Result::Err(Error::DiscFormat("Unknown file type".to_string()))
}
}

src/io/nfs.rs

@ -1,300 +0,0 @@
use std::{fs::File, io, io::{Read, Seek, SeekFrom}, path::{Path, PathBuf}};
use aes::{Aes128, NewBlockCipher};
use binread::{derive_binread, prelude::*};
use block_modes::{block_padding::NoPadding, BlockMode, Cbc};
use crate::disc::{BUFFER_SIZE};
use crate::io::DiscIO;
use crate::{Error,Result};
use crate::streams::ReadStream;
type Aes128Cbc = Cbc<Aes128, NoPadding>;
#[derive(Clone, Debug, PartialEq, BinRead)]
pub(crate) struct LBARange {
pub(crate) start_block: u32,
pub(crate) num_blocks: u32,
}
#[derive_binread]
#[derive(Clone, Debug, PartialEq)]
#[br(magic = b"EGGS", assert(end_magic == * b"SGGE"))]
pub(crate) struct NFSHeader {
pub(crate) version: u32,
pub(crate) unk1: u32,
pub(crate) unk2: u32,
pub(crate) lba_range_count: u32,
#[br(count = 61)]
pub(crate) lba_ranges: Vec<LBARange>,
#[br(temp)]
pub(crate) end_magic: [u8; 4],
}
#[derive(Clone, Copy, Debug, PartialEq)]
pub(crate) struct FBO {
pub(crate) file: u32,
pub(crate) block: u32,
pub(crate) l_block: u32,
pub(crate) offset: u32,
}
pub(crate) fn fbo_max() -> FBO {
FBO {
file: u32::MAX,
block: u32::MAX,
l_block: u32::MAX,
offset: u32::MAX,
}
}
impl NFSHeader {
pub(crate) fn calculate_num_files(&self) -> u32 {
let total_block_count = self.lba_ranges.iter().take(self.lba_range_count as usize)
.fold(0u32, |acc, range| acc + range.num_blocks);
(((total_block_count as u64) * 0x8000u64 + (0x200u64 + 0xF9FFFFFu64)) / 0xFA00000u64) as u32
}
pub(crate) fn logical_to_fbo(&self, offset: u64) -> FBO {
let block_div = (offset / 0x8000) as u32;
let block_off = (offset % 0x8000) as u32;
let mut block = u32::MAX;
let mut physical_block = 0u32;
for range in self.lba_ranges.iter().take(self.lba_range_count as usize) {
if block_div >= range.start_block && block_div - range.start_block < range.num_blocks {
block = physical_block + (block_div - range.start_block);
break;
}
physical_block += range.num_blocks;
}
if block == u32::MAX {
fbo_max()
} else {
FBO {
file: block / 8000,
block: block % 8000,
l_block: block_div,
offset: block_off,
}
}
}
}
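// [Editor's note, not part of the original source] Geometry behind the
// mapping above: each hif_*.nfs file holds 8000 blocks of 0x8000 bytes
// after a 0x200-byte header, hence `file: block / 8000`, the 0xFA00000
// divisor in calculate_num_files, and the 0x200 overlap read at block
// 7999 handled in set_phys_addr below.
const _: () = assert!(8000 * 0x8000 == 0xFA00000);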
pub(crate) struct DiscIONFS {
pub(crate) directory: PathBuf,
pub(crate) key: [u8; 16],
pub(crate) header: Option<NFSHeader>,
}
pub(crate) fn new_disc_io_nfs(directory: &Path) -> Result<DiscIONFS> {
let mut disc_io = DiscIONFS {
directory: directory.to_owned(),
key: [0; 16],
header: Option::None,
};
disc_io.validate_files()?;
Result::Ok(disc_io)
}
pub(crate) struct NFSReadStream<'a> {
disc_io: &'a DiscIONFS,
file: Option<File>,
crypto: Aes128,
// Physical address - all UINT32_MAX indicates logical zero block
phys_addr: FBO,
// Logical address
offset: u64,
// Active file stream and its offset as set in the system.
// Block is typically one ahead of the presently decrypted block.
cur_file: u32,
cur_block: u32,
buf: [u8; BUFFER_SIZE],
}
impl<'a> NFSReadStream<'a> {
fn set_cur_file(&mut self, cur_file: u32) -> Result<()> {
if cur_file >= self.disc_io.header.as_ref().unwrap().calculate_num_files() {
return Result::Err(Error::DiscFormat("Out of bounds NFS file access".to_string()));
}
self.cur_file = cur_file;
self.cur_block = u32::MAX;
self.file = Option::from(File::open(self.disc_io.get_nfs(cur_file)?)?);
Result::Ok(())
}
fn set_cur_block(&mut self, cur_block: u32) -> io::Result<()> {
self.cur_block = cur_block;
self.file.as_ref().unwrap().seek(
SeekFrom::Start(self.cur_block as u64 * BUFFER_SIZE as u64 + 0x200u64)
)?;
io::Result::Ok(())
}
fn set_phys_addr(&mut self, phys_addr: FBO) -> Result<()> {
// If we're just changing the offset, nothing else needs to be done
if self.phys_addr.file == phys_addr.file && self.phys_addr.block == phys_addr.block {
self.phys_addr.offset = phys_addr.offset;
return Result::Ok(());
}
self.phys_addr = phys_addr;
// Set logical zero block
if phys_addr.file == u32::MAX {
self.buf.fill(0u8);
return Result::Ok(());
}
        // Bring the underlying file and block in sync with the new physical address
if phys_addr.file != self.cur_file {
self.set_cur_file(phys_addr.file)?;
}
if phys_addr.block != self.cur_block {
self.set_cur_block(phys_addr.block)?;
}
        // Read the block, handling the case where the last block of a file
        // (block 7999) overlaps 0x200 bytes into the next file.
        // read_exact is required here: a short read would leave stale data in `buf`.
        if phys_addr.block == 7999 {
            self.file.as_ref().unwrap().read_exact(&mut self.buf[..BUFFER_SIZE - 0x200])?;
            self.set_cur_file(self.cur_file + 1)?;
            self.file.as_ref().unwrap().read_exact(&mut self.buf[BUFFER_SIZE - 0x200..])?;
            self.cur_block = 0;
        } else {
            self.file.as_ref().unwrap().read_exact(&mut self.buf)?;
            self.cur_block += 1;
        }
        // Decrypt in place. The IV is the logical block index,
        // little-endian, in the last four bytes.
        let iv: [u8; 16] = [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            (phys_addr.l_block & 0xFF) as u8,
            ((phys_addr.l_block >> 8) & 0xFF) as u8,
            ((phys_addr.l_block >> 16) & 0xFF) as u8,
            ((phys_addr.l_block >> 24) & 0xFF) as u8,
        ];
        Aes128Cbc::new(self.crypto.clone(), &iv.into())
            .decrypt(&mut self.buf)?;
Result::Ok(())
}
fn set_logical_addr(&mut self, addr: u64) -> Result<()> {
self.set_phys_addr(self.disc_io.header.as_ref().unwrap().logical_to_fbo(addr))
}
}
impl<'a> Read for NFSReadStream<'a> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let mut rem = buf.len();
let mut read: usize = 0;
while rem > 0 {
let mut read_size = rem;
let block_offset: usize = if self.phys_addr.offset == u32::MAX { 0 } else { self.phys_addr.offset as usize };
if read_size + block_offset > BUFFER_SIZE {
read_size = BUFFER_SIZE - block_offset
}
            buf[read..read + read_size]
                .copy_from_slice(&self.buf[block_offset..block_offset + read_size]);
read += read_size;
rem -= read_size;
self.offset += read_size as u64;
self.set_logical_addr(self.offset)
.map_err(|v| match v {
Error::Io(_, v) => v,
_ => io::Error::from(io::ErrorKind::Other)
})?;
}
io::Result::Ok(read)
}
}
impl<'a> Seek for NFSReadStream<'a> {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
self.offset = match pos {
SeekFrom::Start(v) => v,
SeekFrom::End(v) => (self.stable_stream_len()? as i64 + v) as u64,
SeekFrom::Current(v) => (self.offset as i64 + v) as u64,
};
self.set_logical_addr(self.offset)
.map_err(|v| match v {
Error::Io(_, v) => v,
_ => io::Error::from(io::ErrorKind::Other)
})?;
io::Result::Ok(self.offset)
}
fn stream_position(&mut self) -> io::Result<u64> {
io::Result::Ok(self.offset)
}
}
impl<'a> ReadStream for NFSReadStream<'a> {
    fn stable_stream_len(&mut self) -> io::Result<u64> {
        // Not yet implemented: the total decrypted length isn't tracked,
        // so `SeekFrom::End` seeks are currently unsupported and will panic.
        todo!()
    }
}
impl DiscIO for DiscIONFS {
fn begin_read_stream(&self, offset: u64) -> io::Result<Box<dyn ReadStream + '_>> {
io::Result::Ok(Box::from(NFSReadStream {
disc_io: self,
file: Option::None,
crypto: Aes128::new(&self.key.into()),
phys_addr: fbo_max(),
offset,
cur_file: u32::MAX,
cur_block: u32::MAX,
buf: [0; BUFFER_SIZE],
}))
}
fn has_wii_crypto(&self) -> bool { false }
}
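// Illustrative usage sketch (not part of the original file): open an NFS
// image directory and read one decrypted block. The path is hypothetical.
#[allow(dead_code)]
fn nfs_read_sketch() -> Result<Vec<u8>> {
    let disc_io = new_disc_io_nfs(Path::new("content"))?;
    let mut stream = disc_io.begin_read_stream(0)?;
    let mut buf = vec![0u8; BUFFER_SIZE];
    stream.read_exact(&mut buf)?;
    Result::Ok(buf)
}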
impl DiscIONFS {
fn get_path<P: AsRef<Path>>(&self, path: P) -> PathBuf {
let mut buf = self.directory.clone();
buf.push(path);
buf
}
fn get_nfs(&self, num: u32) -> Result<PathBuf> {
let path = self.get_path(format!("hif_{:06}.nfs", num));
if path.exists() {
Result::Ok(path)
} else {
Result::Err(Error::DiscFormat(format!("Failed to locate {}", path.to_string_lossy())))
}
}
pub(crate) fn validate_files(&mut self) -> Result<()> {
{
// Load key file
let mut key_path = self.get_path("../code/htk.bin");
if !key_path.is_file() {
key_path = self.directory.clone();
key_path.push("htk.bin");
}
if !key_path.is_file() {
return Result::Err(Error::DiscFormat(format!(
"Failed to locate {} or {}",
self.get_path("../code/htk.bin").to_string_lossy(),
key_path.to_string_lossy()
)));
}
            File::open(key_path.as_path())
                .map_err(|v| Error::Io(format!("Failed to open {}", key_path.to_string_lossy()), v))?
                .read_exact(&mut self.key)
                .map_err(|v| Error::Io(format!("Failed to read {}", key_path.to_string_lossy()), v))?;
}
{
// Load header from first file
let header: NFSHeader = File::open(self.get_nfs(0)?)?.read_be()?;
// Ensure remaining files exist
for i in 1..header.calculate_num_files() {
self.get_nfs(i)?;
}
self.header = Option::from(header)
}
Result::Ok(())
}
}

src/lib.rs
View File

@ -1,67 +0,0 @@
//! Library for traversing & reading GameCube and Wii disc images.
//!
//! Based on the C++ library [nod](https://github.com/AxioDL/nod),
//! but does not currently support authoring.
//!
//! Currently supported file formats:
//! - ISO
//! - NFS (Wii U VC files, e.g. `hif_000000.nfs`)
//!
//! # Examples
//!
//! Opening a disc image and reading a file:
//! ```no_run
//! use nod::disc::{new_disc_base, PartHeader};
//! use nod::fst::NodeType;
//! use nod::io::new_disc_io;
//! use std::io::Read;
//!
//! # fn main() -> nod::Result<()> {
//! let mut disc_io = new_disc_io("path/to/file".as_ref())?;
//! let disc_base = new_disc_base(disc_io.as_mut())?;
//! let mut partition = disc_base.get_data_partition(disc_io.as_mut())?;
//! let header = partition.read_header()?;
//! if let Some(NodeType::File(node)) = header.find_node("/MP3/Worlds.txt") {
//!     let mut s = String::new();
//!     partition.begin_file_stream(node)?.read_to_string(&mut s)?;
//!     println!("{}", s);
//! }
//! # Ok(())
//! # }
//! ```
pub mod fst;
pub mod disc;
pub mod io;
pub mod streams;
#[derive(Debug)]
pub enum Error {
    /// Binary deserialization error (from binread)
    BinaryFormat(binread::Error),
    /// AES decryption error
    Encryption(block_modes::BlockModeError),
    /// I/O error, with a context message
    Io(String, std::io::Error),
    /// Disc format error
    DiscFormat(String),
}
pub type Result<T> = std::result::Result<T, Error>;
impl From<std::io::Error> for Error {
fn from(v: std::io::Error) -> Self {
Error::Io("I/O error".to_string(), v)
}
}
impl From<binread::Error> for Error {
fn from(v: binread::Error) -> Self {
Error::BinaryFormat(v)
}
}
impl From<block_modes::BlockModeError> for Error {
fn from(v: block_modes::BlockModeError) -> Self {
Error::Encryption(v)
}
}
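// Illustrative sketch (not part of the original file): the From impls above
// let `?` convert underlying errors implicitly. `read_magic` is hypothetical.
#[allow(dead_code)]
fn read_magic(path: &std::path::Path) -> Result<u32> {
    use binread::BinReaderExt;
    let mut file = std::fs::File::open(path)?; // std::io::Error -> Error::Io
    let magic: u32 = file.read_be()?; // binread::Error -> Error::BinaryFormat
    Ok(magic)
}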
/// Returns the quotient and remainder of `x / y` in a single call.
#[inline(always)]
pub(crate) fn div_rem<T: std::ops::Div<Output = T> + std::ops::Rem<Output = T> + Copy>(x: T, y: T) -> (T, T) {
    (x / y, x % y)
}

src/streams.rs
View File

@ -1,162 +0,0 @@
//! Common stream types
use std::{fs::File, io, io::{Read, Seek, SeekFrom}};
use std::ops::DerefMut;
/// Creates a fixed-size array reference from a slice.
#[macro_export]
macro_rules! array_ref {
    ($slice:expr, $offset:expr, $size:expr) => {{
        #[inline]
        fn to_array<T>(slice: &[T]) -> &[T; $size] {
            // Safety: the slice indexing above guarantees exactly $size elements
            unsafe { &*(slice.as_ptr() as *const [_; $size]) }
        }
        to_array(&$slice[$offset..$offset + $size])
    }};
}
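// Illustrative sketch (not part of the original file): borrowing a
// fixed-size array from a byte slice without copying.
#[cfg(test)]
mod array_ref_sketch {
    #[test]
    fn borrows_subslice() {
        let buf = [1u8, 2, 3, 4, 5, 6];
        let first4: &[u8; 4] = crate::array_ref!(buf, 0, 4);
        assert_eq!(first4, &[1, 2, 3, 4]);
    }
}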
pub trait ReadStream: Read + Seek {
    /// Replace with [`Seek::stream_len`] when stabilized.
    ///
    /// <https://github.com/rust-lang/rust/issues/59359>
    fn stable_stream_len(&mut self) -> io::Result<u64>;
/// Creates a windowed read sub-stream with offset and size.
///
/// Seeks underlying stream immediately.
    fn new_window(&mut self, offset: u64, size: u64) -> io::Result<SharedWindowedReadStream>
    where Self: Sized {
self.seek(SeekFrom::Start(offset))?;
io::Result::Ok(SharedWindowedReadStream {
base: self,
begin: offset,
end: offset + size,
})
}
}
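// Illustrative sketch (not part of the original file): windowing a file
// stream. The path and offsets are hypothetical.
#[allow(dead_code)]
fn window_sketch() -> io::Result<()> {
    let mut file = File::open("disc.iso")?;
    // Expose bytes 0x200..0x1200 of the file as a zero-based sub-stream
    let mut window = file.new_window(0x200, 0x1000)?;
    let mut buf = [0u8; 16];
    window.read_exact(&mut buf)?;
    io::Result::Ok(())
}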
impl ReadStream for File {
fn stable_stream_len(&mut self) -> io::Result<u64> {
let before = self.stream_position()?;
let result = self.seek(SeekFrom::End(0));
// Try to restore position even if the above failed
self.seek(SeekFrom::Start(before))?;
result
}
}
trait WindowedReadStream: ReadStream {
fn base_stream(&mut self) -> &mut dyn ReadStream;
fn window(&self) -> (u64, u64);
}
pub struct OwningWindowedReadStream<'a> {
pub base: Box<dyn ReadStream + 'a>,
pub begin: u64,
pub end: u64,
}
/// Takes ownership of & wraps a read stream into a windowed read stream.
pub fn wrap_windowed<'a>(mut base: Box<dyn ReadStream + 'a>, offset: u64, size: u64) -> io::Result<OwningWindowedReadStream<'a>> {
base.seek(SeekFrom::Start(offset))?;
io::Result::Ok(OwningWindowedReadStream {
base,
begin: offset,
end: offset + size,
})
}
pub struct SharedWindowedReadStream<'a> {
pub base: &'a mut dyn ReadStream,
pub begin: u64,
pub end: u64,
}
#[inline(always)]
fn windowed_read(stream: &mut dyn WindowedReadStream, buf: &mut [u8]) -> io::Result<usize> {
    let pos = stream.stream_position()?;
    let size = stream.stable_stream_len()?;
    // Clamp the read so it cannot run past the end of the window
    stream.base_stream().read(if pos + buf.len() as u64 > size {
        &mut buf[..(size - pos) as usize]
    } else {
        buf
    })
}
#[inline(always)]
fn windowed_seek(stream: &mut dyn WindowedReadStream, pos: SeekFrom) -> io::Result<u64> {
    let (begin, end) = stream.window();
    // Translate window-relative positions into base stream coordinates.
    // `SeekFrom::End` is relative to the end of the *window*, so it maps
    // to an absolute position in the base stream, not the base's own end.
    let result = stream.base_stream().seek(match pos {
        SeekFrom::Start(p) => SeekFrom::Start(begin + p),
        SeekFrom::End(p) => SeekFrom::Start((end as i64 + p) as u64),
        SeekFrom::Current(_) => pos,
    })?;
    if result < begin || result > end {
        io::Result::Err(io::Error::from(io::ErrorKind::UnexpectedEof))
    } else {
        io::Result::Ok(result - begin)
    }
}
impl<'a> Read for OwningWindowedReadStream<'a> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
windowed_read(self, buf)
}
}
impl<'a> Seek for OwningWindowedReadStream<'a> {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
windowed_seek(self, pos)
}
fn stream_position(&mut self) -> io::Result<u64> {
Result::Ok(self.base.stream_position()? - self.begin)
}
}
impl<'a> ReadStream for OwningWindowedReadStream<'a> {
fn stable_stream_len(&mut self) -> io::Result<u64> {
Result::Ok(self.end - self.begin)
}
}
impl<'a> WindowedReadStream for OwningWindowedReadStream<'a> {
fn base_stream(&mut self) -> &mut dyn ReadStream {
self.base.deref_mut()
}
fn window(&self) -> (u64, u64) {
(self.begin, self.end)
}
}
impl<'a> Read for SharedWindowedReadStream<'a> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
windowed_read(self, buf)
}
}
impl<'a> Seek for SharedWindowedReadStream<'a> {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
windowed_seek(self, pos)
}
fn stream_position(&mut self) -> io::Result<u64> {
Result::Ok(self.base.stream_position()? - self.begin)
}
}
impl<'a> ReadStream for SharedWindowedReadStream<'a> {
fn stable_stream_len(&mut self) -> io::Result<u64> {
Result::Ok(self.end - self.begin)
}
}
impl<'a> WindowedReadStream for SharedWindowedReadStream<'a> {
fn base_stream(&mut self) -> &mut dyn ReadStream {
self.base
}
fn window(&self) -> (u64, u64) {
(self.begin, self.end)
}
}
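// Illustrative sketch (not part of the original file): the two windowed
// stream types differ only in ownership of the base stream. Path hypothetical.
#[allow(dead_code)]
fn ownership_sketch() -> io::Result<()> {
    // OwningWindowedReadStream takes the boxed base stream...
    let base: Box<dyn ReadStream> = Box::new(File::open("disc.iso")?);
    let mut owned = wrap_windowed(base, 0x200, 0x1000)?;
    // ...while SharedWindowedReadStream mutably borrows an existing one
    let mut shared = owned.new_window(0x10, 0x100)?;
    let mut buf = [0u8; 8];
    shared.read_exact(&mut buf)?;
    io::Result::Ok(())
}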