Compare commits

...

41 Commits
v1.2.0 ... main

SHA1 Message Date
c219afaaff Fix SplitFileReader at file boundaries 2025-08-20 08:50:35 -06:00
e5c1f60121 Fix regression reading lossless WBFS/CISO 2025-08-19 20:22:32 -06:00
914e777cc5 Update deny.toml 2025-08-18 12:38:28 -06:00
4ec8fbf6b9 Update all dependencies 2025-08-18 12:36:03 -06:00
5e7269ddcc Make gen module private for now; lint fixes 2025-08-18 12:30:34 -06:00
4b4564207a Documentation updates 2025-03-31 23:33:07 -06:00
9d8cd980b8 DiscStream rework & threading improvements 2025-03-31 23:06:18 -06:00
56db78207a clippy fixes 2025-03-04 23:20:50 -07:00
38183e4258 Update dependencies 2025-03-04 23:14:00 -07:00
d6969045be Migrate to Rust edition 2024 2025-03-04 23:14:00 -07:00
fb3542f445 Rename PartitionHeader -> BootHeader & various fixes 2025-03-04 22:54:09 -07:00
73eebfe90b Minor changes & cleanup 2024-12-01 22:44:02 -07:00
1e44f23aba Add RVZ packing support 2024-11-30 16:17:19 -07:00
55b0d3f29e Move sha1_hash to util/digest 2024-11-24 01:22:28 -07:00
490ae80a60 Minor cleanup 2024-11-23 12:59:38 -07:00
d197b8e7c2 Centralize logic into fetch_sector_group 2024-11-23 12:54:05 -07:00
b8b06dcd5c README updates 2024-11-22 00:33:51 -07:00
75e6f09b24 More fixes 2024-11-22 00:19:45 -07:00
a8bc312dd9 nightly clippy fixes 2024-11-22 00:14:51 -07:00
f0af954c23 Resolve CI issues 2024-11-22 00:11:48 -07:00
3848edfe7b Add conversion support & large refactor 2024-11-22 00:01:26 -07:00
374c6950b2 Support decrypted discs & decrypt/encrypt conversion 2024-11-08 00:02:02 -07:00
df8ab228c8 Update README.md and repo URLs 2024-10-18 00:09:18 -06:00
32e08f9543 Export LaggedFibonacci & add more helper methods 2024-10-18 00:04:11 -06:00
e0d735dd39 Export more constants & minor cleanup 2024-10-18 00:03:23 -06:00
d4bca2caa8 Ignore Shift JIS decoding errors in Fst::get_name 2024-10-18 00:02:37 -06:00
be4672471d Move region info from PartitionMeta to Disc 2024-10-04 20:17:16 -06:00
f4638369d1 Extract Wii region.bin 2024-10-04 19:53:07 -06:00
d99ef72fe9 Fix matching paths with repeated slashes in Fst::find 2024-10-04 17:25:07 -06:00
e6a3871d28 Resolve +nightly clippy warning 2024-10-03 21:00:06 -06:00
30bcf4936b Upgrade to zerocopy 0.8 2024-10-03 20:57:02 -06:00
5f537f0e7b Various minor API adjustments 2024-10-03 20:18:44 -06:00
8abe674cb9 Fix building without compress features 2024-10-03 01:00:51 -06:00
54890674a2 Add Disc::detect for detecting disc image format 2024-10-03 00:55:03 -06:00
370d03fa9a Add Disc::new_stream/new_stream_with_options
Allows opening a disc image from a custom stream,
rather than a filesystem path.
2024-10-02 23:49:20 -06:00
5ad514d59c Use mimalloc when targeting musl
Also removes the armv7 linux build.
If you used it, let me know!
2024-09-29 12:10:59 -06:00
312dd6f080 SharedWindowedReadStream -> FileStream & impl BufRead 2024-09-10 23:19:57 -06:00
6f3052e05d Use workspace keys in Cargo.toml 2024-09-10 23:19:19 -06:00
d2b8135cdb Use full LTO, update dependencies & CI 2024-09-08 16:29:48 -06:00
a8f91ff9c2 Update README.md 2024-09-04 20:26:19 -06:00
22434fbba3 Add nod version to nodtool Cargo.toml 2024-09-04 20:04:57 -06:00
62 changed files with 11658 additions and 3732 deletions

.github/workflows/build.yaml

@@ -1,11 +1,17 @@
name: Build
on: [ push, pull_request ]
on:
pull_request:
push:
paths-ignore:
- '*.md'
- 'LICENSE*'
workflow_dispatch:
env:
BUILD_PROFILE: release-lto
CARGO_BIN_NAME: nodtool
CARGO_TARGET_DIR: target
CARGO_INCREMENTAL: 0
jobs:
check:
@@ -13,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
toolchain: [ stable, 1.73.0, nightly ]
toolchain: [ stable, 1.85.0, nightly ]
fail-fast: false
env:
RUSTFLAGS: -D warnings
@@ -24,11 +30,13 @@ jobs:
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ matrix.toolchain }}
components: rustfmt, clippy
components: clippy
- name: Cache Rust workspace
uses: Swatinem/rust-cache@v2
- name: Cargo check
run: cargo check --all-features --all-targets
run: cargo check --all-targets
- name: Cargo clippy
run: cargo clippy --all-features --all-targets
run: cargo clippy --all-targets
fmt:
name: Format
@@ -59,7 +67,7 @@ jobs:
continue-on-error: ${{ matrix.checks == 'advisories' }}
steps:
- uses: actions/checkout@v4
- uses: EmbarkStudios/cargo-deny-action@v1
- uses: EmbarkStudios/cargo-deny-action@v2
with:
command: check ${{ matrix.checks }}
@@ -75,11 +83,15 @@ jobs:
uses: actions/checkout@v4
- name: Setup Rust toolchain
uses: dtolnay/rust-toolchain@stable
- name: Cache Rust workspace
uses: Swatinem/rust-cache@v2
- name: Cargo test
run: cargo test --release
build:
name: Build
name: Build nodtool
env:
CARGO_BIN_NAME: nodtool
strategy:
matrix:
include:
@@ -87,21 +99,21 @@ jobs:
target: x86_64-unknown-linux-musl
name: linux-x86_64
build: zigbuild
features: asm
# - platform: ubuntu-latest
# target: i686-unknown-linux-musl
# name: linux-i686
# build: zigbuild
# features: asm
features: default
- platform: ubuntu-latest
target: i686-unknown-linux-musl
name: linux-i686
build: zigbuild
features: default
- platform: ubuntu-latest
target: aarch64-unknown-linux-musl
name: linux-aarch64
build: zigbuild
features: nightly
- platform: ubuntu-latest
target: armv7-unknown-linux-musleabi
name: linux-armv7l
build: zigbuild
features: default
- platform: windows-latest
target: i686-pc-windows-msvc
name: windows-x86
build: build
features: default
- platform: windows-latest
target: x86_64-pc-windows-msvc
@@ -112,17 +124,17 @@ jobs:
target: aarch64-pc-windows-msvc
name: windows-arm64
build: build
features: nightly
features: default
- platform: macos-latest
target: x86_64-apple-darwin
name: macos-x86_64
build: build
features: asm
features: default
- platform: macos-latest
target: aarch64-apple-darwin
name: macos-arm64
build: build
features: nightly
features: default
fail-fast: false
runs-on: ${{ matrix.platform }}
steps:
@@ -135,20 +147,24 @@ jobs:
sudo apt-get -y install ${{ matrix.packages }}
- name: Install cargo-zigbuild
if: matrix.build == 'zigbuild'
run: pip install ziglang==0.11.0 cargo-zigbuild==0.18.3
run: pip install ziglang==0.13.0 cargo-zigbuild==0.19.1
- name: Setup Rust toolchain
uses: dtolnay/rust-toolchain@nightly
uses: dtolnay/rust-toolchain@stable
with:
targets: ${{ matrix.target }}
- name: Cache Rust workspace
uses: Swatinem/rust-cache@v2
with:
key: ${{ matrix.target }}
- name: Cargo build
run: cargo ${{ matrix.build }} --profile ${{ env.BUILD_PROFILE }} --target ${{ matrix.target }} --bin ${{ env.CARGO_BIN_NAME }} --features ${{ matrix.features }}
run: >
cargo ${{ matrix.build }} --profile ${{ env.BUILD_PROFILE }} --target ${{ matrix.target }}
--bin ${{ env.CARGO_BIN_NAME }} --features ${{ matrix.features }}
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: ${{ matrix.name }}
name: ${{ env.CARGO_BIN_NAME }}-${{ matrix.name }}
path: |
${{ env.CARGO_TARGET_DIR }}/${{ env.BUILD_PROFILE }}/${{ env.CARGO_BIN_NAME }}
${{ env.CARGO_TARGET_DIR }}/${{ env.BUILD_PROFILE }}/${{ env.CARGO_BIN_NAME }}.exe
${{ env.CARGO_TARGET_DIR }}/${{ matrix.target }}/${{ env.BUILD_PROFILE }}/${{ env.CARGO_BIN_NAME }}
${{ env.CARGO_TARGET_DIR }}/${{ matrix.target }}/${{ env.BUILD_PROFILE }}/${{ env.CARGO_BIN_NAME }}.exe
if-no-files-found: error
@@ -159,6 +175,20 @@ jobs:
runs-on: ubuntu-latest
needs: [ build ]
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Check git tag against Cargo version
shell: bash
run: |
set -eou pipefail
tag='${{github.ref}}'
tag="${tag#refs/tags/}"
version=$(grep '^version' Cargo.toml | head -1 | awk -F' = ' '{print $2}' | tr -d '"')
version="v$version"
if [ "$tag" != "$version" ]; then
echo "::error::Git tag doesn't match the Cargo version! ($tag != $version)"
exit 1
fi
- name: Download artifacts
uses: actions/download-artifact@v4
with:
@@ -166,12 +196,28 @@
- name: Rename artifacts
working-directory: artifacts
run: |
set -euo pipefail
mkdir ../out
for i in */*/$BUILD_PROFILE/$CARGO_BIN_NAME*; do
mv "$i" "../out/$(sed -E "s/([^/]+)\/[^/]+\/$BUILD_PROFILE\/($CARGO_BIN_NAME)/\2-\1/" <<< "$i")"
for dir in */; do
for file in "$dir"*; do
base=$(basename "$file")
name="${base%.*}"
ext="${base##*.}"
if [ "$ext" = "$base" ]; then
ext=""
else
ext=".$ext"
fi
arch="${dir%/}" # remove trailing slash
arch="${arch##"$name-"}" # remove bin name
dst="../out/${name}-${arch}${ext}"
mv "$file" "$dst"
done
done
ls -R ../out
- name: Release
uses: softprops/action-gh-release@4634c16e79c963813287e889244c50009e7f0981
uses: softprops/action-gh-release@v2
with:
files: out/*
draft: true
generate_release_notes: true

Cargo.lock (generated, 1110 changed lines)

File diff suppressed because it is too large.

Cargo.toml

@@ -1,8 +1,28 @@
[workspace]
members = ["nod", "nodtool"]
resolver = "2"
resolver = "3"
[profile.release]
debug = 1
[profile.release-lto]
inherits = "release"
lto = "thin"
lto = "fat"
strip = "debuginfo"
codegen-units = 1
[workspace.package]
version = "2.0.0-alpha.3"
edition = "2024"
rust-version = "1.85"
authors = ["Luke Street <luke@street.dev>"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/encounter/nod"
keywords = ["gamecube", "wii", "iso", "wbfs", "rvz"]
[workspace.dependencies]
digest = { version = "0.11.0-rc.0", default-features = false }
md-5 = { version = "0.11.0-rc.0", default-features = false }
sha1 = { version = "0.11.0-rc.0", default-features = false }
tracing = "0.1"
zerocopy = { version = "0.8", features = ["alloc", "derive"] }

README.md (152 changed lines)

@@ -1,30 +1,38 @@
# nod [![Build Status]][actions] [![Latest Version]][crates.io] [![Api Rustdoc]][rustdoc] ![Rust Version]
[Build Status]: https://github.com/encounter/nod-rs/actions/workflows/build.yaml/badge.svg
[actions]: https://github.com/encounter/nod-rs/actions
[Build Status]: https://github.com/encounter/nod/actions/workflows/build.yaml/badge.svg
[actions]: https://github.com/encounter/nod/actions
[Latest Version]: https://img.shields.io/crates/v/nod.svg
[crates.io]: https://crates.io/crates/nod
[Api Rustdoc]: https://img.shields.io/badge/api-rustdoc-blue.svg
[rustdoc]: https://docs.rs/nod
[Rust Version]: https://img.shields.io/badge/rust-1.73+-blue.svg?maxAge=3600
[Rust Version]: https://img.shields.io/badge/rust-1.85+-blue.svg?maxAge=3600
Library for traversing & reading Nintendo Optical Disc (GameCube and Wii) images.
Library for reading and writing Nintendo Optical Disc (GameCube and Wii) images.
Originally based on the C++ library [nod](https://github.com/AxioDL/nod),
but does not currently support authoring.
but with extended format support and many additional features.
Currently supported file formats:
- ISO (GCM)
- WIA / RVZ
- WBFS (+ NKit 2 lossless)
- CISO (+ NKit 2 lossless)
- NFS (Wii U VC)
- NFS (Wii U VC, read-only)
- GCZ
- TGC
## CLI tool
This crate includes a command-line tool called `nodtool`.
Download the latest release from the [releases page](https://github.com/encounter/nod-rs/releases),
or install it using Cargo:
```shell
cargo install --locked nodtool
```
### info
@@ -50,15 +58,17 @@ nodtool extract /path/to/game/content/hif_000000.nfs [outdir]
### convert
Converts any supported format to raw ISO.
Converts a disc image to any supported format.
See `nodtool convert --help` for more information.
```shell
nodtool convert /path/to/game.wia /path/to/game.iso
```
nodtool convert /path/to/game.iso /path/to/game.rvz
```
### verify
Hashes the contents of a disc image and verifies it.
Verifies a disc image against an internal Redump database.
```shell
nodtool verify /path/to/game.iso
@@ -71,17 +81,21 @@ Opening a disc image and reading a file:
```rust
use std::io::Read;
use nod::{
common::PartitionKind,
read::{DiscOptions, DiscReader, PartitionOptions},
};
// Open a disc image and the first data partition.
let disc = nod::Disc::new("path/to/file.iso")
.expect("Failed to open disc");
let mut partition = disc.open_partition_kind(nod::PartitionKind::Data)
let disc =
DiscReader::new("path/to/file.iso", &DiscOptions::default()).expect("Failed to open disc");
let mut partition = disc
.open_partition_kind(PartitionKind::Data, &PartitionOptions::default())
.expect("Failed to open data partition");
// Read partition metadata and the file system table.
let meta = partition.meta()
.expect("Failed to read partition metadata");
let fst = meta.fst()
.expect("File system table is invalid");
let meta = partition.meta().expect("Failed to read partition metadata");
let fst = meta.fst().expect("File system table is invalid");
// Find a file by path and read it into a string.
if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
@@ -98,24 +112,106 @@ if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
Converting a disc image to raw ISO:
```rust
// Enable `rebuild_encryption` to ensure the output is a valid ISO.
let options = nod::OpenOptions { rebuild_encryption: true, ..Default::default() };
let mut disc = nod::Disc::new_with_options("path/to/file.rvz", &options)
.expect("Failed to open disc");
// Read directly from the open disc and write to the output file.
let mut out = std::fs::File::create("output.iso")
.expect("Failed to create output file");
std::io::copy(&mut disc, &mut out)
.expect("Failed to write data");
use nod::read::{DiscOptions, DiscReader, PartitionEncryption};
let options = DiscOptions {
partition_encryption: PartitionEncryption::Original,
// Use 4 threads to preload data as the disc is read. This can speed up sequential reads,
// especially when the disc image format uses compression.
preloader_threads: 4,
};
// Open a disc image.
let mut disc = DiscReader::new("path/to/file.rvz", &options).expect("Failed to open disc");
// Create a new output file.
let mut out = std::fs::File::create("output.iso").expect("Failed to create output file");
// Read directly from the DiscReader and write to the output file.
// NOTE: Any copy method that accepts `Read` and `Write` can be used here,
// such as `std::io::copy`. This example utilizes `BufRead` for efficiency,
// since `DiscReader` has its own internal buffer.
nod::util::buf_copy(&mut disc, &mut out).expect("Failed to write data");
```
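For illustration, a hand-rolled `BufRead`-based copy might look roughly like the sketch below. This is not nod API and not part of this change; it just shows how borrowing the reader's internal buffer can avoid the extra copy a plain `Read`-based loop would make:

```rust
use std::io::{BufRead, Write};

// Hypothetical helper in the spirit of a `buf_copy`: stream from any
// `BufRead` to any `Write`, borrowing the reader's buffer directly.
fn copy_buffered<R: BufRead, W: Write>(reader: &mut R, writer: &mut W) -> std::io::Result<u64> {
    let mut total = 0u64;
    loop {
        let buf = reader.fill_buf()?; // borrow the reader's internal buffer
        if buf.is_empty() {
            break; // EOF
        }
        writer.write_all(buf)?;
        let len = buf.len();
        reader.consume(len); // mark the bytes as consumed
        total += len as u64;
    }
    Ok(total)
}
```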
Converting a disc image to RVZ:
```rust
use std::fs::File;
use std::io::{Seek, Write};
use nod::common::{Compression, Format};
use nod::read::{DiscOptions, DiscReader, PartitionEncryption};
use nod::write::{DiscWriter, DiscWriterWeight, FormatOptions, ProcessOptions};
let open_options = DiscOptions {
partition_encryption: PartitionEncryption::Original,
// Use 4 threads to preload data as the disc is read. This can speed up sequential reads,
// especially when the disc image format uses compression.
preloader_threads: 4,
};
// Open a disc image.
let disc = DiscReader::new("path/to/file.iso", &open_options)
.expect("Failed to open disc");
// Create a new output file.
let mut output_file = File::create("output.rvz")
.expect("Failed to create output file");
let options = FormatOptions {
format: Format::Rvz,
compression: Compression::Zstandard(19),
block_size: Format::Rvz.default_block_size(),
};
// Create a disc writer with the desired output format.
let mut writer = DiscWriter::new(disc, &options)
.expect("Failed to create writer");
// Ideally we'd base this on the actual number of CPUs available.
// This is just an example.
let num_threads = match writer.weight() {
DiscWriterWeight::Light => 0,
DiscWriterWeight::Medium => 4,
DiscWriterWeight::Heavy => 12,
};
let process_options = ProcessOptions {
processor_threads: num_threads,
// Enable checksum calculation for the _original_ disc data.
// Digests will be stored in the output file for verification, if supported.
// They will also be returned in the finalization result.
digest_crc32: true,
digest_md5: false, // MD5 is slow, skip it
digest_sha1: true,
digest_xxh64: true,
};
// Start processing the disc image.
let finalization = writer.process(
|data, _progress, _total| {
output_file.write_all(data.as_ref())?;
// One could display progress here, if desired.
Ok(())
},
&process_options
)
.expect("Failed to process disc image");
// Some disc writers calculate data during processing.
// If the finalization returns header data, seek to the beginning of the file and write it.
if !finalization.header.is_empty() {
output_file.rewind()
.expect("Failed to seek");
output_file.write_all(finalization.header.as_ref())
.expect("Failed to write header");
}
output_file.flush().expect("Failed to flush output file");
// Display the calculated digests.
println!("CRC32: {:08X}", finalization.crc32.unwrap());
// ...
```
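Rather than hard-coding thread counts as in the example above, one could derive them from the host machine. A sketch follows; the helper and its scaling policy are illustrative, not nod API:

```rust
use std::thread::available_parallelism;

use nod::write::DiscWriterWeight;

// Hypothetical helper: scale processor threads to the host CPU count,
// using the writer's weight as a hint for how CPU-bound it will be.
fn processor_threads_for(weight: DiscWriterWeight) -> usize {
    let cpus = available_parallelism().map(|n| n.get()).unwrap_or(1);
    match weight {
        DiscWriterWeight::Light => 0,         // no worker threads needed
        DiscWriterWeight::Medium => cpus / 2, // leave headroom for I/O
        DiscWriterWeight::Heavy => cpus,      // fully parallel
    }
}
```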
## License
Licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or <http://www.apache.org/licenses/LICENSE-2.0>)
- MIT license ([LICENSE-MIT](LICENSE-MIT) or <http://opensource.org/licenses/MIT>)
at your option.

deny.toml (188 changed lines)

@@ -9,6 +9,11 @@
# The values provided in this template are the default values that will be used
# when any section or field is not specified in your own configuration
# Root options
# The graph table configures how the dependency graph is constructed and thus
# which crates the checks are performed against
[graph]
# If 1 or more target triples (and optionally, target_features) are specified,
# only the specified targets will be checked when running `cargo deny check`.
# This means, if a particular package is only ever used as a target specific
@@ -20,83 +25,81 @@
targets = [
# The triple can be any string, but only the target triples built in to
# rustc (as of 1.40) can be checked against actual config expressions
#{ triple = "x86_64-unknown-linux-musl" },
#"x86_64-unknown-linux-musl",
# You can also specify which target_features you promise are enabled for a
# particular target. target_features are currently not validated against
# the actual valid features supported by the target architecture.
#{ triple = "wasm32-unknown-unknown", features = ["atomics"] },
]
# When creating the dependency graph used as the source of truth when checks are
# executed, this field can be used to prune crates from the graph, removing them
# from the view of cargo-deny. This is an extremely heavy hammer, as if a crate
# is pruned from the graph, all of its dependencies will also be pruned unless
# they are connected to another crate in the graph that hasn't been pruned,
# so it should be used with care. The identifiers are [Package ID Specifications]
# (https://doc.rust-lang.org/cargo/reference/pkgid-spec.html)
#exclude = []
# If true, metadata will be collected with `--all-features`. Note that this can't
# be toggled off if true, if you want to conditionally enable `--all-features` it
# is recommended to pass `--all-features` on the cmd line instead
all-features = false
# If true, metadata will be collected with `--no-default-features`. The same
# caveat with `all-features` applies
no-default-features = false
# If set, these features will be enabled when collecting metadata. If `--features`
# is specified on the cmd line they will take precedence over this option.
#features = []
# The output table provides options for how/if diagnostics are outputted
[output]
# When outputting inclusion graphs in diagnostics that include features, this
# option can be used to specify the depth at which feature edges will be added.
# This option is included since the graphs can be quite large and the addition
# of features from the crate(s) to all of the graph roots can be far too verbose.
# This option can be overridden via `--feature-depth` on the cmd line
feature-depth = 1
# This section is considered when running `cargo deny check advisories`
# More documentation for the advisories section can be found here:
# https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html
[advisories]
# The path where the advisory database is cloned/fetched into
db-path = "~/.cargo/advisory-db"
# The path where the advisory databases are cloned/fetched into
#db-path = "$CARGO_HOME/advisory-dbs"
# The url(s) of the advisory databases to use
db-urls = ["https://github.com/rustsec/advisory-db"]
# The lint level for security vulnerabilities
vulnerability = "deny"
# The lint level for unmaintained crates
unmaintained = "warn"
# The lint level for crates that have been yanked from their source registry
yanked = "warn"
# The lint level for crates with security notices. Note that as of
# 2019-12-17 there are no security notice advisories in
# https://github.com/rustsec/advisory-db
notice = "warn"
#db-urls = ["https://github.com/rustsec/advisory-db"]
# A list of advisory IDs to ignore. Note that ignored advisories will still
# output a note when they are encountered.
ignore = [
#"RUSTSEC-0000-0000",
#{ id = "RUSTSEC-0000-0000", reason = "you can specify a reason the advisory is ignored" },
#"a-crate-that-is-yanked@0.1.1", # you can also ignore yanked crate versions if you wish
#{ crate = "a-crate-that-is-yanked@0.1.1", reason = "you can specify why you are ignoring the yanked crate" },
]
# Threshold for security vulnerabilities, any vulnerability with a CVSS score
# lower than the range specified will be ignored. Note that ignored advisories
# will still output a note when they are encountered.
# * None - CVSS Score 0.0
# * Low - CVSS Score 0.1 - 3.9
# * Medium - CVSS Score 4.0 - 6.9
# * High - CVSS Score 7.0 - 8.9
# * Critical - CVSS Score 9.0 - 10.0
#severity-threshold =
# If this is true, then cargo deny will use the git executable to fetch advisory database.
# If this is false, then it uses a built-in git library.
# Setting this to true can be helpful if you have special authentication requirements that cargo-deny does not support.
# See Git Authentication for more information about setting up git authentication.
#git-fetch-with-cli = true
# This section is considered when running `cargo deny check licenses`
# More documentation for the licenses section can be found here:
# https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html
[licenses]
# The lint level for crates which do not have a detectable license
unlicensed = "deny"
# List of explictly allowed licenses
# List of explicitly allowed licenses
# See https://spdx.org/licenses/ for list of possible licenses
# [possible values: any SPDX 3.11 short identifier (+ optional exception)].
allow = [
"MIT",
"0BSD",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"Unicode-DFS-2016",
"BSL-1.0",
"ISC",
"MIT",
"Unicode-3.0",
"Zlib",
"bzip2-1.0.6",
]
# List of explictly disallowed licenses
# See https://spdx.org/licenses/ for list of possible licenses
# [possible values: any SPDX 3.11 short identifier (+ optional exception)].
deny = [
#"Nokia",
]
# Lint level for licenses considered copyleft
copyleft = "warn"
# Blanket approval or denial for OSI-approved or FSF Free/Libre licenses
# * both - The license will be approved if it is both OSI-approved *AND* FSF
# * either - The license will be approved if it is either OSI-approved *OR* FSF
# * osi-only - The license will be approved if is OSI-approved *AND NOT* FSF
# * fsf-only - The license will be approved if is FSF *AND NOT* OSI-approved
# * neither - This predicate is ignored and the default lint level is used
allow-osi-fsf-free = "neither"
# Lint level used when no other predicates are matched
# 1. License isn't in the allow or deny lists
# 2. License isn't copyleft
# 3. License isn't OSI/FSF, or allow-osi-fsf-free = "neither"
default = "deny"
# The confidence threshold for detecting a license from license text.
# The higher the value, the more closely the license text must be to the
# canonical license text of a valid SPDX license file.
@@ -107,32 +110,32 @@ confidence-threshold = 0.8
exceptions = [
# Each entry is the crate and version constraint, and its specific allow
# list
#{ allow = ["Zlib"], name = "adler32", version = "*" },
#{ allow = ["Zlib"], crate = "adler32" },
]
# Some crates don't have (easily) machine readable licensing information,
# adding a clarification entry for it allows you to manually specify the
# licensing information
[[licenses.clarify]]
# The name of the crate the clarification applies to
name = "encoding_rs"
# The optional version constraint for the crate
#version = "*"
#[[licenses.clarify]]
# The package spec the clarification applies to
#crate = "ring"
# The SPDX expression for the license requirements of the crate
expression = "(Apache-2.0 OR MIT) AND BSD-3-Clause"
#expression = "MIT AND ISC AND OpenSSL"
# One or more files in the crate's source used as the "source of truth" for
# the license expression. If the contents match, the clarification will be used
# when running the license check, otherwise the clarification will be ignored
# and the crate will be checked normally, which may produce warnings or errors
# depending on the rest of your configuration
license-files = [
# Each entry is a crate relative path, and the (opaque) hash of its contents
{ path = "COPYRIGHT", hash = 0x39f8ad31 }
]
#license-files = [
# Each entry is a crate relative path, and the (opaque) hash of its contents
#{ path = "LICENSE", hash = 0xbd0eed23 }
#]
[licenses.private]
# If true, ignores workspace crates that aren't published, or are only
# published to private registries
# published to private registries.
# To see how to mark a crate as unpublished (to the official registry),
# visit https://doc.rust-lang.org/cargo/reference/manifest.html#the-publish-field.
ignore = false
# One or more private registries that you might publish crates to, if a crate
# is only published to private registries, and ignore is true, the crate will
@@ -146,7 +149,7 @@ registries = [
# https://embarkstudios.github.io/cargo-deny/checks/bans/cfg.html
[bans]
# Lint level for when multiple versions of the same crate are detected
multiple-versions = "warn"
multiple-versions = "allow"
# Lint level for when a crate version requirement is `*`
wildcards = "allow"
# The graph highlighting used when creating dotgraphs for crates
@@ -155,30 +158,63 @@ wildcards = "allow"
# * simplest-path - The path to the version with the fewest edges is highlighted
# * all - Both lowest-version and simplest-path are used
highlight = "all"
# The default lint level for `default` features for crates that are members of
# the workspace that is being checked. This can be overridden by allowing/denying
# `default` on a crate-by-crate basis if desired.
workspace-default-features = "allow"
# The default lint level for `default` features for external crates that are not
# members of the workspace. This can be overridden by allowing/denying `default`
# on a crate-by-crate basis if desired.
external-default-features = "allow"
# List of crates that are allowed. Use with care!
allow = [
#{ name = "ansi_term", version = "=0.11.0" },
#"ansi_term@0.11.0",
#{ crate = "ansi_term@0.11.0", reason = "you can specify a reason it is allowed" },
]
# List of crates to deny
deny = [
# Each entry the name of a crate and a version range. If version is
# not specified, all versions will be matched.
#{ name = "ansi_term", version = "=0.11.0" },
#
#"ansi_term@0.11.0",
#{ crate = "ansi_term@0.11.0", reason = "you can specify a reason it is banned" },
# Wrapper crates can optionally be specified to allow the crate when it
# is a direct dependency of the otherwise banned crate
#{ name = "ansi_term", version = "=0.11.0", wrappers = [] },
#{ crate = "ansi_term@0.11.0", wrappers = ["this-crate-directly-depends-on-ansi_term"] },
]
# List of features to allow/deny
# Each entry the name of a crate and a version range. If version is
# not specified, all versions will be matched.
#[[bans.features]]
#crate = "reqwest"
# Features to not allow
#deny = ["json"]
# Features to allow
#allow = [
# "rustls",
# "__rustls",
# "__tls",
# "hyper-rustls",
# "rustls",
# "rustls-pemfile",
# "rustls-tls-webpki-roots",
# "tokio-rustls",
# "webpki-roots",
#]
# If true, the allowed features must exactly match the enabled feature set. If
# this is set there is no point setting `deny`
#exact = true
# Certain crates/versions that will be skipped when doing duplicate detection.
skip = [
#{ name = "ansi_term", version = "=0.11.0" },
#"ansi_term@0.11.0",
#{ crate = "ansi_term@0.11.0", reason = "you can specify a reason why it can't be updated/removed" },
]
# Similarly to `skip` allows you to skip certain crates during duplicate
# detection. Unlike skip, it also includes the entire tree of transitive
# dependencies starting at the specified crate, up to a certain depth, which is
# by default infinite
# by default infinite.
skip-tree = [
#{ name = "ansi_term", version = "=0.11.0", depth = 20 },
#"ansi_term@0.11.0", # will be skipped along with _all_ of its direct and transitive dependencies
#{ crate = "ansi_term@0.11.0", depth = 20 },
]
# This section is considered when running `cargo deny check sources`.
@@ -198,9 +234,9 @@ allow-registry = ["https://github.com/rust-lang/crates.io-index"]
allow-git = []
[sources.allow-org]
# 1 or more github.com organizations to allow git sources for
github = ["encounter"]
# 1 or more gitlab.com organizations to allow git sources for
#gitlab = [""]
# 1 or more bitbucket.org organizations to allow git sources for
#bitbucket = [""]
# github.com organizations to allow git sources for
github = []
# gitlab.com organizations to allow git sources for
gitlab = []
# bitbucket.org organizations to allow git sources for
bitbucket = []

nod/Cargo.toml

@@ -1,42 +1,54 @@
[package]
name = "nod"
version = "1.2.0"
edition = "2021"
rust-version = "1.73.0"
authors = ["Luke Street <luke@street.dev>"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/encounter/nod-rs"
version.workspace = true
edition.workspace = true
rust-version.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true
documentation = "https://docs.rs/nod"
readme = "../README.md"
description = """
Library for reading GameCube and Wii disc images.
Library for reading and writing GameCube and Wii disc images.
"""
keywords = ["gamecube", "wii", "iso", "wbfs", "rvz"]
keywords.workspace = true
categories = ["command-line-utilities", "parser-implementations"]
[features]
default = ["compress-bzip2", "compress-lzma", "compress-zlib", "compress-zstd"]
asm = ["sha1/asm"]
compress-bzip2 = ["bzip2"]
compress-lzma = ["liblzma"]
compress-lzma = ["liblzma", "liblzma-sys"]
compress-zlib = ["adler", "miniz_oxide"]
compress-zstd = ["zstd"]
compress-zstd = ["zstd", "zstd-safe"]
openssl = ["dep:openssl"]
openssl-vendored = ["openssl", "openssl/vendored"]
[dependencies]
adler = { version = "1.0", optional = true }
aes = "0.8"
aes = "0.9.0-rc.0"
base16ct = "0.2"
bzip2 = { version = "0.4", features = ["static"], optional = true }
cbc = "0.1"
digest = "0.10"
bytes = "1.10"
bzip2 = { version = "0.6", features = ["static"], optional = true }
cbc = "0.2.0-rc.0"
crc32fast = "1.5"
crossbeam-channel = "0.5"
crossbeam-utils = "0.8"
digest = { workspace = true }
dyn-clone = "1.0"
encoding_rs = "0.8"
itertools = "0.12"
liblzma = { version = "0.2", features = ["static"], optional = true }
log = "0.4"
miniz_oxide = { version = "0.7", optional = true }
rayon = "1.8"
sha1 = "0.10"
thiserror = "1.0"
zerocopy = { version = "0.7", features = ["alloc", "derive"] }
zstd = { version = "0.13", optional = true }
itertools = "0.14"
liblzma = { version = "0.4", features = ["static"], optional = true }
liblzma-sys = { version = "0.4", features = ["static"], optional = true }
lru = "0.16"
md-5 = { workspace = true }
miniz_oxide = { version = "0.8", optional = true }
openssl = { version = "0.10", optional = true }
polonius-the-crab = "0.4"
sha1 = { workspace = true }
simple_moving_average = "1.0"
thiserror = "2.0"
tracing = { workspace = true }
xxhash-rust = { version = "0.8", features = ["xxh64"] }
zerocopy = { workspace = true }
zstd = { version = "0.13", optional = true, default-features = false }
zstd-safe = { version = "7.2", optional = true, default-features = false }

nod/src/build/gc.rs (new file, 835 lines)

@@ -0,0 +1,835 @@
#![allow(missing_docs, unused)] // TODO
use std::{
io,
io::{Read, Seek, Write},
sync::Arc,
};
use tracing::debug;
use zerocopy::{FromZeros, IntoBytes};
use crate::{
Error, Result, ResultContext,
disc::{
BI2_SIZE, BOOT_SIZE, BootHeader, DiscHeader, GCN_MAGIC, MINI_DVD_SIZE, SECTOR_SIZE,
WII_MAGIC,
fst::{Fst, FstBuilder},
},
read::{CloneableStream, DiscStream, NonCloneableStream},
util::{Align, array_ref, array_ref_mut, lfg::LaggedFibonacci},
};
pub trait FileCallback: Send {
fn read_file(&mut self, out: &mut [u8], name: &str, offset: u64) -> io::Result<()>;
}
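// Illustrative sketch (not part of this change): a `FileCallback` backed by a
// directory on disk. `DirCallback` and its `base` field are hypothetical.
//
// #[derive(Clone)]
// struct DirCallback { base: std::path::PathBuf }
//
// impl FileCallback for DirCallback {
//     fn read_file(&mut self, out: &mut [u8], name: &str, offset: u64) -> io::Result<()> {
//         use std::io::{Read, Seek, SeekFrom};
//         let mut file = std::fs::File::open(self.base.join(name))?;
//         file.seek(SeekFrom::Start(offset))?;
//         file.read_exact(out)
//     }
// }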
#[derive(Debug, Clone)]
pub struct FileInfo {
pub name: String,
pub size: u64,
pub offset: Option<u64>,
pub alignment: Option<u32>,
}
pub struct GCPartitionBuilder {
disc_header: Box<DiscHeader>,
boot_header: Box<BootHeader>,
user_files: Vec<FileInfo>,
overrides: PartitionOverrides,
junk_files: Vec<String>,
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum WriteKind {
File(String),
Static(Arc<[u8]>, &'static str),
Junk,
}
impl WriteKind {
fn name(&self) -> &str {
match self {
WriteKind::File(name) => name,
WriteKind::Static(_, name) => name,
WriteKind::Junk => "[junk data]",
}
}
}
#[derive(Debug, Clone)]
pub struct WriteInfo {
pub kind: WriteKind,
pub size: u64,
pub offset: u64,
}
pub struct GCPartitionWriter {
write_info: Vec<WriteInfo>,
disc_size: u64,
disc_id: [u8; 4],
disc_num: u8,
}
const BI2_OFFSET: u64 = BOOT_SIZE as u64;
const APPLOADER_OFFSET: u64 = BI2_OFFSET + BI2_SIZE as u64;
#[derive(Debug, Clone, Default)]
pub struct PartitionOverrides {
pub game_id: Option<[u8; 6]>,
pub game_title: Option<String>,
pub disc_num: Option<u8>,
pub disc_version: Option<u8>,
pub audio_streaming: Option<bool>,
pub audio_stream_buf_size: Option<u8>,
pub junk_id: Option<[u8; 4]>,
pub region: Option<u8>,
}
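// Illustrative (not part of this file): since `PartitionOverrides` derives
// `Default`, a caller can override a single field and leave the rest unset:
// let overrides = PartitionOverrides { region: Some(2), ..Default::default() };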
impl GCPartitionBuilder {
pub fn new(is_wii: bool, overrides: PartitionOverrides) -> Self {
let mut disc_header = DiscHeader::new_box_zeroed().unwrap();
if is_wii {
disc_header.gcn_magic = [0u8; 4];
disc_header.wii_magic = WII_MAGIC;
} else {
disc_header.gcn_magic = GCN_MAGIC;
disc_header.wii_magic = [0u8; 4];
}
Self {
disc_header,
boot_header: BootHeader::new_box_zeroed().unwrap(),
user_files: Vec::new(),
overrides,
junk_files: Vec::new(),
}
}
#[inline]
pub fn set_disc_header(&mut self, disc_header: Box<DiscHeader>) {
self.disc_header = disc_header;
}
#[inline]
pub fn set_boot_header(&mut self, boot_header: Box<BootHeader>) {
self.boot_header = boot_header;
}
pub fn add_file(&mut self, info: FileInfo) -> Result<()> {
if let (Some(offset), Some(alignment)) = (info.offset, info.alignment) {
if offset % alignment as u64 != 0 {
return Err(Error::Other(format!(
"File {} offset {:#X} is not aligned to {}",
info.name, offset, alignment
)));
}
}
self.user_files.push(info);
Ok(())
}
/// A junk file exists in the FST, but is excluded from the disc layout, so junk data will be
/// written in its place.
pub fn add_junk_file(&mut self, name: String) { self.junk_files.push(name); }
pub fn build(
&self,
sys_file_callback: impl FnMut(&mut dyn Write, &str) -> io::Result<()>,
) -> Result<GCPartitionWriter> {
let mut layout = GCPartitionLayout::new(self);
layout.locate_sys_files(sys_file_callback)?;
layout.apply_overrides(&self.overrides)?;
let write_info = layout.layout_files()?;
let disc_size =
layout.boot_header.user_offset.get() as u64 + layout.boot_header.user_size.get() as u64;
let junk_id = layout.junk_id();
Ok(GCPartitionWriter::new(write_info, disc_size, junk_id, self.disc_header.disc_num))
}
}
struct GCPartitionLayout {
disc_header: Box<DiscHeader>,
boot_header: Box<BootHeader>,
user_files: Vec<FileInfo>,
apploader_file: Option<FileInfo>,
dol_file: Option<FileInfo>,
raw_fst: Option<Box<[u8]>>,
raw_bi2: Option<Box<[u8]>>,
junk_id: Option<[u8; 4]>,
junk_files: Vec<String>,
}
impl GCPartitionLayout {
fn new(builder: &GCPartitionBuilder) -> Self {
GCPartitionLayout {
disc_header: builder.disc_header.clone(),
boot_header: builder.boot_header.clone(),
user_files: builder.user_files.clone(),
apploader_file: None,
dol_file: None,
raw_fst: None,
raw_bi2: None,
junk_id: builder.overrides.junk_id,
junk_files: builder.junk_files.clone(),
}
}
fn locate_sys_files(
&mut self,
mut file_callback: impl FnMut(&mut dyn Write, &str) -> io::Result<()>,
) -> Result<()> {
let mut handled = vec![false; self.user_files.len()];
// Locate fixed offset system files
for (info, handled) in self.user_files.iter().zip(handled.iter_mut()) {
if info.offset == Some(0) || info.name == "sys/boot.bin" {
let mut data = Vec::with_capacity(BOOT_SIZE);
file_callback(&mut data, &info.name)
.with_context(|| format!("Failed to read file {}", info.name))?;
if data.len() != BOOT_SIZE {
return Err(Error::Other(format!(
"Boot file {} is {} bytes, expected {}",
info.name,
data.len(),
BOOT_SIZE
)));
}
self.disc_header.as_mut_bytes().copy_from_slice(&data[..size_of::<DiscHeader>()]);
self.boot_header.as_mut_bytes().copy_from_slice(&data[size_of::<DiscHeader>()..]);
*handled = true;
continue;
}
if info.offset == Some(BI2_OFFSET) || info.name == "sys/bi2.bin" {
let mut data = Vec::with_capacity(BI2_SIZE);
file_callback(&mut data, &info.name)
.with_context(|| format!("Failed to read file {}", info.name))?;
if data.len() != BI2_SIZE {
return Err(Error::Other(format!(
"BI2 file {} is {} bytes, expected {}",
info.name,
data.len(),
BI2_SIZE
)));
}
self.raw_bi2 = Some(data.into_boxed_slice());
*handled = true;
continue;
}
if info.offset == Some(APPLOADER_OFFSET) || info.name == "sys/apploader.img" {
self.apploader_file = Some(info.clone());
*handled = true;
continue;
}
}
// Locate other system files
let is_wii = self.disc_header.is_wii();
for (info, handled) in self.user_files.iter().zip(handled.iter_mut()) {
let dol_offset = self.boot_header.dol_offset(is_wii);
if (dol_offset != 0 && info.offset == Some(dol_offset)) || info.name == "sys/main.dol" {
let mut info = info.clone();
if info.alignment.is_none() {
info.alignment = Some(128);
}
self.dol_file = Some(info);
*handled = true; // TODO DOL in user data
continue;
}
let fst_offset = self.boot_header.fst_offset(is_wii);
if (fst_offset != 0 && info.offset == Some(fst_offset)) || info.name == "sys/fst.bin" {
let mut data = Vec::with_capacity(info.size as usize);
file_callback(&mut data, &info.name)
.with_context(|| format!("Failed to read file {}", info.name))?;
if data.len() != info.size as usize {
return Err(Error::Other(format!(
"FST file {} is {} bytes, expected {}",
info.name,
data.len(),
info.size
)));
}
self.raw_fst = Some(data.into_boxed_slice());
*handled = true;
continue;
}
}
// Remove handled files
let mut iter = handled.iter();
self.user_files.retain(|_| !iter.next().unwrap());
Ok(())
}
fn apply_overrides(&mut self, overrides: &PartitionOverrides) -> Result<()> {
if let Some(game_id) = overrides.game_id {
self.disc_header.game_id.copy_from_slice(&game_id);
}
if let Some(game_title) = overrides.game_title.as_ref() {
let max_size = self.disc_header.game_title.len() - 1; // nul terminator
if game_title.len() > max_size {
return Err(Error::Other(format!(
"Game title \"{}\" is too long ({} > {})",
game_title,
game_title.len(),
max_size
)));
}
let len = game_title.len().min(max_size);
self.disc_header.game_title[..len].copy_from_slice(&game_title.as_bytes()[..len]);
}
if let Some(disc_num) = overrides.disc_num {
self.disc_header.disc_num = disc_num;
}
if let Some(disc_version) = overrides.disc_version {
self.disc_header.disc_version = disc_version;
}
if let Some(audio_streaming) = overrides.audio_streaming {
self.disc_header.audio_streaming = audio_streaming as u8;
}
if let Some(audio_stream_buf_size) = overrides.audio_stream_buf_size {
self.disc_header.audio_stream_buf_size = audio_stream_buf_size;
}
let set_bi2 = self.raw_bi2.is_none() || overrides.region.is_some();
let raw_bi2 = self.raw_bi2.get_or_insert_with(|| {
<[u8]>::new_box_zeroed_with_elems(BI2_SIZE).expect("Failed to allocate BI2")
});
if set_bi2 {
let region = overrides.region.unwrap_or(0xFF) as u32;
*array_ref_mut![raw_bi2, 0x18, 4] = region.to_be_bytes();
}
Ok(())
}
fn can_use_orig_fst(&self) -> bool {
if let Some(existing) = self.raw_fst.as_deref() {
let Ok(existing_fst) = Fst::new(existing) else {
return false;
};
for (_, node, path) in existing_fst.iter() {
if node.is_dir() {
continue;
}
if !self.user_files.iter().any(|info| info.name == path)
&& !self.junk_files.contains(&path)
{
println!("FST file {} not found", path);
return false;
}
}
println!("Using existing FST");
return true;
}
false
}
fn calculate_fst_size(&self) -> Result<u64> {
if self.can_use_orig_fst() {
return Ok(self.raw_fst.as_deref().unwrap().len() as u64);
}
let mut file_names = Vec::with_capacity(self.user_files.len());
for info in &self.user_files {
file_names.push(info.name.as_str());
}
// file_names.sort_unstable();
let is_wii = self.disc_header.is_wii();
let mut builder = if let Some(existing) = self.raw_fst.as_deref() {
let existing_fst = Fst::new(existing)?;
FstBuilder::new_with_string_table(is_wii, Vec::from(existing_fst.string_table))?
} else {
FstBuilder::new(is_wii)
};
for name in file_names {
builder.add_file(name, 0, 0);
}
let size = builder.byte_size() as u64;
// if size != self.partition_header.fst_size(is_wii) {
// return Err(Error::Other(format!(
// "FST size {} != {}",
// size,
// self.partition_header.fst_size(is_wii)
// )));
// }
Ok(size)
}
fn generate_fst(&mut self, write_info: &[WriteInfo]) -> Result<Arc<[u8]>> {
if self.can_use_orig_fst() {
let fst_data = self.raw_fst.as_ref().unwrap().clone();
// TODO update offsets and sizes
// let node_count = Fst::new(fst_data.as_ref())?.nodes.len();
// let string_base = node_count * size_of::<Node>();
// let (node_buf, string_table) = fst_data.split_at_mut(string_base);
// let nodes = <[Node]>::mut_from_bytes(node_buf).unwrap();
return Ok(Arc::from(fst_data));
}
let files = write_info.to_vec();
// files.sort_unstable_by(|a, b| a.name.cmp(&b.name));
let is_wii = self.disc_header.is_wii();
let mut builder = if let Some(existing) = self.raw_fst.as_deref() {
let existing_fst = Fst::new(existing)?;
FstBuilder::new_with_string_table(is_wii, Vec::from(existing_fst.string_table))?
} else {
FstBuilder::new(is_wii)
};
for info in files {
if let WriteKind::File(name) = info.kind {
builder.add_file(&name, info.offset, info.size as u32);
}
}
let raw_fst = builder.finalize();
if raw_fst.len() != self.boot_header.fst_size(is_wii) as usize {
return Err(Error::Other(format!(
"FST size mismatch: {} != {}",
raw_fst.len(),
self.boot_header.fst_size(is_wii)
)));
}
Ok(Arc::from(raw_fst))
}
fn layout_system_data(&mut self, write_info: &mut Vec<WriteInfo>) -> Result<u64> {
let mut last_offset = 0;
let Some(apploader_file) = self.apploader_file.as_ref() else {
return Err(Error::Other("Apploader not set".to_string()));
};
let Some(dol_file) = self.dol_file.as_ref() else {
return Err(Error::Other("DOL not set".to_string()));
};
let Some(raw_bi2) = self.raw_bi2.as_ref() else {
return Err(Error::Other("BI2 not set".to_string()));
};
// let Some(raw_fst) = self.raw_fst.as_ref() else {
// return Err(Error::Other("FST not set".to_string()));
// };
let mut boot = <[u8]>::new_box_zeroed_with_elems(BOOT_SIZE)?;
boot[..size_of::<DiscHeader>()].copy_from_slice(self.disc_header.as_bytes());
boot[size_of::<DiscHeader>()..].copy_from_slice(self.boot_header.as_bytes());
write_info.push(WriteInfo {
kind: WriteKind::Static(Arc::from(boot), "[BOOT]"),
size: BOOT_SIZE as u64,
offset: last_offset,
});
last_offset += BOOT_SIZE as u64;
write_info.push(WriteInfo {
kind: WriteKind::Static(Arc::from(raw_bi2.as_ref()), "[BI2]"),
size: BI2_SIZE as u64,
offset: last_offset,
});
last_offset += BI2_SIZE as u64;
write_info.push(WriteInfo {
kind: WriteKind::File(apploader_file.name.clone()),
size: apploader_file.size,
offset: last_offset,
});
last_offset += apploader_file.size;
// Update DOL and FST offsets if not set
let is_wii = self.disc_header.is_wii();
let mut dol_offset = self.boot_header.dol_offset(is_wii);
if dol_offset == 0 {
dol_offset = last_offset.align_up(dol_file.alignment.unwrap() as u64);
self.boot_header.set_dol_offset(dol_offset, is_wii);
}
let mut fst_offset = self.boot_header.fst_offset(is_wii);
if fst_offset == 0 {
// TODO handle DOL in user data
fst_offset = (dol_offset + dol_file.size).align_up(128);
self.boot_header.set_fst_offset(fst_offset, is_wii);
}
let fst_size = self.calculate_fst_size()?;
self.boot_header.set_fst_size(fst_size, is_wii);
if self.boot_header.fst_max_size(is_wii) < fst_size {
self.boot_header.set_fst_max_size(fst_size, is_wii);
}
if dol_offset < fst_offset {
write_info.push(WriteInfo {
kind: WriteKind::File(dol_file.name.clone()),
size: dol_file.size,
offset: dol_offset,
});
} else {
// DOL in user data
}
// write_info.push(WriteInfo {
// kind: WriteKind::Static(Arc::from(raw_fst.as_ref()), "[FST]"),
// size: fst_size,
// offset: fst_offset,
// });
Ok(fst_offset + fst_size)
}
fn layout_files(&mut self) -> Result<Vec<WriteInfo>> {
let mut system_write_info = Vec::new();
let mut write_info = Vec::with_capacity(self.user_files.len());
let mut last_offset = self.layout_system_data(&mut system_write_info)?;
// Layout user data
let mut user_offset = self.boot_header.user_offset.get() as u64;
if user_offset == 0 {
user_offset = last_offset.align_up(SECTOR_SIZE as u64);
self.boot_header.user_offset.set(user_offset as u32);
} else if user_offset < last_offset {
return Err(Error::Other(format!(
"User offset {:#X} is before FST {:#X}",
user_offset, last_offset
)));
}
last_offset = user_offset;
for info in &self.user_files {
let offset = info
.offset
.unwrap_or_else(|| last_offset.align_up(info.alignment.unwrap_or(32) as u64));
write_info.push(WriteInfo {
kind: WriteKind::File(info.name.clone()),
offset,
size: info.size,
});
last_offset = offset + info.size;
}
// Generate FST from only user files
let is_wii = self.disc_header.is_wii();
let fst_data = self.generate_fst(&write_info)?;
let fst_size = fst_data.len() as u64;
write_info.push(WriteInfo {
kind: WriteKind::Static(fst_data, "[FST]"),
size: fst_size,
offset: self.boot_header.fst_offset(is_wii),
});
// Add system files to write info
write_info.extend(system_write_info);
// Sort files by offset
sort_files(&mut write_info)?;
// Update user size if not set
if self.boot_header.user_size.get() == 0 {
let user_end = if self.disc_header.is_wii() {
last_offset.align_up(SECTOR_SIZE as u64)
} else {
MINI_DVD_SIZE
};
self.boot_header.user_size.set((user_end - user_offset) as u32);
}
// Insert junk data
let write_info = insert_junk_data(write_info, &self.boot_header);
Ok(write_info)
}
fn junk_id(&self) -> [u8; 4] {
self.junk_id.unwrap_or_else(|| *array_ref![self.disc_header.game_id, 0, 4])
}
}
pub(crate) fn insert_junk_data(
write_info: Vec<WriteInfo>,
boot_header: &BootHeader,
) -> Vec<WriteInfo> {
let mut new_write_info = Vec::with_capacity(write_info.len());
let fst_end = boot_header.fst_offset(false) + boot_header.fst_size(false);
let file_gap = find_file_gap(&write_info, fst_end);
let mut last_file_end = 0;
for info in write_info {
if let WriteKind::File(..) | WriteKind::Static(..) = &info.kind {
let aligned_end = gcm_align(last_file_end);
if info.offset > aligned_end && last_file_end >= fst_end {
// Junk data is aligned to 4 bytes with a 28 byte padding (aka `(n + 31) & !3`)
// but a few cases don't have the 28 byte padding. Namely, the junk data after the
// FST, and the junk data in between the inner and outer rim files. This attempts to
// determine the correct alignment, but is not 100% accurate.
let junk_start = if file_gap == Some(last_file_end) {
last_file_end.align_up(4)
} else {
aligned_end
};
new_write_info.push(WriteInfo {
kind: WriteKind::Junk,
size: info.offset - junk_start,
offset: junk_start,
});
}
last_file_end = info.offset + info.size;
}
new_write_info.push(info);
}
let aligned_end = gcm_align(last_file_end);
let user_end = boot_header.user_offset.get() as u64 + boot_header.user_size.get() as u64;
if aligned_end < user_end && aligned_end >= fst_end {
new_write_info.push(WriteInfo {
kind: WriteKind::Junk,
size: user_end - aligned_end,
offset: aligned_end,
});
}
new_write_info
}
impl GCPartitionWriter {
fn new(write_info: Vec<WriteInfo>, disc_size: u64, disc_id: [u8; 4], disc_num: u8) -> Self {
Self { write_info, disc_size, disc_id, disc_num }
}
pub fn write_to<W>(
&self,
out: &mut W,
mut file_callback: impl FnMut(&mut dyn Write, &str) -> io::Result<()>,
) -> Result<()>
where
W: Write + ?Sized,
{
let mut out = WriteCursor { inner: out, position: 0 };
let mut lfg = LaggedFibonacci::default();
for info in &self.write_info {
out.write_zeroes_until(info.offset).context("Writing padding")?;
match &info.kind {
WriteKind::File(name) => file_callback(&mut out, name)
.with_context(|| format!("Writing file {}", name))?,
WriteKind::Static(data, name) => out.write_all(data).with_context(|| {
format!("Writing static data {} ({} bytes)", name, data.len())
})?,
WriteKind::Junk => {
lfg.write_sector_chunked(
&mut out,
info.size,
self.disc_id,
self.disc_num,
info.offset,
)
.with_context(|| {
format!(
"Writing junk data at {:X} -> {:X}",
info.offset,
info.offset + info.size
)
})?;
}
};
if out.position != info.offset + info.size {
return Err(Error::Other(format!(
"File {}: Wrote {} bytes, expected {}",
info.kind.name(),
out.position - info.offset,
info.size
)));
}
}
out.write_zeroes_until(self.disc_size).context("Writing end of file")?;
out.flush().context("Flushing output")?;
Ok(())
}
pub fn into_cloneable_stream<Cb>(self, file_callback: Cb) -> Result<Box<dyn DiscStream>>
where Cb: FileCallback + Clone + 'static {
Ok(Box::new(CloneableStream::new(GCPartitionStream::new(
file_callback,
Arc::from(self.write_info),
self.disc_size,
self.disc_id,
self.disc_num,
))))
}
pub fn into_non_cloneable_stream<Cb>(self, file_callback: Cb) -> Result<Box<dyn DiscStream>>
where Cb: FileCallback + 'static {
Ok(Box::new(NonCloneableStream::new(GCPartitionStream::new(
file_callback,
Arc::from(self.write_info),
self.disc_size,
self.disc_id,
self.disc_num,
))))
}
}
struct WriteCursor<W> {
inner: W,
position: u64,
}
impl<W> WriteCursor<W>
where W: Write
{
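/// Writes zeroes from the current position up to `until`, in chunks of at
/// most 0x1000 bytes. A no-op if `until` is at or behind the current position.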
fn write_zeroes_until(&mut self, until: u64) -> io::Result<()> {
static ZEROES: [u8; 0x1000] = [0u8; 0x1000];
let mut remaining = until.saturating_sub(self.position);
while remaining > 0 {
let write_len = remaining.min(ZEROES.len() as u64) as usize;
let written = self.write(&ZEROES[..write_len])?;
remaining -= written as u64;
}
Ok(())
}
}
impl<W> Write for WriteCursor<W>
where W: Write
{
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let len = self.inner.write(buf)?;
self.position += len as u64;
Ok(len)
}
#[inline]
fn flush(&mut self) -> io::Result<()> { self.inner.flush() }
}
#[derive(Clone)]
pub(crate) struct GCPartitionStream<Cb> {
file_callback: Cb,
pos: u64,
write_info: Arc<[WriteInfo]>,
size: u64,
disc_id: [u8; 4],
disc_num: u8,
}
impl<Cb> GCPartitionStream<Cb> {
pub fn new(
file_callback: Cb,
write_info: Arc<[WriteInfo]>,
size: u64,
disc_id: [u8; 4],
disc_num: u8,
) -> Self {
Self { file_callback, pos: 0, write_info, size, disc_id, disc_num }
}
#[inline]
pub fn set_position(&mut self, pos: u64) { self.pos = pos; }
#[inline]
pub fn len(&self) -> u64 { self.size }
}
impl<Cb> Read for GCPartitionStream<Cb>
where Cb: FileCallback
{
fn read(&mut self, out: &mut [u8]) -> io::Result<usize> {
if self.pos >= self.size {
// Out of bounds
return Ok(0);
}
let end = (self.size - self.pos).min(out.len() as u64) as usize;
let mut buf = &mut out[..end];
let mut curr = self
.write_info
.binary_search_by_key(&self.pos, |i| i.offset)
.unwrap_or_else(|idx| idx.saturating_sub(1));
let mut pos = self.pos;
let mut total = 0;
while !buf.is_empty() {
let Some(info) = self.write_info.get(curr) else {
buf.fill(0);
total += buf.len();
break;
};
if pos > info.offset + info.size {
curr += 1;
continue;
}
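// If the position falls in a gap before the next write entry, emit zeroes;
// otherwise serve bytes from the current entry (file, static data, or junk).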
let read = if pos < info.offset {
let read = buf.len().min((info.offset - pos) as usize);
buf[..read].fill(0);
read
} else {
let read = buf.len().min((info.offset + info.size - pos) as usize);
match &info.kind {
WriteKind::File(name) => {
self.file_callback.read_file(&mut buf[..read], name, pos - info.offset)?;
}
WriteKind::Static(data, _) => {
let offset = (pos - info.offset) as usize;
buf[..read].copy_from_slice(&data[offset..offset + read]);
}
WriteKind::Junk => {
let mut lfg = LaggedFibonacci::default();
lfg.fill_sector_chunked(&mut buf[..read], self.disc_id, self.disc_num, pos);
}
}
curr += 1;
read
};
buf = &mut buf[read..];
pos += read as u64;
total += read;
}
Ok(total)
}
}
impl<Cb> Seek for GCPartitionStream<Cb> {
fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
self.pos = match pos {
io::SeekFrom::Start(pos) => pos,
io::SeekFrom::End(v) => self.size.saturating_add_signed(v),
io::SeekFrom::Current(v) => self.pos.saturating_add_signed(v),
};
Ok(self.pos)
}
}
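/// Junk data alignment: 28 bytes of padding, then rounded to a 4-byte
/// boundary, i.e. `(n + 31) & !3` (see the notes in `insert_junk_data`).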
#[inline(always)]
fn gcm_align(n: u64) -> u64 { (n + 31) & !3 }
fn sort_files(files: &mut [WriteInfo]) -> Result<()> {
files.sort_unstable_by_key(|info| (info.offset, info.size));
for i in 1..files.len() {
let prev = &files[i - 1];
let cur = &files[i];
if cur.offset < prev.offset + prev.size {
let name = match &cur.kind {
WriteKind::File(name) => name.as_str(),
WriteKind::Static(_, name) => name,
WriteKind::Junk => "[junk data]",
};
let prev_name = match &prev.kind {
WriteKind::File(name) => name.as_str(),
WriteKind::Static(_, name) => name,
WriteKind::Junk => "[junk data]",
};
return Err(Error::Other(format!(
"File {} ({:#X}-{:#X}) overlaps with {} ({:#X}-{:#X})",
name,
cur.offset,
cur.offset + cur.size,
prev_name,
prev.offset,
prev.offset + prev.size
)));
}
}
Ok(())
}
/// Files can be located on the inner rim of the disc (closer to the center) or the outer rim
/// (closer to the edge). The inner rim is slower to read, so developers often configured certain
/// files to be located on the outer rim. This function attempts to find a gap in the file offsets
/// between the inner and outer rim, which we need to recreate junk data properly.
fn find_file_gap(file_infos: &[WriteInfo], fst_end: u64) -> Option<u64> {
let mut last_offset = 0;
for info in file_infos {
if let WriteKind::File(..) | WriteKind::Static(..) = &info.kind {
if last_offset > fst_end && info.offset > last_offset + SECTOR_SIZE as u64 {
debug!("Found file gap at {:X} -> {:X}", last_offset, info.offset);
return Some(last_offset);
}
last_offset = info.offset + info.size;
}
}
None
}

nod/src/build/mod.rs (new file, 4 lines)

@@ -0,0 +1,4 @@
//! Disc image building.
pub mod gc;
pub mod wii;

nod/src/build/wii.rs (new file, 1 line)

@@ -0,0 +1 @@
#![allow(missing_docs)] // TODO

nod/src/common.rs (new file, 363 lines)

@@ -0,0 +1,363 @@
//! Common types.
use std::{borrow::Cow, fmt, str::FromStr, sync::Arc};
use zerocopy::FromBytes;
use crate::{
Error, Result,
disc::{
BB2_OFFSET, BOOT_SIZE, BootHeader, DebugHeader, DiscHeader, SECTOR_SIZE, fst::Fst,
wii::WiiPartitionHeader,
},
util::array_ref,
};
/// SHA-1 hash bytes
pub type HashBytes = [u8; 20];
/// AES key bytes
pub type KeyBytes = [u8; 16];
/// Magic bytes
pub type MagicBytes = [u8; 4];
/// The disc file format.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum Format {
/// ISO / GCM (GameCube master disc)
#[default]
Iso,
/// CISO (Compact ISO)
Ciso,
/// GCZ
Gcz,
/// NFS (Wii U VC)
Nfs,
/// RVZ
Rvz,
/// WBFS
Wbfs,
/// WIA
Wia,
/// TGC
Tgc,
}
impl Format {
/// Returns the default block size for the disc format, if any.
pub fn default_block_size(self) -> u32 {
match self {
Format::Ciso => crate::io::ciso::DEFAULT_BLOCK_SIZE,
#[cfg(feature = "compress-zlib")]
Format::Gcz => crate::io::gcz::DEFAULT_BLOCK_SIZE,
Format::Rvz => crate::io::wia::RVZ_DEFAULT_CHUNK_SIZE,
Format::Wbfs => crate::io::wbfs::DEFAULT_BLOCK_SIZE,
Format::Wia => crate::io::wia::WIA_DEFAULT_CHUNK_SIZE,
_ => 0,
}
}
/// Returns the default compression algorithm for the disc format.
pub fn default_compression(self) -> Compression {
match self {
#[cfg(feature = "compress-zlib")]
Format::Gcz => crate::io::gcz::DEFAULT_COMPRESSION,
Format::Rvz => crate::io::wia::RVZ_DEFAULT_COMPRESSION,
Format::Wia => crate::io::wia::WIA_DEFAULT_COMPRESSION,
_ => Compression::None,
}
}
}
impl fmt::Display for Format {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Format::Iso => write!(f, "ISO"),
Format::Ciso => write!(f, "CISO"),
Format::Gcz => write!(f, "GCZ"),
Format::Nfs => write!(f, "NFS"),
Format::Rvz => write!(f, "RVZ"),
Format::Wbfs => write!(f, "WBFS"),
Format::Wia => write!(f, "WIA"),
Format::Tgc => write!(f, "TGC"),
}
}
}
/// The disc file format's compression algorithm.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum Compression {
/// No compression
#[default]
None,
/// BZIP2
Bzip2(u8),
/// Deflate (GCZ only)
Deflate(u8),
/// LZMA
Lzma(u8),
/// LZMA2
Lzma2(u8),
/// Zstandard
Zstandard(i8),
}
impl Compression {
/// Validates the compression level. Sets the default level if the level is 0.
pub fn validate_level(&mut self) -> Result<()> {
match self {
Compression::Bzip2(level) => {
if *level == 0 {
*level = 9;
}
if *level > 9 {
return Err(Error::Other(format!(
"Invalid BZIP2 compression level: {level} (expected 1-9)"
)));
}
}
Compression::Deflate(level) => {
if *level == 0 {
*level = 9;
}
if *level > 10 {
return Err(Error::Other(format!(
"Invalid Deflate compression level: {level} (expected 1-10)"
)));
}
}
Compression::Lzma(level) => {
if *level == 0 {
*level = 6;
}
if *level > 9 {
return Err(Error::Other(format!(
"Invalid LZMA compression level: {level} (expected 1-9)"
)));
}
}
Compression::Lzma2(level) => {
if *level == 0 {
*level = 6;
}
if *level > 9 {
return Err(Error::Other(format!(
"Invalid LZMA2 compression level: {level} (expected 1-9)"
)));
}
}
Compression::Zstandard(level) => {
if *level == 0 {
*level = 19;
}
if *level < -22 || *level > 22 {
return Err(Error::Other(format!(
"Invalid Zstandard compression level: {level} (expected -22 to 22)"
)));
}
}
_ => {}
}
Ok(())
}
}
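// Minimal usage sketch: a level of 0 is promoted to the format's default,
// while out-of-range levels are rejected.
#[cfg(test)]
#[test]
fn validate_level_examples() {
    let mut c = Compression::Zstandard(0);
    c.validate_level().unwrap();
    assert_eq!(c, Compression::Zstandard(19));
    assert!(Compression::Bzip2(10).validate_level().is_err());
}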
impl fmt::Display for Compression {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Compression::None => write!(f, "None"),
Compression::Bzip2(level) => {
if *level == 0 {
write!(f, "BZIP2")
} else {
write!(f, "BZIP2 ({level})")
}
}
Compression::Deflate(level) => {
if *level == 0 {
write!(f, "Deflate")
} else {
write!(f, "Deflate ({level})")
}
}
Compression::Lzma(level) => {
if *level == 0 {
write!(f, "LZMA")
} else {
write!(f, "LZMA ({level})")
}
}
Compression::Lzma2(level) => {
if *level == 0 {
write!(f, "LZMA2")
} else {
write!(f, "LZMA2 ({level})")
}
}
Compression::Zstandard(level) => {
if *level == 0 {
write!(f, "Zstandard")
} else {
write!(f, "Zstandard ({level})")
}
}
}
}
}
impl FromStr for Compression {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let (format, level) =
if let Some((format, level_str)) = s.split_once(':').or_else(|| s.split_once('.')) {
let level = level_str
.parse::<i32>()
.map_err(|_| format!("Failed to parse compression level: {level_str:?}"))?;
(format, level)
} else {
(s, 0)
};
match format.to_ascii_lowercase().as_str() {
"" | "none" => Ok(Compression::None),
"bz2" | "bzip2" => Ok(Compression::Bzip2(level as u8)),
"deflate" | "gz" | "gzip" => Ok(Compression::Deflate(level as u8)),
"lzma" => Ok(Compression::Lzma(level as u8)),
"lzma2" | "xz" => Ok(Compression::Lzma2(level as u8)),
"zst" | "zstd" | "zstandard" => Ok(Compression::Zstandard(level as i8)),
_ => Err(format!("Unknown compression type: {format:?}")),
}
}
}
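// Minimal usage sketch: either ':' or '.' separates the algorithm name from an
// optional level, and a missing or zero level means "use the format default".
#[cfg(test)]
#[test]
fn parse_compression_examples() {
    assert_eq!("zstd:19".parse(), Ok(Compression::Zstandard(19)));
    assert_eq!("xz".parse(), Ok(Compression::Lzma2(0)));
    assert!("brotli".parse::<Compression>().is_err());
}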
/// The kind of disc partition.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum PartitionKind {
/// Data partition.
Data,
/// Update partition.
Update,
/// Channel partition.
Channel,
/// Other partition kind.
Other(u32),
}
impl fmt::Display for PartitionKind {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Data => write!(f, "Data"),
Self::Update => write!(f, "Update"),
Self::Channel => write!(f, "Channel"),
Self::Other(v) => {
let bytes = v.to_be_bytes();
write!(f, "Other ({:08X}, {})", v, String::from_utf8_lossy(&bytes))
}
}
}
}
impl PartitionKind {
/// Returns the directory name for the partition kind.
#[inline]
pub fn dir_name(&self) -> Cow<'_, str> {
match self {
Self::Data => Cow::Borrowed("DATA"),
Self::Update => Cow::Borrowed("UPDATE"),
Self::Channel => Cow::Borrowed("CHANNEL"),
Self::Other(v) => {
let bytes = v.to_be_bytes();
Cow::Owned(format!("P-{}", String::from_utf8_lossy(&bytes)))
}
}
}
}
impl From<u32> for PartitionKind {
#[inline]
fn from(v: u32) -> Self {
match v {
0 => Self::Data,
1 => Self::Update,
2 => Self::Channel,
v => Self::Other(v),
}
}
}
/// Wii partition information.
#[derive(Debug, Clone)]
pub struct PartitionInfo {
/// The partition index.
pub index: usize,
/// The kind of disc partition.
pub kind: PartitionKind,
/// The start sector of the partition.
pub start_sector: u32,
/// The start sector of the partition's (usually encrypted) data.
pub data_start_sector: u32,
/// The end sector of the partition's (usually encrypted) data.
pub data_end_sector: u32,
/// The AES key for the partition, also known as the "title key".
pub key: KeyBytes,
/// The Wii partition header.
pub header: Arc<WiiPartitionHeader>,
/// Whether the partition data is encrypted
pub has_encryption: bool,
/// Whether the partition data hashes are present
pub has_hashes: bool,
/// Disc and boot header (boot.bin)
pub raw_boot: Arc<[u8; BOOT_SIZE]>,
/// File system table (fst.bin), or `None` if partition is invalid
pub raw_fst: Option<Arc<[u8]>>,
}
impl PartitionInfo {
/// Returns the size of the partition's data region in bytes.
#[inline]
pub fn data_size(&self) -> u64 {
(self.data_end_sector as u64 - self.data_start_sector as u64) * SECTOR_SIZE as u64
}
/// Returns whether the given sector is within the partition's data region.
#[inline]
pub fn data_contains_sector(&self, sector: u32) -> bool {
sector >= self.data_start_sector && sector < self.data_end_sector
}
/// A view into the disc header.
#[inline]
pub fn disc_header(&self) -> &DiscHeader {
DiscHeader::ref_from_bytes(array_ref![self.raw_boot, 0, size_of::<DiscHeader>()])
.expect("Invalid disc header alignment")
}
/// A view into the debug header.
#[inline]
pub fn debug_header(&self) -> &DebugHeader {
DebugHeader::ref_from_bytes(array_ref![
self.raw_boot,
size_of::<DiscHeader>(),
size_of::<DebugHeader>()
])
.expect("Invalid debug header alignment")
}
/// A view into the boot header.
#[inline]
pub fn boot_header(&self) -> &BootHeader {
BootHeader::ref_from_bytes(array_ref![self.raw_boot, BB2_OFFSET, size_of::<BootHeader>()])
.expect("Invalid boot header alignment")
}
/// A view into the file system table (FST).
#[inline]
pub fn fst(&self) -> Option<Fst<'_>> {
// FST has already been parsed, so we can safely unwrap
Some(Fst::new(self.raw_fst.as_deref()?).unwrap())
}
}

145
nod/src/disc/direct.rs Normal file
View File

@ -0,0 +1,145 @@
use std::{
io,
io::{BufRead, Seek, SeekFrom},
sync::Arc,
};
use zerocopy::FromZeros;
use crate::{
Result,
common::KeyBytes,
disc::{DiscHeader, SECTOR_SIZE, wii::SECTOR_DATA_SIZE},
io::block::{Block, BlockReader},
read::{PartitionMeta, PartitionReader},
util::impl_read_for_bufread,
};
#[derive(Clone)]
pub enum DirectDiscReaderMode {
Raw,
Partition { disc_header: Arc<DiscHeader>, data_start_sector: u32, key: KeyBytes },
}
/// Simplified disc reader that uses a block reader directly.
///
/// This is used to read disc and partition metadata before we can construct a full disc reader.
pub struct DirectDiscReader {
io: Box<dyn BlockReader>,
block: Block,
block_buf: Box<[u8]>,
block_decrypted: bool,
pos: u64,
mode: DirectDiscReaderMode,
}
impl Clone for DirectDiscReader {
fn clone(&self) -> Self {
Self {
io: self.io.clone(),
block: Block::default(),
block_buf: <[u8]>::new_box_zeroed_with_elems(self.block_buf.len()).unwrap(),
block_decrypted: false,
pos: 0,
mode: self.mode.clone(),
}
}
}
impl DirectDiscReader {
pub fn new(inner: Box<dyn BlockReader>) -> Result<Box<Self>> {
let block_size = inner.block_size() as usize;
Ok(Box::new(Self {
io: inner,
block: Block::default(),
block_buf: <[u8]>::new_box_zeroed_with_elems(block_size)?,
block_decrypted: false,
pos: 0,
mode: DirectDiscReaderMode::Raw,
}))
}
pub fn reset(&mut self, mode: DirectDiscReaderMode) {
self.block = Block::default();
self.block_decrypted = false;
self.pos = 0;
self.mode = mode;
}
pub fn into_inner(self) -> Box<dyn BlockReader> { self.io }
}
impl BufRead for DirectDiscReader {
fn fill_buf(&mut self) -> io::Result<&[u8]> {
match &self.mode {
DirectDiscReaderMode::Raw => {
// Read new block if necessary
let sector = (self.pos / SECTOR_SIZE as u64) as u32;
if self.block_decrypted || !self.block.contains(sector) {
self.block = self.io.read_block(self.block_buf.as_mut(), sector)?;
self.block_decrypted = false;
}
self.block.data(self.block_buf.as_ref(), self.pos)
}
DirectDiscReaderMode::Partition { disc_header, data_start_sector, key } => {
let has_encryption = disc_header.has_partition_encryption();
let has_hashes = disc_header.has_partition_hashes();
let part_sector = if has_hashes {
(self.pos / SECTOR_DATA_SIZE as u64) as u32
} else {
(self.pos / SECTOR_SIZE as u64) as u32
};
// Read new block if necessary
let abs_sector = data_start_sector + part_sector;
if !self.block.contains(abs_sector) {
self.block = self.io.read_block(self.block_buf.as_mut(), abs_sector)?;
self.block_decrypted = false;
}
// Allow reusing the same block from raw mode, just decrypt it if necessary
if !self.block_decrypted {
self.block
.decrypt_block(self.block_buf.as_mut(), has_encryption.then_some(*key))?;
self.block_decrypted = true;
}
self.block.partition_data(
self.block_buf.as_ref(),
self.pos,
*data_start_sector,
has_hashes,
)
}
}
}
#[inline]
fn consume(&mut self, amt: usize) { self.pos += amt as u64; }
}
impl_read_for_bufread!(DirectDiscReader);
impl Seek for DirectDiscReader {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
self.pos = match pos {
SeekFrom::Start(v) => v,
SeekFrom::End(_) => {
return Err(io::Error::new(
io::ErrorKind::Unsupported,
"DirectDiscReader: SeekFrom::End is not supported",
));
}
SeekFrom::Current(v) => self.pos.saturating_add_signed(v),
};
Ok(self.pos)
}
fn stream_position(&mut self) -> io::Result<u64> { Ok(self.pos) }
}
impl PartitionReader for DirectDiscReader {
fn is_wii(&self) -> bool { unimplemented!() }
fn meta(&mut self) -> Result<PartitionMeta> { unimplemented!() }
}
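// Minimal sketch of the intended flow: probe raw disc metadata first, then
// reset into partition mode once the partition's key and data offset are known.
// `block_reader` is a hypothetical `Box<dyn BlockReader>` supplied by the caller.
fn _probe_example(block_reader: Box<dyn BlockReader>) -> Result<()> {
    let mut reader = DirectDiscReader::new(block_reader)?;
    // Raw mode: read the disc header from the start of the disc.
    reader.seek(SeekFrom::Start(0)).expect("seek to start cannot fail");
    // ... parse the DiscHeader, locate a partition, derive its title key ...
    // Then switch modes; subsequent reads yield decrypted partition data:
    // reader.reset(DirectDiscReaderMode::Partition { disc_header, data_start_sector, key });
    Ok(())
}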

376
nod/src/disc/fst.rs Normal file
View File

@ -0,0 +1,376 @@
//! File system table (FST) types.
use std::{borrow::Cow, ffi::CStr, mem::size_of};
use encoding_rs::SHIFT_JIS;
use itertools::Itertools;
use zerocopy::{FromBytes, FromZeros, Immutable, IntoBytes, KnownLayout, big_endian::*};
use crate::{
Error, Result,
util::{array_ref, static_assert},
};
/// File system node kind.
#[derive(Clone, Debug, PartialEq)]
pub enum NodeKind {
/// Node is a file.
File,
/// Node is a directory.
Directory,
/// Invalid node kind. (Should not normally occur)
Invalid,
}
/// An individual file system node.
#[derive(Copy, Clone, Debug, PartialEq, FromBytes, IntoBytes, Immutable, KnownLayout)]
#[repr(C, align(4))]
pub struct Node {
kind: u8,
// u24 big-endian
name_offset: [u8; 3],
offset: U32,
length: U32,
}
static_assert!(size_of::<Node>() == 12);
impl Node {
/// Create a new node.
#[inline]
pub fn new(kind: NodeKind, name_offset: u32, offset: u64, length: u32, is_wii: bool) -> Self {
let name_offset_bytes = name_offset.to_be_bytes();
Self {
kind: match kind {
NodeKind::File => 0,
NodeKind::Directory => 1,
NodeKind::Invalid => u8::MAX,
},
name_offset: *array_ref![name_offset_bytes, 1, 3],
offset: U32::new(match kind {
NodeKind::File if is_wii => (offset / 4) as u32,
_ => offset as u32,
}),
length: U32::new(length),
}
}
/// File system node kind.
#[inline]
pub fn kind(&self) -> NodeKind {
match self.kind {
0 => NodeKind::File,
1 => NodeKind::Directory,
_ => NodeKind::Invalid,
}
}
/// Set the node kind.
#[inline]
pub fn set_kind(&mut self, kind: NodeKind) {
self.kind = match kind {
NodeKind::File => 0,
NodeKind::Directory => 1,
NodeKind::Invalid => u8::MAX,
};
}
/// Whether the node is a file.
#[inline]
pub fn is_file(&self) -> bool { self.kind == 0 }
/// Whether the node is a directory.
#[inline]
pub fn is_dir(&self) -> bool { self.kind == 1 }
/// Offset in the string table to the filename.
#[inline]
pub fn name_offset(&self) -> u32 {
u32::from_be_bytes([0, self.name_offset[0], self.name_offset[1], self.name_offset[2]])
}
/// Set the name offset of the node.
#[inline]
pub fn set_name_offset(&mut self, name_offset: u32) {
let name_offset_bytes = name_offset.to_be_bytes();
self.name_offset = *array_ref![name_offset_bytes, 1, 3];
}
/// For files, this is the partition offset of the file data. (Wii: >> 2)
///
/// For directories, this is the parent node index in the FST.
#[inline]
pub fn offset(&self, is_wii: bool) -> u64 {
if is_wii && self.is_file() {
self.offset.get() as u64 * 4
} else {
self.offset.get() as u64
}
}
/// Set the offset of the node. See [`Node::offset`] for details.
#[inline]
pub fn set_offset(&mut self, offset: u64, is_wii: bool) {
self.offset.set(if is_wii && self.is_file() { (offset / 4) as u32 } else { offset as u32 });
}
/// For files, this is the byte size of the file.
///
/// For directories, this is the child end index in the FST.
///
/// The total number of child files and directories (recursive) is `length - offset`.
#[inline]
pub fn length(&self) -> u32 { self.length.get() }
/// Set the length of the node. See [`Node::length`] for details.
#[inline]
pub fn set_length(&mut self, length: u32) { self.length.set(length); }
}
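// Minimal sketch: Wii file offsets are stored shifted right by 2, so they only
// round-trip through set_offset/offset when 4-byte aligned.
#[cfg(test)]
#[test]
fn node_wii_offset_shift() {
    let mut node = Node::new(NodeKind::File, 0, 0, 0, true);
    node.set_offset(0x8000, true);
    assert_eq!(node.offset(true), 0x8000);
    assert_eq!(node.offset(false), 0x2000); // the raw stored value
}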
/// A view into the file system table (FST).
#[derive(Clone)]
pub struct Fst<'a> {
/// The nodes in the FST.
pub nodes: &'a [Node],
/// The string table containing all file and directory names.
pub string_table: &'a [u8],
}
impl<'a> Fst<'a> {
/// Create a new FST view from a buffer.
pub fn new(buf: &'a [u8]) -> Result<Self, &'static str> {
let Ok((root_node, _)) = Node::ref_from_prefix(buf) else {
return Err("FST root node not found");
};
// String table starts after the last node
let string_base = root_node.length() * size_of::<Node>() as u32;
if string_base > buf.len() as u32 {
return Err("FST string table out of bounds");
}
let (node_buf, string_table) = buf.split_at(string_base as usize);
let nodes = <[Node]>::ref_from_bytes(node_buf).unwrap();
Ok(Self { nodes, string_table })
}
/// Iterate over the nodes in the FST.
#[inline]
pub fn iter(&self) -> FstIter<'_> { FstIter { fst: self.clone(), idx: 1, segments: vec![] } }
/// Get the name of a node.
pub fn get_name(&self, node: Node) -> Result<Cow<'a, str>, String> {
let name_buf = self.string_table.get(node.name_offset() as usize..).ok_or_else(|| {
format!(
"FST: name offset {} out of bounds (string table size: {})",
node.name_offset(),
self.string_table.len()
)
})?;
let c_string = CStr::from_bytes_until_nul(name_buf).map_err(|_| {
format!("FST: name at offset {} not null-terminated", node.name_offset())
})?;
let (decoded, _, _) = SHIFT_JIS.decode(c_string.to_bytes());
// Ignore decoding errors; we can't do anything about them. Consumers may check for
// U+FFFD (REPLACEMENT CHARACTER), or fetch the raw bytes from the string table.
Ok(decoded)
}
/// Finds a particular file or directory by path.
pub fn find(&self, path: &str) -> Option<(usize, Node)> {
let mut split = path.trim_matches('/').split('/');
let mut current = next_non_empty(&mut split);
if current.is_empty() {
return Some((0, self.nodes[0]));
}
let mut idx = 1;
let mut stop_at = None;
while let Some(node) = self.nodes.get(idx).copied() {
if self.get_name(node).as_ref().is_ok_and(|name| name.eq_ignore_ascii_case(current)) {
current = next_non_empty(&mut split);
if current.is_empty() {
return Some((idx, node));
}
// Descend into directory
idx += 1;
stop_at = Some(node.length() as usize + idx);
} else if node.is_dir() {
// Skip directory
idx = node.length() as usize;
} else {
// Skip file
idx += 1;
}
if let Some(stop) = stop_at {
if idx >= stop {
break;
}
}
}
None
}
/// Count the number of files in the FST.
pub fn num_files(&self) -> usize { self.nodes.iter().filter(|n| n.is_file()).count() }
}
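// Minimal usage sketch: walking an FST. `fst_buf` is a hypothetical `&[u8]`
// containing a raw fst.bin (e.g. from partition metadata).
fn _list_files(fst_buf: &[u8]) -> Result<(), &'static str> {
    let fst = Fst::new(fst_buf)?;
    for (_, node, path) in fst.iter() {
        if node.is_file() {
            println!("{} ({} bytes)", path, node.length());
        }
    }
    Ok(())
}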
/// Iterator over the nodes in an FST.
///
/// For each node, the iterator yields the node index, the node itself,
/// and the full path to the node (separated by `/`).
pub struct FstIter<'a> {
fst: Fst<'a>,
idx: usize,
segments: Vec<(Cow<'a, str>, usize)>,
}
impl Iterator for FstIter<'_> {
type Item = (usize, Node, String);
fn next(&mut self) -> Option<Self::Item> {
let idx = self.idx;
let node = self.fst.nodes.get(idx).copied()?;
let name = self.fst.get_name(node).unwrap_or("<invalid>".into());
self.idx += 1;
// Remove ended path segments
let mut new_size = 0;
for (_, end) in self.segments.iter() {
if *end == idx {
break;
}
new_size += 1;
}
self.segments.truncate(new_size);
// Add the new path segment
let length = node.length() as u64;
let end = if node.is_dir() { length as usize } else { idx + 1 };
self.segments.push((name, end));
let path = self.segments.iter().map(|(name, _)| name.as_ref()).join("/");
Some((idx, node, path))
}
}
#[inline]
fn next_non_empty<'a>(iter: &mut impl Iterator<Item = &'a str>) -> &'a str {
loop {
match iter.next() {
Some("") => continue,
Some(next) => break next,
None => break "",
}
}
}
/// A builder for creating a file system table (FST).
pub struct FstBuilder {
nodes: Vec<Node>,
string_table: Vec<u8>,
stack: Vec<(String, u32)>,
is_wii: bool,
}
impl FstBuilder {
/// Create a new FST builder.
pub fn new(is_wii: bool) -> Self {
let mut builder = Self { nodes: vec![], string_table: vec![], stack: vec![], is_wii };
builder.add_node(NodeKind::Directory, "<root>", 0, 0);
builder
}
/// Create a new FST builder with an existing string table. This allows matching the string
/// ordering of an existing FST.
pub fn new_with_string_table(is_wii: bool, string_table: Vec<u8>) -> Result<Self> {
if matches!(string_table.last(), Some(n) if *n != 0) {
return Err(Error::DiscFormat("String table must be null-terminated".to_string()));
}
let root_name = CStr::from_bytes_until_nul(&string_table)
.map_err(|_| {
Error::DiscFormat("String table root name not null-terminated".to_string())
})?
.to_str()
.unwrap_or("<root>")
.to_string();
let mut builder = Self { nodes: vec![], string_table, stack: vec![], is_wii };
builder.add_node(NodeKind::Directory, &root_name, 0, 0);
Ok(builder)
}
/// Add a file to the FST. All paths within a directory must be added sequentially,
/// otherwise the output FST will be invalid.
pub fn add_file(&mut self, path: &str, offset: u64, size: u32) {
let components = path.split('/').collect::<Vec<_>>();
for i in 0..components.len() - 1 {
if matches!(self.stack.get(i), Some((name, _)) if name != components[i]) {
// Pop directories
while self.stack.len() > i {
let (_, idx) = self.stack.pop().unwrap();
let length = self.nodes.len() as u32;
self.nodes[idx as usize].set_length(length);
}
}
while i >= self.stack.len() {
// Push a new directory node
let component_idx = self.stack.len();
let parent = if component_idx == 0 { 0 } else { self.stack[component_idx - 1].1 };
let node_idx =
self.add_node(NodeKind::Directory, components[component_idx], parent as u64, 0);
self.stack.push((components[i].to_string(), node_idx));
}
}
if components.len() == 1 {
// Pop all directories
while let Some((_, idx)) = self.stack.pop() {
let length = self.nodes.len() as u32;
self.nodes[idx as usize].set_length(length);
}
}
// Add file node
self.add_node(NodeKind::File, components.last().unwrap(), offset, size);
}
/// Get the byte size of the FST.
pub fn byte_size(&self) -> usize {
size_of_val(self.nodes.as_slice()) + self.string_table.len()
}
/// Finalize the FST and return the serialized data.
pub fn finalize(mut self) -> Box<[u8]> {
// Finalize directory lengths
let node_count = self.nodes.len() as u32;
while let Some((_, idx)) = self.stack.pop() {
self.nodes[idx as usize].set_length(node_count);
}
self.nodes[0].set_length(node_count);
// Serialize nodes and string table
let nodes_data = self.nodes.as_bytes();
let string_table_data = self.string_table.as_bytes();
let mut data =
<[u8]>::new_box_zeroed_with_elems(nodes_data.len() + string_table_data.len()).unwrap();
data[..nodes_data.len()].copy_from_slice(self.nodes.as_bytes());
data[nodes_data.len()..].copy_from_slice(self.string_table.as_bytes());
data
}
fn add_node(&mut self, kind: NodeKind, name: &str, offset: u64, length: u32) -> u32 {
let (bytes, _, _) = SHIFT_JIS.encode(name);
// Check if the name already exists in the string table
let mut name_offset = 0;
while name_offset < self.string_table.len() {
let string_buf = &self.string_table[name_offset..];
let existing = CStr::from_bytes_until_nul(string_buf).unwrap();
if existing.to_bytes() == bytes.as_ref() {
break;
}
name_offset += existing.to_bytes_with_nul().len();
}
// Otherwise, add the name to the string table
if name_offset == self.string_table.len() {
self.string_table.extend_from_slice(bytes.as_ref());
self.string_table.push(0);
}
let idx = self.nodes.len() as u32;
self.nodes.push(Node::new(kind, name_offset as u32, offset, length, self.is_wii));
idx
}
}
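// Minimal usage sketch (hypothetical paths and offsets): files must be added
// in FST order; finalize() then patches directory lengths and serializes.
fn _build_example() -> Box<[u8]> {
    let mut builder = FstBuilder::new(false);
    builder.add_file("opening.bnr", 0x2440, 0x1960);
    builder.add_file("audio/menu.ast", 0x10000, 0x8000);
    builder.finalize()
}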

nod/src/disc/gcn.rs
View File

@ -1,199 +1,206 @@
use std::{
cmp::min,
io,
io::{Read, Seek, SeekFrom},
io::{BufRead, Seek, SeekFrom},
mem::size_of,
sync::Arc,
};
use zerocopy::{FromBytes, FromZeroes};
use zerocopy::{FromBytes, FromZeros, IntoBytes};
use crate::{
disc::{
ApploaderHeader, DiscHeader, DolHeader, PartitionBase, PartitionHeader, PartitionMeta,
BI2_SIZE, BOOT_SIZE, SECTOR_SIZE,
},
fst::{Node, NodeKind},
io::block::{Block, BlockIO},
streams::{ReadStream, SharedWindowedReadStream},
util::read::{read_box, read_box_slice, read_vec},
Result, ResultContext,
disc::{
ApploaderHeader, BB2_OFFSET, BI2_SIZE, BOOT_SIZE, BootHeader, DolHeader, SECTOR_GROUP_SIZE,
SECTOR_SIZE,
preloader::{Preloader, SectorGroup, SectorGroupRequest, fetch_sector_group},
},
read::{PartitionEncryption, PartitionMeta, PartitionReader},
util::{
impl_read_for_bufread,
read::{read_arc, read_arc_slice, read_from},
},
};
pub struct PartitionGC {
io: Box<dyn BlockIO>,
block: Block,
block_buf: Box<[u8]>,
block_idx: u32,
sector_buf: Box<[u8; SECTOR_SIZE]>,
sector: u32,
pub struct PartitionReaderGC {
preloader: Arc<Preloader>,
pos: u64,
disc_header: Box<DiscHeader>,
disc_size: u64,
sector_group: Option<SectorGroup>,
meta: Option<PartitionMeta>,
}
impl Clone for PartitionGC {
impl Clone for PartitionReaderGC {
fn clone(&self) -> Self {
Self {
io: self.io.clone(),
block: Block::default(),
block_buf: <u8>::new_box_slice_zeroed(self.block_buf.len()),
block_idx: u32::MAX,
sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed(),
sector: u32::MAX,
preloader: self.preloader.clone(),
pos: 0,
disc_header: self.disc_header.clone(),
disc_size: self.disc_size,
sector_group: None,
meta: self.meta.clone(),
}
}
}
impl PartitionGC {
pub fn new(inner: Box<dyn BlockIO>, disc_header: Box<DiscHeader>) -> Result<Box<Self>> {
let block_size = inner.block_size();
Ok(Box::new(Self {
io: inner,
block: Block::default(),
block_buf: <u8>::new_box_slice_zeroed(block_size as usize),
block_idx: u32::MAX,
sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed(),
sector: u32::MAX,
pos: 0,
disc_header,
}))
}
pub fn into_inner(self) -> Box<dyn BlockIO> { self.io }
}
impl Read for PartitionGC {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let sector = (self.pos / SECTOR_SIZE as u64) as u32;
let block_idx = (sector as u64 * SECTOR_SIZE as u64 / self.block_buf.len() as u64) as u32;
// Read new block if necessary
if block_idx != self.block_idx {
self.block = self.io.read_block(self.block_buf.as_mut(), block_idx, None)?;
self.block_idx = block_idx;
}
// Copy sector if necessary
if sector != self.sector {
self.block.copy_raw(
self.sector_buf.as_mut(),
self.block_buf.as_ref(),
sector,
&self.disc_header,
)?;
self.sector = sector;
}
let offset = (self.pos % SECTOR_SIZE as u64) as usize;
let len = min(buf.len(), SECTOR_SIZE - offset);
buf[..len].copy_from_slice(&self.sector_buf[offset..offset + len]);
self.pos += len as u64;
Ok(len)
impl PartitionReaderGC {
pub fn new(preloader: Arc<Preloader>, disc_size: u64) -> Result<Box<Self>> {
Ok(Box::new(Self { preloader, pos: 0, disc_size, sector_group: None, meta: None }))
}
}
impl Seek for PartitionGC {
impl BufRead for PartitionReaderGC {
fn fill_buf(&mut self) -> io::Result<&[u8]> {
if self.pos >= self.disc_size {
return Ok(&[]);
}
let abs_sector = (self.pos / SECTOR_SIZE as u64) as u32;
let group_idx = abs_sector / 64;
let max_groups = self.disc_size.div_ceil(SECTOR_GROUP_SIZE as u64) as u32;
let request = SectorGroupRequest {
group_idx,
partition_idx: None,
mode: PartitionEncryption::Original,
force_rehash: false,
};
// Load sector group
let (sector_group, _updated) =
fetch_sector_group(request, max_groups, &mut self.sector_group, &self.preloader)?;
// Calculate the number of consecutive sectors in the group
let group_sector = abs_sector - sector_group.start_sector;
let consecutive_sectors = sector_group.consecutive_sectors(group_sector);
if consecutive_sectors == 0 {
return Ok(&[]);
}
let num_sectors = group_sector + consecutive_sectors;
// Read from sector group buffer
let group_start = sector_group.start_sector as u64 * SECTOR_SIZE as u64;
let offset = (self.pos - group_start) as usize;
let end =
(num_sectors as u64 * SECTOR_SIZE as u64).min(self.disc_size - group_start) as usize;
Ok(&sector_group.data[offset..end])
}
#[inline]
fn consume(&mut self, amt: usize) { self.pos += amt as u64; }
}
impl_read_for_bufread!(PartitionReaderGC);
impl Seek for PartitionReaderGC {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
self.pos = match pos {
SeekFrom::Start(v) => v,
SeekFrom::End(_) => {
return Err(io::Error::new(
io::ErrorKind::Unsupported,
"GCPartitionReader: SeekFrom::End is not supported".to_string(),
));
}
SeekFrom::End(v) => self.disc_size.saturating_add_signed(v),
SeekFrom::Current(v) => self.pos.saturating_add_signed(v),
};
Ok(self.pos)
}
fn stream_position(&mut self) -> io::Result<u64> { Ok(self.pos) }
}
impl PartitionBase for PartitionGC {
fn meta(&mut self) -> Result<Box<PartitionMeta>> {
self.seek(SeekFrom::Start(0)).context("Seeking to partition metadata")?;
read_part_meta(self, false)
}
impl PartitionReader for PartitionReaderGC {
fn is_wii(&self) -> bool { false }
fn open_file(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream> {
assert_eq!(node.kind(), NodeKind::File);
self.new_window(node.offset(false), node.length())
fn meta(&mut self) -> Result<PartitionMeta> {
if let Some(meta) = &self.meta {
Ok(meta.clone())
} else {
let meta = read_part_meta(self, false)?;
self.meta = Some(meta.clone());
Ok(meta)
}
}
fn ideal_buffer_size(&self) -> usize { SECTOR_SIZE }
}
pub(crate) fn read_part_meta(
reader: &mut dyn ReadStream,
pub(crate) fn read_dol(
reader: &mut dyn PartitionReader,
boot_header: &BootHeader,
is_wii: bool,
) -> Result<Box<PartitionMeta>> {
// boot.bin
let raw_boot: Box<[u8; BOOT_SIZE]> = read_box(reader).context("Reading boot.bin")?;
let partition_header = PartitionHeader::ref_from(&raw_boot[size_of::<DiscHeader>()..]).unwrap();
// bi2.bin
let raw_bi2: Box<[u8; BI2_SIZE]> = read_box(reader).context("Reading bi2.bin")?;
// apploader.bin
let mut raw_apploader: Vec<u8> =
read_vec(reader, size_of::<ApploaderHeader>()).context("Reading apploader header")?;
let apploader_header = ApploaderHeader::ref_from(raw_apploader.as_slice()).unwrap();
raw_apploader.resize(
size_of::<ApploaderHeader>()
+ apploader_header.size.get() as usize
+ apploader_header.trailer_size.get() as usize,
0,
);
) -> Result<Arc<[u8]>> {
reader
.read_exact(&mut raw_apploader[size_of::<ApploaderHeader>()..])
.context("Reading apploader")?;
.seek(SeekFrom::Start(boot_header.dol_offset(is_wii)))
.context("Seeking to DOL offset")?;
let dol_header: DolHeader = read_from(reader).context("Reading DOL header")?;
let dol_size = (dol_header.text_offs.iter().zip(&dol_header.text_sizes))
.chain(dol_header.data_offs.iter().zip(&dol_header.data_sizes))
.map(|(offs, size)| offs.get() + size.get())
.max()
.unwrap_or(size_of::<DolHeader>() as u32);
let mut raw_dol = <[u8]>::new_box_zeroed_with_elems(dol_size as usize)?;
raw_dol[..size_of::<DolHeader>()].copy_from_slice(dol_header.as_bytes());
reader.read_exact(&mut raw_dol[size_of::<DolHeader>()..]).context("Reading DOL")?;
Ok(Arc::from(raw_dol))
}
// fst.bin
pub(crate) fn read_fst(
reader: &mut dyn PartitionReader,
boot_header: &BootHeader,
is_wii: bool,
) -> Result<Arc<[u8]>> {
reader
.seek(SeekFrom::Start(partition_header.fst_offset(is_wii)))
.seek(SeekFrom::Start(boot_header.fst_offset(is_wii)))
.context("Seeking to FST offset")?;
let raw_fst: Box<[u8]> = read_box_slice(reader, partition_header.fst_size(is_wii) as usize)
let raw_fst: Arc<[u8]> = read_arc_slice(reader, boot_header.fst_size(is_wii) as usize)
.with_context(|| {
format!(
"Reading partition FST (offset {}, size {})",
partition_header.fst_offset(is_wii),
partition_header.fst_size(is_wii)
boot_header.fst_offset(is_wii),
boot_header.fst_size(is_wii)
)
})?;
Ok(raw_fst)
}
pub(crate) fn read_apploader(reader: &mut dyn PartitionReader) -> Result<Arc<[u8]>> {
reader
.seek(SeekFrom::Start(BOOT_SIZE as u64 + BI2_SIZE as u64))
.context("Seeking to apploader offset")?;
let apploader_header: ApploaderHeader =
read_from(reader).context("Reading apploader header")?;
let apploader_size = size_of::<ApploaderHeader>()
+ apploader_header.size.get() as usize
+ apploader_header.trailer_size.get() as usize;
let mut raw_apploader = <[u8]>::new_box_zeroed_with_elems(apploader_size)?;
raw_apploader[..size_of::<ApploaderHeader>()].copy_from_slice(apploader_header.as_bytes());
reader
.read_exact(&mut raw_apploader[size_of::<ApploaderHeader>()..])
.context("Reading apploader")?;
Ok(Arc::from(raw_apploader))
}
pub(crate) fn read_part_meta(
reader: &mut dyn PartitionReader,
is_wii: bool,
) -> Result<PartitionMeta> {
// boot.bin
let raw_boot: Arc<[u8; BOOT_SIZE]> = read_arc(reader).context("Reading boot.bin")?;
let boot_header = BootHeader::ref_from_bytes(&raw_boot[BB2_OFFSET..]).unwrap();
// bi2.bin
let raw_bi2: Arc<[u8; BI2_SIZE]> = read_arc(reader).context("Reading bi2.bin")?;
// apploader.bin
let raw_apploader = read_apploader(reader)?;
// fst.bin
let raw_fst = read_fst(reader, boot_header, is_wii)?;
// main.dol
reader
.seek(SeekFrom::Start(partition_header.dol_offset(is_wii)))
.context("Seeking to DOL offset")?;
let mut raw_dol: Vec<u8> =
read_vec(reader, size_of::<DolHeader>()).context("Reading DOL header")?;
let dol_header = DolHeader::ref_from(raw_dol.as_slice()).unwrap();
let dol_size = dol_header
.text_offs
.iter()
.zip(&dol_header.text_sizes)
.map(|(offs, size)| offs.get() + size.get())
.chain(
dol_header
.data_offs
.iter()
.zip(&dol_header.data_sizes)
.map(|(offs, size)| offs.get() + size.get()),
)
.max()
.unwrap_or(size_of::<DolHeader>() as u32);
raw_dol.resize(dol_size as usize, 0);
reader.read_exact(&mut raw_dol[size_of::<DolHeader>()..]).context("Reading DOL")?;
let raw_dol = read_dol(reader, boot_header, is_wii)?;
Ok(Box::new(PartitionMeta {
Ok(PartitionMeta {
raw_boot,
raw_bi2,
raw_apploader: raw_apploader.into_boxed_slice(),
raw_apploader,
raw_fst,
raw_dol: raw_dol.into_boxed_slice(),
raw_dol,
raw_ticket: None,
raw_tmd: None,
raw_cert_chain: None,
raw_h3_table: None,
}))
})
}
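// Minimal usage sketch: reading GameCube partition metadata through any
// PartitionReader; each helper seeks the reader itself.
fn _dump_meta(reader: &mut dyn PartitionReader) -> Result<()> {
    let meta = read_part_meta(reader, false)?;
    println!("DOL: {} bytes, FST: {} bytes", meta.raw_dol.len(), meta.raw_fst.len());
    Ok(())
}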

nod/src/disc/hashes.rs
View File

@ -1,204 +1,80 @@
use std::{
io::{Read, Seek, SeekFrom},
sync::{Arc, Mutex},
time::Instant,
};
use rayon::iter::{IntoParallelIterator, ParallelIterator};
use sha1::{Digest, Sha1};
use zerocopy::FromZeroes;
use tracing::instrument;
use zerocopy::{FromZeros, IntoBytes};
use crate::{
array_ref, array_ref_mut,
common::HashBytes,
disc::{
reader::DiscReader,
SECTOR_GROUP_SIZE, SECTOR_SIZE,
wii::{HASHES_SIZE, SECTOR_DATA_SIZE},
},
io::HashBytes,
util::read::read_box_slice,
OpenOptions, Result, ResultContext, SECTOR_SIZE,
util::{array_ref, array_ref_mut, digest::sha1_hash},
};
/// In a sector, following the 0x400 byte block of hashes, each 0x400 bytes of decrypted data is
/// hashed, yielding 31 H0 hashes.
/// Then, 8 sectors are aggregated into a subgroup, and the 31 H0 hashes for each sector are hashed,
/// yielding 8 H1 hashes.
/// Then, 8 subgroups are aggregated into a group, and the 8 H1 hashes for each subgroup are hashed,
/// yielding 8 H2 hashes.
/// Finally, the 8 H2 hashes for each group are hashed, yielding 1 H3 hash.
/// The H3 hashes for each group are stored in the partition's H3 table.
#[derive(Clone, Debug)]
pub struct HashTable {
/// SHA-1 hash of each 0x400 byte block of decrypted data.
pub h0_hashes: Box<[HashBytes]>,
/// SHA-1 hash of the 31 H0 hashes for each sector.
pub h1_hashes: Box<[HashBytes]>,
/// SHA-1 hash of the 8 H1 hashes for each subgroup.
pub h2_hashes: Box<[HashBytes]>,
/// SHA-1 hash of the 8 H2 hashes for each group.
pub h3_hashes: Box<[HashBytes]>,
/// Hashes for a single sector group (64 sectors).
#[derive(Clone, FromZeros)]
pub struct GroupHashes {
pub h3_hash: HashBytes,
pub h2_hashes: [HashBytes; 8],
pub h1_hashes: [HashBytes; 64],
pub h0_hashes: [HashBytes; 1984],
}
#[derive(Clone, FromZeroes)]
struct HashResult {
h0_hashes: [HashBytes; 1984],
h1_hashes: [HashBytes; 64],
h2_hashes: [HashBytes; 8],
h3_hash: HashBytes,
}
impl HashTable {
fn new(num_sectors: u32) -> Self {
let num_sectors = num_sectors.next_multiple_of(64) as usize;
let num_data_hashes = num_sectors * 31;
let num_subgroups = num_sectors / 8;
let num_groups = num_subgroups / 8;
Self {
h0_hashes: HashBytes::new_box_slice_zeroed(num_data_hashes),
h1_hashes: HashBytes::new_box_slice_zeroed(num_sectors),
h2_hashes: HashBytes::new_box_slice_zeroed(num_subgroups),
h3_hashes: HashBytes::new_box_slice_zeroed(num_groups),
}
impl GroupHashes {
#[inline]
pub fn hashes_for_sector(
&self,
sector: usize,
) -> (&[HashBytes; 31], &[HashBytes; 8], &[HashBytes; 8]) {
let h1_hashes = array_ref![self.h1_hashes, sector & !7, 8];
let h0_hashes = array_ref![self.h0_hashes, sector * 31, 31];
(h0_hashes, h1_hashes, &self.h2_hashes)
}
fn extend(&mut self, group_index: usize, result: &HashResult) {
*array_ref_mut![self.h0_hashes, group_index * 1984, 1984] = result.h0_hashes;
*array_ref_mut![self.h1_hashes, group_index * 64, 64] = result.h1_hashes;
*array_ref_mut![self.h2_hashes, group_index * 8, 8] = result.h2_hashes;
self.h3_hashes[group_index] = result.h3_hash;
#[inline]
pub fn apply(&self, sector_data: &mut [u8; SECTOR_SIZE], sector: usize) {
let (h0_hashes, h1_hashes, h2_hashes) = self.hashes_for_sector(sector);
array_ref_mut![sector_data, 0, 0x26C].copy_from_slice(h0_hashes.as_bytes());
array_ref_mut![sector_data, 0x280, 0xA0].copy_from_slice(h1_hashes.as_bytes());
array_ref_mut![sector_data, 0x340, 0xA0].copy_from_slice(h2_hashes.as_bytes());
}
}
pub fn rebuild_hashes(reader: &mut DiscReader) -> Result<()> {
const NUM_H0_HASHES: usize = SECTOR_DATA_SIZE / HASHES_SIZE;
pub const NUM_H0_HASHES: usize = SECTOR_DATA_SIZE / HASHES_SIZE;
log::info!(
"Rebuilding hashes for Wii partition data (using {} threads)",
rayon::current_num_threads()
);
let start = Instant::now();
// Precompute hashes for zeroed sectors.
const ZERO_H0_BYTES: &[u8] = &[0u8; HASHES_SIZE];
let zero_h0_hash = hash_bytes(ZERO_H0_BYTES);
let partitions = reader.partitions();
let mut hash_tables = Vec::with_capacity(partitions.len());
for part in partitions {
let part_sectors = part.data_end_sector - part.data_start_sector;
let hash_table = HashTable::new(part_sectors);
log::debug!(
"Rebuilding hashes: {} sectors, {} subgroups, {} groups",
hash_table.h1_hashes.len(),
hash_table.h2_hashes.len(),
hash_table.h3_hashes.len()
);
let group_count = hash_table.h3_hashes.len();
let mutex = Arc::new(Mutex::new(hash_table));
(0..group_count).into_par_iter().try_for_each_with(
(reader.open_partition(part.index, &OpenOptions::default())?, mutex.clone()),
|(stream, mutex), h3_index| -> Result<()> {
let mut result = HashResult::new_box_zeroed();
let mut data_buf = <u8>::new_box_slice_zeroed(SECTOR_DATA_SIZE);
let mut h3_hasher = Sha1::new();
for h2_index in 0..8 {
let mut h2_hasher = Sha1::new();
for h1_index in 0..8 {
let sector = h1_index + h2_index * 8;
let part_sector = sector as u32 + h3_index as u32 * 64;
let mut h1_hasher = Sha1::new();
if part_sector >= part_sectors {
for h0_index in 0..NUM_H0_HASHES {
result.h0_hashes[h0_index + sector * 31] = zero_h0_hash;
h1_hasher.update(zero_h0_hash);
}
} else {
stream
.seek(SeekFrom::Start(part_sector as u64 * SECTOR_DATA_SIZE as u64))
.with_context(|| format!("Seeking to sector {}", part_sector))?;
stream
.read_exact(&mut data_buf)
.with_context(|| format!("Reading sector {}", part_sector))?;
for h0_index in 0..NUM_H0_HASHES {
let h0_hash = hash_bytes(array_ref![
data_buf,
h0_index * HASHES_SIZE,
HASHES_SIZE
]);
result.h0_hashes[h0_index + sector * 31] = h0_hash;
h1_hasher.update(h0_hash);
}
};
let h1_hash = h1_hasher.finalize().into();
result.h1_hashes[sector] = h1_hash;
h2_hasher.update(h1_hash);
}
let h2_hash = h2_hasher.finalize().into();
result.h2_hashes[h2_index] = h2_hash;
h3_hasher.update(h2_hash);
#[instrument(skip_all)]
pub fn hash_sector_group(
sector_group: &[u8; SECTOR_GROUP_SIZE],
ignore_existing: bool,
) -> Box<GroupHashes> {
let mut result = GroupHashes::new_box_zeroed().unwrap();
for (h2_index, h2_hash) in result.h2_hashes.iter_mut().enumerate() {
let out_h1_hashes = array_ref_mut![result.h1_hashes, h2_index * 8, 8];
for (h1_index, h1_hash) in out_h1_hashes.iter_mut().enumerate() {
let sector = h1_index + h2_index * 8;
let out_h0_hashes =
array_ref_mut![result.h0_hashes, sector * NUM_H0_HASHES, NUM_H0_HASHES];
if !ignore_existing
&& array_ref![sector_group, sector * SECTOR_SIZE, 20].iter().any(|&v| v != 0)
{
// Hash block already present, use it
out_h0_hashes.as_mut_bytes().copy_from_slice(array_ref![
sector_group,
sector * SECTOR_SIZE,
0x26C
]);
} else {
for (h0_index, h0_hash) in out_h0_hashes.iter_mut().enumerate() {
*h0_hash = sha1_hash(array_ref![
sector_group,
sector * SECTOR_SIZE + HASHES_SIZE + h0_index * HASHES_SIZE,
HASHES_SIZE
]);
}
result.h3_hash = h3_hasher.finalize().into();
let mut hash_table = mutex.lock().map_err(|_| "Failed to lock mutex")?;
hash_table.extend(h3_index, &result);
Ok(())
},
)?;
let hash_table = Arc::try_unwrap(mutex)
.map_err(|_| "Failed to unwrap Arc")?
.into_inner()
.map_err(|_| "Failed to lock mutex")?;
hash_tables.push(hash_table);
}
// Verify against H3 table
for (part, hash_table) in reader.partitions.clone().iter().zip(hash_tables.iter()) {
log::debug!(
"Verifying H3 table for partition {} (count {})",
part.index,
hash_table.h3_hashes.len()
);
reader
.seek(SeekFrom::Start(
part.start_sector as u64 * SECTOR_SIZE as u64 + part.header.h3_table_off(),
))
.context("Seeking to H3 table")?;
let h3_table: Box<[HashBytes]> =
read_box_slice(reader, hash_table.h3_hashes.len()).context("Reading H3 table")?;
let mut mismatches = 0;
for (idx, (expected_hash, h3_hash)) in
h3_table.iter().zip(hash_table.h3_hashes.iter()).enumerate()
{
if expected_hash != h3_hash {
let mut got_bytes = [0u8; 40];
let got = base16ct::lower::encode_str(h3_hash, &mut got_bytes).unwrap();
let mut expected_bytes = [0u8; 40];
let expected =
base16ct::lower::encode_str(expected_hash, &mut expected_bytes).unwrap();
log::debug!(
"Partition {} H3 table does not match:\n\tindex {}\n\texpected: {}\n\tgot: {}",
part.index, idx, expected, got
);
mismatches += 1;
}
*h1_hash = sha1_hash(out_h0_hashes.as_bytes());
}
if mismatches > 0 {
log::warn!("Partition {} H3 table has {} hash mismatches", part.index, mismatches);
}
*h2_hash = sha1_hash(out_h1_hashes.as_bytes());
}
for (part, hash_table) in reader.partitions.iter_mut().zip(hash_tables) {
part.hash_table = Some(hash_table);
}
log::info!("Rebuilt hashes in {:?}", start.elapsed());
Ok(())
}
#[inline]
pub fn hash_bytes(buf: &[u8]) -> HashBytes {
let mut hasher = Sha1::new();
hasher.update(buf);
hasher.finalize().into()
result.h3_hash = sha1_hash(result.h2_hashes.as_bytes());
result
}
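// Minimal usage sketch: recompute hashes for one decrypted 2 MiB sector group
// and write them back into each sector's 0x400-byte hash block.
fn _rehash_group(sector_group: &mut [u8; SECTOR_GROUP_SIZE]) {
    let hashes = hash_sector_group(sector_group, true);
    for sector in 0..64 {
        hashes.apply(array_ref_mut![sector_group, sector * SECTOR_SIZE, SECTOR_SIZE], sector);
    }
}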

nod/src/disc/mod.rs
View File

@ -1,37 +1,61 @@
//! Disc type related logic (GameCube, Wii)
//! GameCube/Wii disc format types.
use std::{
borrow::Cow,
ffi::CStr,
fmt::{Debug, Display, Formatter},
io,
mem::size_of,
str::from_utf8,
};
use std::{ffi::CStr, str::from_utf8};
use dyn_clone::DynClone;
use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes};
use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout, big_endian::*};
use crate::{
disc::wii::{Ticket, TmdHeader},
fst::Node,
static_assert,
streams::{ReadStream, SharedWindowedReadStream},
Fst, Result,
};
use crate::{common::MagicBytes, util::static_assert};
pub(crate) mod direct;
pub mod fst;
pub(crate) mod gcn;
pub(crate) mod hashes;
pub(crate) mod preloader;
pub(crate) mod reader;
pub(crate) mod wii;
pub mod wii;
pub(crate) mod writer;
/// Size in bytes of a disc sector.
/// Size in bytes of a disc sector. (32 KiB)
pub const SECTOR_SIZE: usize = 0x8000;
/// Size in bytes of a Wii partition sector group. (32 KiB * 64, 2 MiB)
pub const SECTOR_GROUP_SIZE: usize = SECTOR_SIZE * 64;
/// Magic bytes for Wii discs. Located at offset 0x18.
pub const WII_MAGIC: MagicBytes = [0x5D, 0x1C, 0x9E, 0xA3];
/// Magic bytes for GameCube discs. Located at offset 0x1C.
pub const GCN_MAGIC: MagicBytes = [0xC2, 0x33, 0x9F, 0x3D];
/// Offset in bytes of the boot block within a disc partition.
pub const BB2_OFFSET: usize = 0x420;
/// Size in bytes of the disc header, debug block and boot block. (boot.bin)
pub const BOOT_SIZE: usize = 0x440;
/// Size in bytes of the DVD Boot Info (debug and region information, bi2.bin)
pub const BI2_SIZE: usize = 0x2000;
/// The size of a single-layer MiniDVD. (1.4 GB)
///
/// GameCube games and some third-party Wii discs (Datel) use this format.
pub const MINI_DVD_SIZE: u64 = 1_459_978_240;
/// The size of a single-layer DVD. (4.7 GB)
///
/// The vast majority of Wii games use this format.
pub const SL_DVD_SIZE: u64 = 4_699_979_776;
/// The size of a dual-layer DVD. (8.5 GB)
///
/// A few larger Wii games use this format.
/// (Super Smash Bros. Brawl, Metroid Prime Trilogy, etc.)
pub const DL_DVD_SIZE: u64 = 8_511_160_320;
/// Shared GameCube & Wii disc header.
///
/// This header is always at the start of the disc image and within each Wii partition.
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[derive(Clone, Debug, PartialEq, FromBytes, IntoBytes, Immutable, KnownLayout)]
#[repr(C, align(4))]
pub struct DiscHeader {
/// Game ID (e.g. GM8E01 for Metroid Prime)
@ -47,9 +71,9 @@ pub struct DiscHeader {
/// Padding
_pad1: [u8; 14],
/// If this is a Wii disc, this will be 0x5D1C9EA3
pub wii_magic: U32,
pub wii_magic: MagicBytes,
/// If this is a GameCube disc, this will be 0xC2339F3D
pub gcn_magic: U32,
pub gcn_magic: MagicBytes,
/// Game title
pub game_title: [u8; 64],
/// If 1, disc omits partition hashes
@ -64,9 +88,11 @@ static_assert!(size_of::<DiscHeader>() == 0x400);
impl DiscHeader {
/// Game ID as a string.
#[inline]
pub fn game_id_str(&self) -> &str { from_utf8(&self.game_id).unwrap_or("[invalid]") }
/// Game title as a string.
#[inline]
pub fn game_title_str(&self) -> &str {
CStr::from_bytes_until_nul(&self.game_title)
.ok()
@ -75,26 +101,44 @@ impl DiscHeader {
}
/// Whether this is a GameCube disc.
pub fn is_gamecube(&self) -> bool { self.gcn_magic.get() == 0xC2339F3D }
#[inline]
pub fn is_gamecube(&self) -> bool { self.gcn_magic == GCN_MAGIC }
/// Whether this is a Wii disc.
pub fn is_wii(&self) -> bool { self.wii_magic.get() == 0x5D1C9EA3 }
#[inline]
pub fn is_wii(&self) -> bool { self.wii_magic == WII_MAGIC }
/// Whether the disc has partition data hashes.
#[inline]
pub fn has_partition_hashes(&self) -> bool { self.no_partition_hashes == 0 }
/// Whether the disc has partition data encryption.
#[inline]
pub fn has_partition_encryption(&self) -> bool { self.no_partition_encryption == 0 }
}
/// A header describing the contents of a disc partition.
/// The debug block of a disc partition.
///
/// **GameCube**: Always follows the disc header.
///
/// **Wii**: Follows the disc header within each partition.
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
/// Located at offset 0x400 (following the disc header) within each partition.
#[derive(Clone, Debug, PartialEq, FromBytes, IntoBytes, Immutable, KnownLayout)]
#[repr(C, align(4))]
pub struct PartitionHeader {
pub struct DebugHeader {
/// Debug monitor offset
pub debug_mon_offset: U32,
/// Debug monitor load address
pub debug_load_address: U32,
/// Padding
_pad1: [u8; 0x18],
}
static_assert!(size_of::<DebugHeader>() == 0x20);
/// The boot block (BB2) of a disc partition.
///
/// Located at offset 0x420 (following the debug block) within each partition.
#[derive(Clone, Debug, PartialEq, FromBytes, IntoBytes, Immutable, KnownLayout)]
#[repr(C, align(4))]
pub struct BootHeader {
/// Offset to main DOL (Wii: >> 2)
pub dol_offset: U32,
/// Offset to file system table (Wii: >> 2)
@ -113,48 +157,79 @@ pub struct PartitionHeader {
_pad2: [u8; 4],
}
static_assert!(size_of::<PartitionHeader>() == 0x40);
static_assert!(size_of::<BootHeader>() == 0x20);
static_assert!(
size_of::<DiscHeader>() + size_of::<DebugHeader>() + size_of::<BootHeader>() == BOOT_SIZE
);
impl PartitionHeader {
impl BootHeader {
/// Offset within the partition to the main DOL.
#[inline]
pub fn dol_offset(&self, is_wii: bool) -> u64 {
if is_wii { self.dol_offset.get() as u64 * 4 } else { self.dol_offset.get() as u64 }
}
/// Set the offset within the partition to the main DOL.
#[inline]
pub fn set_dol_offset(&mut self, offset: u64, is_wii: bool) {
if is_wii {
self.dol_offset.get() as u64 * 4
self.dol_offset.set((offset / 4) as u32);
} else {
self.dol_offset.get() as u64
self.dol_offset.set(offset as u32);
}
}
/// Offset within the partition to the file system table (FST).
#[inline]
pub fn fst_offset(&self, is_wii: bool) -> u64 {
if is_wii { self.fst_offset.get() as u64 * 4 } else { self.fst_offset.get() as u64 }
}
/// Set the offset within the partition to the file system table (FST).
#[inline]
pub fn set_fst_offset(&mut self, offset: u64, is_wii: bool) {
if is_wii {
self.fst_offset.get() as u64 * 4
self.fst_offset.set((offset / 4) as u32);
} else {
self.fst_offset.get() as u64
self.fst_offset.set(offset as u32);
}
}
/// Size of the file system table (FST).
#[inline]
pub fn fst_size(&self, is_wii: bool) -> u64 {
if is_wii { self.fst_size.get() as u64 * 4 } else { self.fst_size.get() as u64 }
}
/// Set the size of the file system table (FST).
#[inline]
pub fn set_fst_size(&mut self, size: u64, is_wii: bool) {
if is_wii {
self.fst_size.get() as u64 * 4
self.fst_size.set((size / 4) as u32);
} else {
self.fst_size.get() as u64
self.fst_size.set(size as u32);
}
}
/// Maximum size of the file system table (FST) across multi-disc games.
#[inline]
pub fn fst_max_size(&self, is_wii: bool) -> u64 {
if is_wii { self.fst_max_size.get() as u64 * 4 } else { self.fst_max_size.get() as u64 }
}
/// Set the maximum size of the file system table (FST) across multi-disc games.
#[inline]
pub fn set_fst_max_size(&mut self, size: u64, is_wii: bool) {
if is_wii {
self.fst_max_size.get() as u64 * 4
self.fst_max_size.set((size / 4) as u32);
} else {
self.fst_max_size.get() as u64
self.fst_max_size.set(size as u32);
}
}
}
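// Minimal sketch: Wii values are stored >> 2, so setters and getters must agree
// on the is_wii flag for values to round-trip.
#[cfg(test)]
#[test]
fn boot_header_wii_shift() {
    use zerocopy::FromZeros;
    let mut header = BootHeader::new_zeroed();
    header.set_fst_offset(0x0040_0000, true);
    assert_eq!(header.fst_offset(true), 0x0040_0000);
}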
/// Apploader header.
#[derive(Debug, PartialEq, Clone, FromBytes, FromZeroes, AsBytes)]
#[derive(Debug, PartialEq, Clone, FromBytes, IntoBytes, Immutable, KnownLayout)]
#[repr(C, align(4))]
pub struct ApploaderHeader {
/// Apploader build date
@ -171,6 +246,7 @@ pub struct ApploaderHeader {
impl ApploaderHeader {
/// Apploader build date as a string.
#[inline]
pub fn date_str(&self) -> Option<&str> {
CStr::from_bytes_until_nul(&self.date).ok().and_then(|c| c.to_str().ok())
}
@ -182,7 +258,7 @@ pub const DOL_MAX_TEXT_SECTIONS: usize = 7;
pub const DOL_MAX_DATA_SECTIONS: usize = 11;
/// Dolphin executable (DOL) header.
#[derive(Debug, Clone, FromBytes, FromZeroes)]
#[derive(Debug, Clone, FromBytes, IntoBytes, Immutable, KnownLayout)]
pub struct DolHeader {
/// Text section offsets
pub text_offs: [U32; DOL_MAX_TEXT_SECTIONS],
@ -207,176 +283,3 @@ pub struct DolHeader {
}
static_assert!(size_of::<DolHeader>() == 0x100);
/// The kind of disc partition.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum PartitionKind {
/// Data partition.
Data,
/// Update partition.
Update,
/// Channel partition.
Channel,
/// Other partition kind.
Other(u32),
}
impl Display for PartitionKind {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
Self::Data => write!(f, "Data"),
Self::Update => write!(f, "Update"),
Self::Channel => write!(f, "Channel"),
Self::Other(v) => {
let bytes = v.to_be_bytes();
write!(f, "Other ({:08X}, {})", v, String::from_utf8_lossy(&bytes))
}
}
}
}
impl PartitionKind {
/// Returns the directory name for the partition kind.
pub fn dir_name(&self) -> Cow<str> {
match self {
Self::Data => Cow::Borrowed("DATA"),
Self::Update => Cow::Borrowed("UPDATE"),
Self::Channel => Cow::Borrowed("CHANNEL"),
Self::Other(v) => {
let bytes = v.to_be_bytes();
Cow::Owned(format!("P-{}", String::from_utf8_lossy(&bytes)))
}
}
}
}
impl From<u32> for PartitionKind {
fn from(v: u32) -> Self {
match v {
0 => Self::Data,
1 => Self::Update,
2 => Self::Channel,
v => Self::Other(v),
}
}
}
/// An open disc partition.
pub trait PartitionBase: DynClone + ReadStream + Send + Sync {
/// Reads the partition header and file system table.
fn meta(&mut self) -> Result<Box<PartitionMeta>>;
/// Seeks the read stream to the specified file system node
/// and returns a windowed stream.
///
/// # Examples
///
/// Basic usage:
/// ```no_run
/// use std::io::Read;
///
/// use nod::{Disc, PartitionKind};
///
/// fn main() -> nod::Result<()> {
/// let disc = Disc::new("path/to/file.iso")?;
/// let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
/// let meta = partition.meta()?;
/// let fst = meta.fst()?;
/// if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
/// let mut s = String::new();
/// partition
/// .open_file(node)
/// .expect("Failed to open file stream")
/// .read_to_string(&mut s)
/// .expect("Failed to read file");
/// println!("{}", s);
/// }
/// Ok(())
/// }
/// ```
fn open_file(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream>;
/// The ideal size for buffered reads from this partition.
/// GameCube discs have a data block size of 0x8000,
/// whereas Wii discs have a data block size of 0x7C00.
fn ideal_buffer_size(&self) -> usize;
}
dyn_clone::clone_trait_object!(PartitionBase);
/// Size of the disc header and partition header (boot.bin)
pub const BOOT_SIZE: usize = size_of::<DiscHeader>() + size_of::<PartitionHeader>();
/// Size of the debug and region information (bi2.bin)
pub const BI2_SIZE: usize = 0x2000;
/// Extra disc partition data. (DOL, FST, etc.)
#[derive(Clone, Debug)]
pub struct PartitionMeta {
/// Disc and partition header (boot.bin)
pub raw_boot: Box<[u8; BOOT_SIZE]>,
/// Debug and region information (bi2.bin)
pub raw_bi2: Box<[u8; BI2_SIZE]>,
/// Apploader (apploader.bin)
pub raw_apploader: Box<[u8]>,
/// File system table (fst.bin)
pub raw_fst: Box<[u8]>,
/// Main binary (main.dol)
pub raw_dol: Box<[u8]>,
/// Ticket (ticket.bin, Wii only)
pub raw_ticket: Option<Box<[u8]>>,
/// TMD (tmd.bin, Wii only)
pub raw_tmd: Option<Box<[u8]>>,
/// Certificate chain (cert.bin, Wii only)
pub raw_cert_chain: Option<Box<[u8]>>,
/// H3 hash table (h3.bin, Wii only)
pub raw_h3_table: Option<Box<[u8]>>,
}
impl PartitionMeta {
/// A view into the disc header.
pub fn header(&self) -> &DiscHeader {
DiscHeader::ref_from(&self.raw_boot[..size_of::<DiscHeader>()]).unwrap()
}
/// A view into the partition header.
pub fn partition_header(&self) -> &PartitionHeader {
PartitionHeader::ref_from(&self.raw_boot[size_of::<DiscHeader>()..]).unwrap()
}
/// A view into the apploader header.
pub fn apploader_header(&self) -> &ApploaderHeader {
ApploaderHeader::ref_from_prefix(&self.raw_apploader).unwrap()
}
/// A view into the file system table (FST).
pub fn fst(&self) -> Result<Fst, &'static str> { Fst::new(&self.raw_fst) }
/// A view into the DOL header.
pub fn dol_header(&self) -> &DolHeader { DolHeader::ref_from_prefix(&self.raw_dol).unwrap() }
/// A view into the ticket. (Wii only)
pub fn ticket(&self) -> Option<&Ticket> {
self.raw_ticket.as_ref().and_then(|v| Ticket::ref_from(v))
}
/// A view into the TMD. (Wii only)
pub fn tmd_header(&self) -> Option<&TmdHeader> {
self.raw_tmd.as_ref().and_then(|v| TmdHeader::ref_from_prefix(v))
}
}
/// The size of a single-layer MiniDVD. (1.4 GB)
///
/// GameCube games and some third-party Wii discs (Datel) use this format.
pub const MINI_DVD_SIZE: u64 = 1_459_978_240;
/// The size of a single-layer DVD. (4.7 GB)
///
/// The vast majority of Wii games use this format.
pub const SL_DVD_SIZE: u64 = 4_699_979_776;
/// The size of a dual-layer DVD. (8.5 GB)
///
/// A few larger Wii games use this format.
/// (Super Smash Bros. Brawl, Metroid Prime Trilogy, etc.)
pub const DL_DVD_SIZE: u64 = 8_511_160_320;

647
nod/src/disc/preloader.rs Normal file
View File

@ -0,0 +1,647 @@
use std::{
collections::HashMap,
fmt::{Display, Formatter},
io,
num::NonZeroUsize,
sync::{Arc, Mutex},
thread::JoinHandle,
time::{Duration, Instant},
};
use bytes::{Bytes, BytesMut};
use crossbeam_channel::{Receiver, Sender};
use crossbeam_utils::sync::WaitGroup;
use lru::LruCache;
use polonius_the_crab::{polonius, polonius_return};
use simple_moving_average::{SMA, SingleSumSMA};
use tracing::{Level, debug, error, instrument, span};
use zerocopy::FromZeros;
use crate::{
IoResultContext,
common::PartitionInfo,
disc::{
DiscHeader, SECTOR_GROUP_SIZE, SECTOR_SIZE,
hashes::{GroupHashes, hash_sector_group},
wii::HASHES_SIZE,
},
io::{
block::{Block, BlockKind, BlockReader},
wia::WIAException,
},
read::PartitionEncryption,
util::{
aes::{decrypt_sector, encrypt_sector},
array_ref_mut,
},
};
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct SectorGroupRequest {
pub group_idx: u32,
pub partition_idx: Option<u8>,
pub mode: PartitionEncryption,
pub force_rehash: bool,
}
impl Display for SectorGroupRequest {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self.partition_idx {
Some(idx) => write!(f, "Partition {} group {}", idx, self.group_idx),
None => write!(f, "Group {}", self.group_idx),
}
}
}
#[derive(Clone)]
pub struct SectorGroup {
pub request: SectorGroupRequest,
pub start_sector: u32,
pub data: Bytes,
pub sector_bitmap: u64,
pub io_duration: Option<Duration>,
#[allow(unused)] // TODO WIA hash exceptions
pub group_hashes: Option<Arc<GroupHashes>>,
}
impl SectorGroup {
/// Calculate the number of consecutive sectors starting from `start`.
#[inline]
pub fn consecutive_sectors(&self, start: u32) -> u32 {
(self.sector_bitmap >> start).trailing_ones()
}
}
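// Minimal sketch: the bitmap marks which of the group's 64 sectors were loaded;
// reads may proceed for as long as the bits stay contiguous.
#[cfg(test)]
#[test]
fn consecutive_sectors_example() {
    let group = SectorGroup {
        request: SectorGroupRequest {
            group_idx: 0,
            partition_idx: None,
            mode: PartitionEncryption::Original,
            force_rehash: false,
        },
        start_sector: 0,
        data: Bytes::new(),
        sector_bitmap: 0b0111, // only sectors 0-2 present
        io_duration: None,
        group_hashes: None,
    };
    assert_eq!(group.consecutive_sectors(0), 3);
    assert_eq!(group.consecutive_sectors(1), 2);
}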
pub type SectorGroupResult = io::Result<SectorGroup>;
#[allow(unused)]
pub struct Preloader {
request_tx: Sender<SectorGroupRequest>,
request_rx: Receiver<SectorGroupRequest>,
stat_tx: Sender<PreloaderThreadStats>,
stat_rx: Receiver<PreloaderThreadStats>,
threads: Mutex<PreloaderThreads>,
cache: Arc<Mutex<PreloaderCache>>,
// Fallback single-threaded loader
loader: Mutex<SectorGroupLoader>,
}
#[allow(unused)]
struct PreloaderThreads {
join_handles: Vec<JoinHandle<()>>,
last_adjust: Instant,
num_samples: usize,
wait_time_avg: SingleSumSMA<Duration, u32, 100>,
req_time_avg: SingleSumSMA<Duration, u32, 100>,
io_time_avg: SingleSumSMA<Duration, u32, 100>,
}
impl PreloaderThreads {
fn new(join_handles: Vec<JoinHandle<()>>) -> Self {
Self {
join_handles,
last_adjust: Instant::now(),
num_samples: 0,
wait_time_avg: SingleSumSMA::<_, _, 100>::from_zero(Duration::default()),
req_time_avg: SingleSumSMA::<_, _, 100>::from_zero(Duration::default()),
io_time_avg: SingleSumSMA::<_, _, 100>::from_zero(Duration::default()),
}
}
fn push_stats(&mut self, stat: PreloaderThreadStats, _outer: &Preloader) {
self.wait_time_avg.add_sample(stat.wait_time);
self.req_time_avg.add_sample(stat.req_time);
self.io_time_avg.add_sample(stat.io_time);
self.num_samples += 1;
if self.num_samples % 100 == 0 {
let avg_wait = self.wait_time_avg.get_average();
let avg_req = self.req_time_avg.get_average();
let avg_io = self.io_time_avg.get_average();
let utilization =
avg_req.as_secs_f64() / (avg_req.as_secs_f64() + avg_wait.as_secs_f64());
let io_time = avg_io.as_secs_f64() / avg_req.as_secs_f64();
debug!(
"Preloader stats: count {}, wait: {:?}, req: {:?}, util: {:.2}%, io: {:.2}%",
self.num_samples,
avg_wait,
avg_req,
utilization * 100.0,
io_time * 100.0
);
// if self.last_adjust.elapsed() > Duration::from_secs(2) {
// if utilization > 0.9 && io_time < 0.1 {
// println!("Preloader is CPU-bound, increasing thread count");
// let id = self.join_handles.len();
// self.join_handles.push(preloader_thread(
// id,
// outer.request_rx.clone(),
// outer.cache.clone(),
// outer.loader.lock().unwrap().clone(),
// outer.stat_tx.clone(),
// ));
// self.last_adjust = Instant::now();
// } /*else if io_time > 0.9 {
// println!("Preloader is I/O-bound, decreasing thread count");
// if self.join_handles.len() > 1 {
// let handle = self.join_handles.pop().unwrap();
//
// }
// }*/
// }
}
}
}
struct PreloaderCache {
inflight: HashMap<SectorGroupRequest, WaitGroup>,
lru_cache: LruCache<SectorGroupRequest, SectorGroup>,
}
impl Default for PreloaderCache {
fn default() -> Self {
Self {
inflight: Default::default(),
lru_cache: LruCache::new(NonZeroUsize::new(64).unwrap()),
}
}
}
impl PreloaderCache {
fn push(&mut self, request: SectorGroupRequest, group: SectorGroup) {
self.lru_cache.push(request, group);
self.inflight.remove(&request);
}
fn remove(&mut self, request: &SectorGroupRequest) { self.inflight.remove(request); }
fn contains(&self, request: &SectorGroupRequest) -> bool {
self.lru_cache.contains(request) || self.inflight.contains_key(request)
}
}
#[allow(unused)]
struct PreloaderThreadStats {
thread_id: usize,
wait_time: Duration,
req_time: Duration,
io_time: Duration,
}
fn preloader_thread(
thread_id: usize,
request_rx: Receiver<SectorGroupRequest>,
cache: Arc<Mutex<PreloaderCache>>,
mut loader: SectorGroupLoader,
stat_tx: Sender<PreloaderThreadStats>,
) -> JoinHandle<()> {
std::thread::Builder::new()
.name(format!("Preloader {thread_id}"))
.spawn(move || {
let mut last_request_end: Option<Instant> = None;
while let Ok(request) = request_rx.recv() {
let wait_time = if let Some(last_request) = last_request_end {
last_request.elapsed()
} else {
Duration::default()
};
let start = Instant::now();
let mut io_time = Duration::default();
match loader.load(request) {
Ok(group) => {
let Ok(mut cache_guard) = cache.lock() else {
break;
};
io_time = group.io_duration.unwrap_or_default();
cache_guard.push(request, group);
}
Err(_) => {
let Ok(mut cache_guard) = cache.lock() else {
break;
};
// Just drop the request if it failed
cache_guard.remove(&request);
}
}
let end = Instant::now();
last_request_end = Some(end);
let req_time = end - start;
if stat_tx
.send(PreloaderThreadStats { thread_id, wait_time, req_time, io_time })
.is_err()
{
break;
}
}
})
.expect("Failed to spawn preloader thread")
}
impl Preloader {
pub fn new(loader: SectorGroupLoader, num_threads: usize) -> Arc<Self> {
debug!("Creating preloader with {} threads", num_threads);
let (request_tx, request_rx) = crossbeam_channel::unbounded();
let (stat_tx, stat_rx) = crossbeam_channel::unbounded();
let cache = Arc::new(Mutex::new(PreloaderCache::default()));
let mut join_handles = Vec::with_capacity(num_threads);
for i in 0..num_threads {
join_handles.push(preloader_thread(
i,
request_rx.clone(),
cache.clone(),
loader.clone(),
stat_tx.clone(),
));
}
let threads = Mutex::new(PreloaderThreads::new(join_handles));
let loader = Mutex::new(loader);
Arc::new(Self { request_tx, request_rx, stat_tx, stat_rx, threads, cache, loader })
}
#[allow(unused)]
pub fn shutdown(self) {
    // Drop the request sender first so the worker threads observe the channel
    // disconnect and exit; joining before that would block forever on recv().
    let Self { request_tx, threads, .. } = self;
    drop(request_tx);
    let guard = threads.into_inner().unwrap();
    for handle in guard.join_handles {
        handle.join().unwrap();
    }
}
#[instrument(name = "Preloader::fetch", skip_all)]
pub fn fetch(&self, request: SectorGroupRequest, max_groups: u32) -> SectorGroupResult {
let num_threads = {
let mut threads_guard = self.threads.lock().map_err(map_poisoned)?;
while let Ok(stat) = self.stat_rx.try_recv() {
threads_guard.push_stats(stat, self);
}
threads_guard.join_handles.len()
};
let mut cache_guard = self.cache.lock().map_err(map_poisoned)?;
// Preload up to `num_threads` groups ahead
for i in 0..num_threads as u32 {
let group_idx = request.group_idx + i;
if group_idx >= max_groups {
break;
}
let request = SectorGroupRequest { group_idx, ..request };
if cache_guard.contains(&request) {
continue;
}
if self.request_tx.send(request).is_ok() {
cache_guard.inflight.insert(request, WaitGroup::new());
}
}
if let Some(cached) = cache_guard.lru_cache.get(&request) {
return Ok(cached.clone());
}
if let Some(wg) = cache_guard.inflight.get(&request) {
// Wait for inflight request to finish
let wg = wg.clone();
drop(cache_guard);
{
let _span = span!(Level::TRACE, "wg.wait").entered();
wg.wait();
}
let mut cache_guard = self.cache.lock().map_err(map_poisoned)?;
if let Some(cached) = cache_guard.lru_cache.get(&request) {
return Ok(cached.clone());
}
} else {
drop(cache_guard);
}
// Not cached and not in flight (or the in-flight load failed); fall back to the single-threaded loader
let result = {
let mut loader = self.loader.lock().map_err(map_poisoned)?;
loader.load(request)
};
match result {
Ok(group) => {
let mut cache_guard = self.cache.lock().map_err(map_poisoned)?;
cache_guard.push(request, group.clone());
Ok(group)
}
Err(e) => Err(e),
}
}
}
#[inline]
fn map_poisoned<T>(_: std::sync::PoisonError<T>) -> io::Error { io::Error::other("Mutex poisoned") }
pub struct SectorGroupLoader {
io: Box<dyn BlockReader>,
disc_header: Arc<DiscHeader>,
partitions: Arc<[PartitionInfo]>,
block: Block,
block_buf: Box<[u8]>,
}
impl Clone for SectorGroupLoader {
fn clone(&self) -> Self {
let block_size = self.io.block_size() as usize;
Self {
io: self.io.clone(),
disc_header: self.disc_header.clone(),
partitions: self.partitions.clone(),
block: Block::default(),
block_buf: <[u8]>::new_box_zeroed_with_elems(block_size).unwrap(),
}
}
}
#[derive(Default)]
struct LoadedSectorGroup {
/// Start sector of the group
start_sector: u32,
/// Bitmap of sectors that were read
sector_bitmap: u64,
/// Total duration of I/O operations
io_duration: Option<Duration>,
/// Calculated sector group hashes
group_hashes: Option<Arc<GroupHashes>>,
}
impl SectorGroupLoader {
pub fn new(
io: Box<dyn BlockReader>,
disc_header: Arc<DiscHeader>,
partitions: Arc<[PartitionInfo]>,
) -> Self {
let block_buf = <[u8]>::new_box_zeroed_with_elems(io.block_size() as usize).unwrap();
Self { io, disc_header, partitions, block: Block::default(), block_buf }
}
#[instrument(name = "SectorGroupLoader::load", skip_all)]
pub fn load(&mut self, request: SectorGroupRequest) -> SectorGroupResult {
let mut sector_group_buf = BytesMut::zeroed(SECTOR_GROUP_SIZE);
let out = array_ref_mut![sector_group_buf, 0, SECTOR_GROUP_SIZE];
let LoadedSectorGroup { start_sector, sector_bitmap, io_duration, group_hashes } =
if request.partition_idx.is_some() {
self.load_partition_group(request, out)?
} else {
self.load_raw_group(request, out)?
};
Ok(SectorGroup {
request,
start_sector,
data: sector_group_buf.freeze(),
sector_bitmap,
io_duration,
group_hashes,
})
}
/// Load a sector group from a partition.
///
/// This will handle encryption, decryption, and hash recovery as needed.
fn load_partition_group(
&mut self,
request: SectorGroupRequest,
sector_group_buf: &mut [u8; SECTOR_GROUP_SIZE],
) -> io::Result<LoadedSectorGroup> {
let Some(partition) =
request.partition_idx.and_then(|idx| self.partitions.get(idx as usize))
else {
return Ok(LoadedSectorGroup::default());
};
let abs_group_sector = partition.data_start_sector + request.group_idx * 64;
if abs_group_sector >= partition.data_end_sector {
return Ok(LoadedSectorGroup::default());
}
// Bitmap of sectors that were read
let mut sector_bitmap = 0u64;
// Bitmap of sectors that are decrypted
let mut decrypted_sectors = 0u64;
// Bitmap of sectors that need hash recovery
let mut hash_recovery_sectors = 0u64;
// Hash exceptions
let mut hash_exceptions = Vec::<WIAException>::new();
// Total duration of I/O operations
let mut io_duration = None;
// Calculated sector group hashes
let mut group_hashes = None;
// Read sector group
for sector in 0..64 {
let sector_data =
array_ref_mut![sector_group_buf, sector as usize * SECTOR_SIZE, SECTOR_SIZE];
let abs_sector = abs_group_sector + sector;
if abs_sector >= partition.data_end_sector {
// Already zeroed
decrypted_sectors |= 1 << sector;
hash_recovery_sectors |= 1 << sector;
continue;
}
// Read new block
if !self.block.contains(abs_sector) {
self.block = self
.io
.read_block(self.block_buf.as_mut(), abs_sector)
.io_with_context(|| format!("Reading block for sector {abs_sector}"))?;
if let Some(duration) = self.block.io_duration {
*io_duration.get_or_insert_with(Duration::default) += duration;
}
if self.block.kind == BlockKind::None {
error!("Failed to read block for sector {}", abs_sector);
break;
}
}
// Add hash exceptions
self.block
.append_hash_exceptions(abs_sector, sector, &mut hash_exceptions)
.io_with_context(|| format!("Appending hash exceptions for sector {abs_sector}"))?;
// Read new sector into buffer
let (encrypted, has_hashes) = self
.block
.copy_sector(
sector_data,
self.block_buf.as_mut(),
abs_sector,
partition.disc_header(),
Some(partition),
)
.io_with_context(|| format!("Copying sector {abs_sector} from block"))?;
if !encrypted {
decrypted_sectors |= 1 << sector;
}
if !has_hashes && partition.has_hashes {
hash_recovery_sectors |= 1 << sector;
}
sector_bitmap |= 1 << sector;
}
// Recover hashes
if request.force_rehash
|| (request.mode != PartitionEncryption::ForceDecryptedNoHashes
&& hash_recovery_sectors != 0)
{
// Decrypt any encrypted sectors
if decrypted_sectors != u64::MAX {
for sector in 0..64 {
let sector_data =
array_ref_mut![sector_group_buf, sector * SECTOR_SIZE, SECTOR_SIZE];
if (decrypted_sectors >> sector) & 1 == 0 {
decrypt_sector(sector_data, &partition.key);
}
}
decrypted_sectors = u64::MAX;
}
// Recover hashes
let hashes = hash_sector_group(sector_group_buf, request.force_rehash);
// Apply hashes
for sector in 0..64 {
let sector_data =
array_ref_mut![sector_group_buf, sector * SECTOR_SIZE, SECTOR_SIZE];
if (hash_recovery_sectors >> sector) & 1 == 1 {
hashes.apply(sector_data, sector);
}
}
// Persist hashes
group_hashes = Some(Arc::from(hashes));
}
// Apply hash exceptions
if request.mode != PartitionEncryption::ForceDecryptedNoHashes
&& !hash_exceptions.is_empty()
{
for exception in hash_exceptions {
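// Exception offsets address the group's concatenated hash areas, HASHES_SIZE bytes per sector.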
let offset = exception.offset.get();
let sector = offset / HASHES_SIZE as u16;
// Decrypt sector if needed
let sector_data =
array_ref_mut![sector_group_buf, sector as usize * SECTOR_SIZE, SECTOR_SIZE];
if (decrypted_sectors >> sector) & 1 == 0 {
decrypt_sector(sector_data, &partition.key);
decrypted_sectors |= 1 << sector;
}
let sector_offset = (offset - (sector * HASHES_SIZE as u16)) as usize;
*array_ref_mut![sector_data, sector_offset, 20] = exception.hash;
}
}
// Encrypt/decrypt sectors
if match request.mode {
PartitionEncryption::Original => partition.has_encryption,
PartitionEncryption::ForceEncrypted => true,
PartitionEncryption::ForceDecrypted | PartitionEncryption::ForceDecryptedNoHashes => {
false
}
} {
// Encrypt any decrypted sectors
if decrypted_sectors != 0 {
for sector in 0..64 {
let sector_data = array_ref_mut![
sector_group_buf,
sector as usize * SECTOR_SIZE,
SECTOR_SIZE
];
if (decrypted_sectors >> sector) & 1 == 1 {
encrypt_sector(sector_data, &partition.key);
}
}
}
} else if decrypted_sectors != u64::MAX {
// Decrypt any encrypted sectors
for sector in 0..64 {
let sector_data =
array_ref_mut![sector_group_buf, sector as usize * SECTOR_SIZE, SECTOR_SIZE];
if (decrypted_sectors >> sector) & 1 == 0 {
decrypt_sector(sector_data, &partition.key);
}
}
}
Ok(LoadedSectorGroup {
start_sector: abs_group_sector,
sector_bitmap,
io_duration,
group_hashes,
})
}
/// Loads a non-partition sector group.
fn load_raw_group(
&mut self,
request: SectorGroupRequest,
sector_group_buf: &mut [u8; SECTOR_GROUP_SIZE],
) -> io::Result<LoadedSectorGroup> {
let abs_group_sector = request.group_idx * 64;
// Bitmap of sectors that were read
let mut sector_bitmap = 0u64;
// Total duration of I/O operations
let mut io_duration = None;
for sector in 0..64 {
let sector_data =
array_ref_mut![sector_group_buf, sector as usize * SECTOR_SIZE, SECTOR_SIZE];
let abs_sector = abs_group_sector + sector;
if self.partitions.iter().any(|p| p.data_contains_sector(abs_sector)) {
continue;
}
// Read new block
if !self.block.contains(abs_sector) {
self.block = self
.io
.read_block(self.block_buf.as_mut(), abs_sector)
.io_with_context(|| format!("Reading block for sector {abs_sector}"))?;
if let Some(duration) = self.block.io_duration {
*io_duration.get_or_insert_with(Duration::default) += duration;
}
if self.block.kind == BlockKind::None {
break;
}
}
// Read new sector into buffer
self.block
.copy_sector(
sector_data,
self.block_buf.as_mut(),
abs_sector,
self.disc_header.as_ref(),
None,
)
.io_with_context(|| format!("Copying sector {abs_sector} from block"))?;
sector_bitmap |= 1 << sector;
}
Ok(LoadedSectorGroup {
start_sector: abs_group_sector,
sector_bitmap,
io_duration,
group_hashes: None,
})
}
}
/// Fetch a sector group from the cache, or request it from the preloader.
/// Returns the sector group along with a boolean indicating whether the
/// cached group was replaced.
pub fn fetch_sector_group<'a>(
request: SectorGroupRequest,
max_groups: u32,
mut cached: &'a mut Option<SectorGroup>,
preloader: &Preloader,
) -> io::Result<(&'a SectorGroup, bool)> {
polonius!(|cached| -> io::Result<(&'polonius SectorGroup, bool)> {
if let Some(sector_group) = cached {
if sector_group.request == request {
polonius_return!(Ok((sector_group, false)));
}
}
});
let sector_group = preloader.fetch(request, max_groups)?;
Ok((cached.insert(sector_group), true))
}
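
To illustrate how the pieces above fit together: a consumer keeps one cached SectorGroup and calls fetch_sector_group for each position, which reuses the cache when possible and otherwise asks the Preloader (queueing upcoming groups for the worker threads). A minimal sketch under the types above, with the module's existing imports; SequentialReader and its fields are hypothetical stand-ins for DiscReader's internals:

// Hypothetical sequential consumer of the preloader (not part of the diff).
struct SequentialReader {
    preloader: Arc<Preloader>,
    cached: Option<SectorGroup>,
    mode: PartitionEncryption,
    pos: u64,
    size: u64,
}

impl SequentialReader {
    /// Fetch the (non-partition) sector group covering the current position.
    fn current_group(&mut self) -> io::Result<&SectorGroup> {
        let abs_sector = (self.pos / SECTOR_SIZE as u64) as u32;
        let request = SectorGroupRequest {
            group_idx: abs_sector / 64,
            partition_idx: None,
            mode: self.mode,
            force_rehash: false,
        };
        let max_groups = self.size.div_ceil(SECTOR_GROUP_SIZE as u64) as u32;
        // Reuses `cached` when the request matches; otherwise fetches the group
        // and queues the next few groups with the preloader's worker threads.
        let (group, _updated) =
            fetch_sector_group(request, max_groups, &mut self.cached, &self.preloader)?;
        Ok(group)
    }
}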

View File

@ -1,131 +1,269 @@
use std::{
cmp::min,
io,
io::{Read, Seek, SeekFrom},
io::{BufRead, Seek, SeekFrom},
sync::Arc,
};
use zerocopy::FromZeroes;
use bytes::Bytes;
use polonius_the_crab::{polonius, polonius_return};
use tracing::warn;
use zerocopy::{FromBytes, IntoBytes};
use crate::{
Error, Result, ResultContext,
common::{PartitionInfo, PartitionKind},
disc::{
gcn::PartitionGC,
hashes::{rebuild_hashes, HashTable},
wii::{PartitionWii, WiiPartEntry, WiiPartGroup, WiiPartitionHeader, WII_PART_GROUP_OFF},
DL_DVD_SIZE, MINI_DVD_SIZE, SL_DVD_SIZE,
BB2_OFFSET, BOOT_SIZE, BootHeader, DL_DVD_SIZE, DiscHeader, MINI_DVD_SIZE,
SECTOR_GROUP_SIZE, SECTOR_SIZE, SL_DVD_SIZE,
direct::{DirectDiscReader, DirectDiscReaderMode},
fst::{Fst, NodeKind},
gcn::{PartitionReaderGC, read_fst},
preloader::{
Preloader, SectorGroup, SectorGroupLoader, SectorGroupRequest, fetch_sector_group,
},
wii::{
PartitionReaderWii, REGION_OFFSET, REGION_SIZE, WII_PART_GROUP_OFF, WiiPartEntry,
WiiPartGroup, WiiPartitionHeader,
},
},
io::block::BlockReader,
read::{DiscMeta, DiscOptions, PartitionEncryption, PartitionOptions, PartitionReader},
util::{
array_ref, impl_read_for_bufread,
read::{read_arc, read_from, read_vec},
},
io::block::{Block, BlockIO, PartitionInfo},
util::read::{read_box, read_from, read_vec},
DiscHeader, DiscMeta, Error, OpenOptions, PartitionBase, PartitionHeader, PartitionKind,
Result, ResultContext, SECTOR_SIZE,
};
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub enum EncryptionMode {
Encrypted,
Decrypted,
pub struct DiscReader {
io: Box<dyn BlockReader>,
preloader: Arc<Preloader>,
pos: u64,
size: u64,
mode: PartitionEncryption,
raw_boot: Arc<[u8; BOOT_SIZE]>,
alt_disc_header: Option<Arc<DiscHeader>>,
disc_data: DiscReaderData,
sector_group: Option<SectorGroup>,
}
pub struct DiscReader {
io: Box<dyn BlockIO>,
block: Block,
block_buf: Box<[u8]>,
block_idx: u32,
sector_buf: Box<[u8; SECTOR_SIZE]>,
sector_idx: u32,
pos: u64,
mode: EncryptionMode,
disc_header: Box<DiscHeader>,
pub(crate) partitions: Vec<PartitionInfo>,
hash_tables: Vec<HashTable>,
#[derive(Clone)]
enum DiscReaderData {
GameCube {
raw_fst: Option<Arc<[u8]>>,
},
Wii {
partitions: Arc<[PartitionInfo]>,
alt_partitions: Option<Arc<[PartitionInfo]>>,
region: [u8; REGION_SIZE],
},
}
impl Clone for DiscReader {
fn clone(&self) -> Self {
Self {
io: self.io.clone(),
block: Block::default(),
block_buf: <u8>::new_box_slice_zeroed(self.block_buf.len()),
block_idx: u32::MAX,
sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed(),
sector_idx: u32::MAX,
preloader: self.preloader.clone(),
pos: 0,
size: self.size,
mode: self.mode,
disc_header: self.disc_header.clone(),
partitions: self.partitions.clone(),
hash_tables: self.hash_tables.clone(),
raw_boot: self.raw_boot.clone(),
alt_disc_header: self.alt_disc_header.clone(),
disc_data: self.disc_data.clone(),
sector_group: None,
}
}
}
impl DiscReader {
pub fn new(inner: Box<dyn BlockIO>, options: &OpenOptions) -> Result<Self> {
let block_size = inner.block_size();
let meta = inner.meta();
let mut reader = Self {
io: inner,
block: Block::default(),
block_buf: <u8>::new_box_slice_zeroed(block_size as usize),
block_idx: u32::MAX,
sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed(),
sector_idx: u32::MAX,
pos: 0,
mode: if options.rebuild_encryption {
EncryptionMode::Encrypted
} else {
EncryptionMode::Decrypted
},
disc_header: DiscHeader::new_box_zeroed(),
partitions: vec![],
hash_tables: vec![],
};
let disc_header: Box<DiscHeader> = read_box(&mut reader).context("Reading disc header")?;
reader.disc_header = disc_header;
if reader.disc_header.is_wii() {
reader.partitions = read_partition_info(&mut reader)?;
// Rebuild hashes if the format requires it
if (options.rebuild_encryption || options.validate_hashes) && meta.needs_hash_recovery {
rebuild_hashes(&mut reader)?;
pub fn new(inner: Box<dyn BlockReader>, options: &DiscOptions) -> Result<Self> {
let mut reader = DirectDiscReader::new(inner)?;
let raw_boot: Arc<[u8; BOOT_SIZE]> =
read_arc(reader.as_mut()).context("Reading disc headers")?;
let disc_header = DiscHeader::ref_from_bytes(&raw_boot[..size_of::<DiscHeader>()])
.expect("Invalid disc header alignment");
let disc_header_arc = Arc::from(disc_header.clone());
let mut alt_disc_header = None;
let disc_data = if disc_header.is_wii() {
// Sanity check
if disc_header.has_partition_encryption() && !disc_header.has_partition_hashes() {
return Err(Error::DiscFormat(
"Wii disc is encrypted but has no partition hashes".to_string(),
));
}
if !disc_header.has_partition_hashes()
&& options.partition_encryption == PartitionEncryption::ForceEncrypted
{
return Err(Error::Other(
"Unsupported: Rebuilding encryption for Wii disc without hashes".to_string(),
));
}
// Read region info
reader.seek(SeekFrom::Start(REGION_OFFSET)).context("Seeking to region info")?;
let region: [u8; REGION_SIZE] =
read_from(&mut reader).context("Reading region info")?;
// Read partition info
let partitions = Arc::<[PartitionInfo]>::from(read_partition_info(
&mut reader,
disc_header_arc.clone(),
)?);
let mut alt_partitions = None;
// Update disc header with encryption mode
if matches!(
options.partition_encryption,
PartitionEncryption::ForceDecrypted | PartitionEncryption::ForceEncrypted
) {
let mut disc_header = Box::new(disc_header.clone());
let mut partitions = Box::<[PartitionInfo]>::from(partitions.as_ref());
disc_header.no_partition_encryption = match options.partition_encryption {
PartitionEncryption::ForceDecrypted => 1,
PartitionEncryption::ForceEncrypted => 0,
_ => unreachable!(),
};
for partition in &mut partitions {
partition.has_encryption = disc_header.has_partition_encryption();
}
alt_disc_header = Some(Arc::from(disc_header));
alt_partitions = Some(Arc::from(partitions));
}
DiscReaderData::Wii { partitions, alt_partitions, region }
} else if disc_header.is_gamecube() {
DiscReaderData::GameCube { raw_fst: None }
} else {
return Err(Error::DiscFormat("Invalid disc header".to_string()));
};
// Calculate disc size
let io = reader.into_inner();
let partitions = match &disc_data {
DiscReaderData::Wii { partitions, .. } => partitions,
_ => &Arc::default(),
};
let size = io.meta().disc_size.unwrap_or_else(|| guess_disc_size(partitions));
let preloader = Preloader::new(
SectorGroupLoader::new(io.clone(), disc_header_arc, partitions.clone()),
options.preloader_threads,
);
Ok(Self {
io,
preloader,
pos: 0,
size,
mode: options.partition_encryption,
raw_boot,
disc_data,
sector_group: None,
alt_disc_header,
})
}
#[inline]
pub fn reset(&mut self) { self.pos = 0; }
#[inline]
pub fn position(&self) -> u64 { self.pos }
#[inline]
pub fn disc_size(&self) -> u64 { self.size }
#[inline]
pub fn header(&self) -> &DiscHeader {
self.alt_disc_header.as_deref().unwrap_or_else(|| {
DiscHeader::ref_from_bytes(&self.raw_boot[..size_of::<DiscHeader>()])
.expect("Invalid disc header alignment")
})
}
// #[inline]
// pub fn orig_header(&self) -> &DiscHeader {
// DiscHeader::ref_from_bytes(&self.raw_boot[..size_of::<DiscHeader>()])
// .expect("Invalid disc header alignment")
// }
#[inline]
pub fn region(&self) -> Option<&[u8; REGION_SIZE]> {
match &self.disc_data {
DiscReaderData::Wii { region, .. } => Some(region),
_ => None,
}
reader.reset();
Ok(reader)
}
pub fn reset(&mut self) {
self.block = Block::default();
self.block_buf.fill(0);
self.block_idx = u32::MAX;
self.sector_buf.fill(0);
self.sector_idx = u32::MAX;
self.pos = 0;
#[inline]
pub fn partitions(&self) -> &[PartitionInfo] {
match &self.disc_data {
DiscReaderData::Wii { partitions, alt_partitions, .. } => {
alt_partitions.as_deref().unwrap_or(partitions)
}
_ => &[],
}
}
pub fn disc_size(&self) -> u64 {
self.io.meta().disc_size.unwrap_or_else(|| guess_disc_size(&self.partitions))
#[inline]
pub fn orig_partitions(&self) -> &[PartitionInfo] {
match &self.disc_data {
DiscReaderData::Wii { partitions, .. } => partitions,
_ => &[],
}
}
pub fn header(&self) -> &DiscHeader { &self.disc_header }
/// A reference to the disc's boot header (BB2) for GameCube discs.
/// For Wii discs, use the boot header from the appropriate [PartitionInfo].
#[inline]
pub fn boot_header(&self) -> Option<&BootHeader> {
match &self.disc_data {
DiscReaderData::GameCube { .. } => Some(
BootHeader::ref_from_bytes(array_ref![
self.raw_boot,
BB2_OFFSET,
size_of::<BootHeader>()
])
.expect("Invalid boot header alignment"),
),
_ => None,
}
}
pub fn partitions(&self) -> &[PartitionInfo] { &self.partitions }
/// A reference to the raw FST for GameCube discs.
/// For Wii discs, use the FST from the appropriate [PartitionInfo].
#[inline]
pub fn fst(&self) -> Option<Fst<'_>> {
match &self.disc_data {
DiscReaderData::GameCube { raw_fst } => {
raw_fst.as_deref().and_then(|v| Fst::new(v).ok())
}
_ => None,
}
}
#[inline]
pub fn meta(&self) -> DiscMeta { self.io.meta() }
/// Opens a new, decrypted partition read stream for the specified partition index.
pub fn open_partition(
&self,
index: usize,
options: &OpenOptions,
) -> Result<Box<dyn PartitionBase>> {
if self.disc_header.is_gamecube() {
if index == 0 {
Ok(PartitionGC::new(self.io.clone(), self.disc_header.clone())?)
} else {
Err(Error::DiscFormat("GameCube discs only have one partition".to_string()))
options: &PartitionOptions,
) -> Result<Box<dyn PartitionReader>> {
match &self.disc_data {
DiscReaderData::GameCube { .. } => {
if index == 0 {
Ok(PartitionReaderGC::new(self.preloader.clone(), self.disc_size())?)
} else {
Err(Error::DiscFormat("GameCube discs only have one partition".to_string()))
}
}
DiscReaderData::Wii { partitions, .. } => {
if let Some(part) = partitions.get(index) {
Ok(PartitionReaderWii::new(self.preloader.clone(), part, options)?)
} else {
Err(Error::DiscFormat(format!("Partition {index} not found")))
}
}
} else if let Some(part) = self.partitions.get(index) {
Ok(PartitionWii::new(self.io.clone(), self.disc_header.clone(), part, options)?)
} else {
Err(Error::DiscFormat(format!("Partition {index} not found")))
}
}
@ -134,78 +272,139 @@ impl DiscReader {
pub fn open_partition_kind(
&self,
kind: PartitionKind,
options: &OpenOptions,
) -> Result<Box<dyn PartitionBase>> {
if self.disc_header.is_gamecube() {
if kind == PartitionKind::Data {
Ok(PartitionGC::new(self.io.clone(), self.disc_header.clone())?)
} else {
Err(Error::DiscFormat("GameCube discs only have a data partition".to_string()))
options: &PartitionOptions,
) -> Result<Box<dyn PartitionReader>> {
match &self.disc_data {
DiscReaderData::GameCube { .. } => {
if kind == PartitionKind::Data {
Ok(PartitionReaderGC::new(self.preloader.clone(), self.disc_size())?)
} else {
Err(Error::DiscFormat("GameCube discs only have a data partition".to_string()))
}
}
DiscReaderData::Wii { partitions, .. } => {
if let Some(part) = partitions.iter().find(|v| v.kind == kind) {
Ok(PartitionReaderWii::new(self.preloader.clone(), part, options)?)
} else {
Err(Error::DiscFormat(format!("Partition type {kind} not found")))
}
}
} else if let Some(part) = self.partitions.iter().find(|v| v.kind == kind) {
Ok(PartitionWii::new(self.io.clone(), self.disc_header.clone(), part, options)?)
} else {
Err(Error::DiscFormat(format!("Partition type {kind} not found")))
}
}
}
impl Read for DiscReader {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let block_idx = (self.pos / self.block_buf.len() as u64) as u32;
let abs_sector = (self.pos / SECTOR_SIZE as u64) as u32;
let partition = if self.disc_header.is_wii() {
self.partitions.iter().find(|part| {
abs_sector >= part.data_start_sector && abs_sector < part.data_end_sector
})
pub fn load_sector_group(
&mut self,
abs_sector: u32,
force_rehash: bool,
) -> io::Result<(&SectorGroup, bool)> {
let (request, max_groups) = if let Some(partition) =
self.orig_partitions().iter().find(|part| part.data_contains_sector(abs_sector))
{
let group_idx = (abs_sector - partition.data_start_sector) / 64;
let max_groups = (partition.data_end_sector - partition.data_start_sector).div_ceil(64);
let request = SectorGroupRequest {
group_idx,
partition_idx: Some(partition.index as u8),
mode: self.mode,
force_rehash,
};
(request, max_groups)
} else {
None
let group_idx = abs_sector / 64;
let max_groups = self.size.div_ceil(SECTOR_GROUP_SIZE as u64) as u32;
let request = SectorGroupRequest {
group_idx,
partition_idx: None,
mode: self.mode,
force_rehash,
};
(request, max_groups)
};
// Read new block
if block_idx != self.block_idx {
self.block = self.io.read_block(self.block_buf.as_mut(), block_idx, partition)?;
self.block_idx = block_idx;
// Load sector group
let (sector_group, updated) =
fetch_sector_group(request, max_groups, &mut self.sector_group, &self.preloader)?;
Ok((sector_group, updated))
}
pub fn fill_buf_internal(&mut self) -> io::Result<Bytes> {
let pos = self.pos;
let size = self.size;
if pos >= size {
return Ok(Bytes::new());
}
// Read new sector into buffer
if abs_sector != self.sector_idx {
if let Some(partition) = partition {
match self.mode {
EncryptionMode::Decrypted => self.block.decrypt(
self.sector_buf.as_mut(),
self.block_buf.as_ref(),
abs_sector,
partition,
)?,
EncryptionMode::Encrypted => self.block.encrypt(
self.sector_buf.as_mut(),
self.block_buf.as_ref(),
abs_sector,
partition,
)?,
}
} else {
self.block.copy_raw(
self.sector_buf.as_mut(),
self.block_buf.as_ref(),
abs_sector,
&self.disc_header,
)?;
// Read from modified disc header
if pos < size_of::<DiscHeader>() as u64 {
if let Some(alt_disc_header) = &self.alt_disc_header {
return Ok(Bytes::copy_from_slice(&alt_disc_header.as_bytes()[pos as usize..]));
}
self.sector_idx = abs_sector;
}
// Read from sector buffer
let offset = (self.pos % SECTOR_SIZE as u64) as usize;
let len = min(buf.len(), SECTOR_SIZE - offset);
buf[..len].copy_from_slice(&self.sector_buf[offset..offset + len]);
self.pos += len as u64;
Ok(len)
// Load sector group
let abs_sector = (pos / SECTOR_SIZE as u64) as u32;
let (sector_group, _updated) = self.load_sector_group(abs_sector, false)?;
// Calculate the number of consecutive sectors in the group
let group_sector = abs_sector - sector_group.start_sector;
let consecutive_sectors = sector_group.consecutive_sectors(group_sector);
if consecutive_sectors == 0 {
return Ok(Bytes::new());
}
let num_sectors = group_sector + consecutive_sectors;
// Read from sector group buffer
let group_start = sector_group.start_sector as u64 * SECTOR_SIZE as u64;
let offset = (pos - group_start) as usize;
let end = (num_sectors as u64 * SECTOR_SIZE as u64).min(size - group_start) as usize;
Ok(sector_group.data.slice(offset..end))
}
}
impl BufRead for DiscReader {
fn fill_buf(&mut self) -> io::Result<&[u8]> {
let pos = self.pos;
let size = self.size;
if pos >= size {
return Ok(&[]);
}
let mut this = self;
polonius!(|this| -> io::Result<&'polonius [u8]> {
// Read from modified disc header
if pos < size_of::<DiscHeader>() as u64 {
if let Some(alt_disc_header) = &this.alt_disc_header {
polonius_return!(Ok(&alt_disc_header.as_bytes()[pos as usize..]));
}
}
});
// Load sector group
let abs_sector = (pos / SECTOR_SIZE as u64) as u32;
let (sector_group, _updated) = this.load_sector_group(abs_sector, false)?;
// Calculate the number of consecutive sectors in the group
let group_sector = abs_sector - sector_group.start_sector;
let consecutive_sectors = sector_group.consecutive_sectors(group_sector);
if consecutive_sectors == 0 {
return Ok(&[]);
}
let num_sectors = group_sector + consecutive_sectors;
// Read from sector group buffer
let group_start = sector_group.start_sector as u64 * SECTOR_SIZE as u64;
let offset = (pos - group_start) as usize;
let end = (num_sectors as u64 * SECTOR_SIZE as u64).min(size - group_start) as usize;
Ok(&sector_group.data[offset..end])
}
#[inline]
fn consume(&mut self, amt: usize) { self.pos += amt as u64; }
}
impl_read_for_bufread!(DiscReader);
impl Seek for DiscReader {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
self.pos = match pos {
@ -222,7 +421,10 @@ impl Seek for DiscReader {
}
}
fn read_partition_info(reader: &mut DiscReader) -> Result<Vec<PartitionInfo>> {
fn read_partition_info(
reader: &mut DirectDiscReader,
disc_header: Arc<DiscHeader>,
) -> Result<Vec<PartitionInfo>> {
reader.seek(SeekFrom::Start(WII_PART_GROUP_OFF)).context("Seeking to partition groups")?;
let part_groups: [WiiPartGroup; 4] = read_from(reader).context("Reading partition groups")?;
let mut part_info = Vec::new();
@ -241,7 +443,7 @@ fn read_partition_info(reader: &mut DiscReader) -> Result<Vec<PartitionInfo>> {
reader
.seek(SeekFrom::Start(offset))
.with_context(|| format!("Seeking to partition data {group_idx}:{part_idx}"))?;
let header: Box<WiiPartitionHeader> = read_box(reader)
let header: Arc<WiiPartitionHeader> = read_arc(reader)
.with_context(|| format!("Reading partition header {group_idx}:{part_idx}"))?;
let key = header.ticket.decrypt_title_key()?;
@ -251,8 +453,10 @@ fn read_partition_info(reader: &mut DiscReader) -> Result<Vec<PartitionInfo>> {
"Partition {group_idx}:{part_idx} offset is not sector aligned",
)));
}
let data_start_offset = entry.offset() + header.data_off();
let data_end_offset = data_start_offset + header.data_size();
let data_size = header.data_size();
let data_end_offset = data_start_offset + data_size;
if data_start_offset % SECTOR_SIZE as u64 != 0
|| data_end_offset % SECTOR_SIZE as u64 != 0
{
@ -260,30 +464,71 @@ fn read_partition_info(reader: &mut DiscReader) -> Result<Vec<PartitionInfo>> {
"Partition {group_idx}:{part_idx} data is not sector aligned",
)));
}
let mut info = PartitionInfo {
let start_sector = (start_offset / SECTOR_SIZE as u64) as u32;
let data_start_sector = (data_start_offset / SECTOR_SIZE as u64) as u32;
let mut data_end_sector = (data_end_offset / SECTOR_SIZE as u64) as u32;
reader.reset(DirectDiscReaderMode::Partition {
disc_header: disc_header.clone(),
data_start_sector,
key,
});
let raw_boot: Arc<[u8; BOOT_SIZE]> = read_arc(reader).context("Reading boot data")?;
let partition_disc_header =
DiscHeader::ref_from_bytes(array_ref![raw_boot, 0, size_of::<DiscHeader>()])
.expect("Invalid disc header alignment");
let boot_header = BootHeader::ref_from_bytes(&raw_boot[BB2_OFFSET..])
.expect("Invalid boot header alignment");
let raw_fst = if partition_disc_header.is_wii() {
let raw_fst = read_fst(reader, boot_header, true)?;
match Fst::new(&raw_fst) {
Ok(fst) => {
let max_fst_offset = fst
.nodes
.iter()
.filter_map(|n| match n.kind() {
NodeKind::File => Some(n.offset(true) + n.length() as u64),
_ => None,
})
.max()
.unwrap_or(0);
if max_fst_offset > data_size {
if data_size == 0 {
// Guess data size for decrypted partitions
data_end_sector =
max_fst_offset.div_ceil(SECTOR_SIZE as u64) as u32;
} else {
return Err(Error::DiscFormat(format!(
"Partition {group_idx}:{part_idx} FST exceeds data size",
)));
}
}
Some(raw_fst)
}
Err(e) => {
warn!("Partition {group_idx}:{part_idx} FST is not valid: {e}");
None
}
}
} else {
warn!("Partition {group_idx}:{part_idx} is not valid");
None
};
reader.reset(DirectDiscReaderMode::Raw);
part_info.push(PartitionInfo {
index: part_info.len(),
kind: entry.kind.get().into(),
start_sector: (start_offset / SECTOR_SIZE as u64) as u32,
data_start_sector: (data_start_offset / SECTOR_SIZE as u64) as u32,
data_end_sector: (data_end_offset / SECTOR_SIZE as u64) as u32,
start_sector,
data_start_sector,
data_end_sector,
key,
header,
disc_header: DiscHeader::new_box_zeroed(),
partition_header: PartitionHeader::new_box_zeroed(),
hash_table: None,
};
let mut partition_reader = PartitionWii::new(
reader.io.clone(),
reader.disc_header.clone(),
&info,
&OpenOptions::default(),
)?;
info.disc_header = read_box(&mut partition_reader).context("Reading disc header")?;
info.partition_header =
read_box(&mut partition_reader).context("Reading partition header")?;
part_info.push(info);
has_encryption: disc_header.has_partition_encryption(),
has_hashes: disc_header.has_partition_hashes(),
raw_boot,
raw_fst,
});
}
}
Ok(part_info)
@ -292,18 +537,9 @@ fn read_partition_info(reader: &mut DiscReader) -> Result<Vec<PartitionInfo>> {
fn guess_disc_size(part_info: &[PartitionInfo]) -> u64 {
let max_offset = part_info
.iter()
.flat_map(|v| {
let offset = v.start_sector as u64 * SECTOR_SIZE as u64;
[
offset + v.header.tmd_off() + v.header.tmd_size(),
offset + v.header.cert_chain_off() + v.header.cert_chain_size(),
offset + v.header.h3_table_off() + v.header.h3_table_size(),
offset + v.header.data_off() + v.header.data_size(),
]
})
.map(|v| v.data_end_sector as u64 * SECTOR_SIZE as u64)
.max()
.unwrap_or(0x50000);
// TODO add FST offsets (decrypted partitions)
if max_offset <= MINI_DVD_SIZE && !part_info.iter().any(|v| v.kind == PartitionKind::Data) {
// Datel disc
MINI_DVD_SIZE

View File

@ -1,42 +1,53 @@
//! Wii disc types.
use std::{
cmp::min,
ffi::CStr,
io,
io::{Read, Seek, SeekFrom},
io::{BufRead, Seek, SeekFrom},
mem::size_of,
sync::Arc,
};
use sha1::{Digest, Sha1};
use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes};
use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout, big_endian::*};
use crate::{
array_ref,
Error, Result, ResultContext,
common::{HashBytes, KeyBytes, PartitionInfo},
disc::{
gcn::{read_part_meta, PartitionGC},
PartitionBase, PartitionMeta, SECTOR_SIZE,
SECTOR_GROUP_SIZE, SECTOR_SIZE,
gcn::{PartitionReaderGC, read_part_meta},
preloader::{Preloader, SectorGroup, SectorGroupRequest, fetch_sector_group},
},
fst::{Node, NodeKind},
io::{
aes_decrypt,
block::{Block, BlockIO, PartitionInfo},
KeyBytes,
read::{PartitionEncryption, PartitionMeta, PartitionOptions, PartitionReader},
util::{
aes::aes_cbc_decrypt,
array_ref,
digest::sha1_hash,
div_rem, impl_read_for_bufread,
read::{read_arc, read_arc_slice},
static_assert,
},
static_assert,
streams::{ReadStream, SharedWindowedReadStream},
util::{div_rem, read::read_box_slice},
DiscHeader, Error, OpenOptions, Result, ResultContext,
};
/// Size in bytes of the hashes block in a Wii disc sector
pub(crate) const HASHES_SIZE: usize = 0x400;
pub const HASHES_SIZE: usize = 0x400;
/// Size in bytes of the data block in a Wii disc sector (excluding hashes)
pub(crate) const SECTOR_DATA_SIZE: usize = SECTOR_SIZE - HASHES_SIZE; // 0x7C00
pub const SECTOR_DATA_SIZE: usize = SECTOR_SIZE - HASHES_SIZE; // 0x7C00
/// Size in bytes of the disc region info (region.bin)
pub const REGION_SIZE: usize = 0x20;
/// Size in bytes of the H3 table (h3.bin)
pub const H3_TABLE_SIZE: usize = 0x18000;
/// Offset of the disc region info
pub const REGION_OFFSET: u64 = 0x4E000;
// ppki (Retail)
const RVL_CERT_ISSUER_PPKI_TICKET: &str = "Root-CA00000001-XS00000003";
pub(crate) const RVL_CERT_ISSUER_PPKI_TICKET: &str = "Root-CA00000001-XS00000003";
#[rustfmt::skip]
const RETAIL_COMMON_KEYS: [KeyBytes; 3] = [
pub(crate) static RETAIL_COMMON_KEYS: [KeyBytes; 3] = [
/* RVL_KEY_RETAIL */
[0xeb, 0xe4, 0x2a, 0x22, 0x5e, 0x85, 0x93, 0xe4, 0x48, 0xd9, 0xc5, 0x45, 0x73, 0x81, 0xaa, 0xf7],
/* RVL_KEY_KOREAN */
@ -46,9 +57,9 @@ const RETAIL_COMMON_KEYS: [KeyBytes; 3] = [
];
// dpki (Debug)
const RVL_CERT_ISSUER_DPKI_TICKET: &str = "Root-CA00000002-XS00000006";
pub(crate) const RVL_CERT_ISSUER_DPKI_TICKET: &str = "Root-CA00000002-XS00000006";
#[rustfmt::skip]
const DEBUG_COMMON_KEYS: [KeyBytes; 3] = [
pub(crate) static DEBUG_COMMON_KEYS: [KeyBytes; 3] = [
/* RVL_KEY_DEBUG */
[0xa1, 0x60, 0x4a, 0x6a, 0x71, 0x23, 0xb5, 0x29, 0xae, 0x8b, 0xec, 0x32, 0xc8, 0x16, 0xfc, 0xaa],
/* RVL_KEY_KOREAN_DEBUG */
@ -57,7 +68,7 @@ const DEBUG_COMMON_KEYS: [KeyBytes; 3] = [
[0x2f, 0x5c, 0x1b, 0x29, 0x44, 0xe7, 0xfd, 0x6f, 0xc3, 0x97, 0x96, 0x4b, 0x05, 0x76, 0x91, 0xfa],
];
#[derive(Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[derive(Debug, PartialEq, FromBytes, IntoBytes, Immutable, KnownLayout)]
#[repr(C, align(4))]
pub(crate) struct WiiPartEntry {
pub(crate) offset: U32,
@ -72,7 +83,7 @@ impl WiiPartEntry {
pub(crate) const WII_PART_GROUP_OFF: u64 = 0x40000;
#[derive(Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[derive(Debug, PartialEq, FromBytes, IntoBytes, Immutable, KnownLayout)]
#[repr(C, align(4))]
pub(crate) struct WiiPartGroup {
pub(crate) part_count: U32,
@ -85,7 +96,8 @@ impl WiiPartGroup {
pub(crate) fn part_entry_off(&self) -> u64 { (self.part_entry_off.get() as u64) << 2 }
}
#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
/// Signed blob header
#[derive(Debug, Clone, PartialEq, FromBytes, IntoBytes, Immutable, KnownLayout)]
#[repr(C, align(4))]
pub struct SignedHeader {
/// Signature type, always 0x00010001 (RSA-2048)
@ -97,43 +109,63 @@ pub struct SignedHeader {
static_assert!(size_of::<SignedHeader>() == 0x140);
#[derive(Debug, Clone, PartialEq, Default, FromBytes, FromZeroes, AsBytes)]
/// Ticket limit
#[derive(Debug, Clone, PartialEq, Default, FromBytes, IntoBytes, Immutable, KnownLayout)]
#[repr(C, align(4))]
pub struct TicketTimeLimit {
pub enable_time_limit: U32,
pub time_limit: U32,
pub struct TicketLimit {
/// Limit type
pub limit_type: U32,
/// Maximum value for the limit
pub max_value: U32,
}
static_assert!(size_of::<TicketTimeLimit>() == 8);
static_assert!(size_of::<TicketLimit>() == 8);
#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
/// Wii ticket
#[derive(Debug, Clone, PartialEq, FromBytes, IntoBytes, Immutable, KnownLayout)]
#[repr(C, align(4))]
pub struct Ticket {
/// Signed blob header
pub header: SignedHeader,
/// Signature issuer
pub sig_issuer: [u8; 64],
/// ECDH data
pub ecdh: [u8; 60],
/// Ticket format version
pub version: u8,
_pad1: U16,
/// Title key (encrypted)
pub title_key: KeyBytes,
_pad2: u8,
/// Ticket ID
pub ticket_id: [u8; 8],
/// Console ID
pub console_id: [u8; 4],
/// Title ID
pub title_id: [u8; 8],
_pad3: U16,
/// Ticket title version
pub ticket_title_version: U16,
/// Permitted titles mask
pub permitted_titles_mask: U32,
/// Permit mask
pub permit_mask: U32,
/// Title export allowed
pub title_export_allowed: u8,
/// Common key index
pub common_key_idx: u8,
_pad4: [u8; 48],
/// Content access permissions
pub content_access_permissions: [u8; 64],
_pad5: [u8; 2],
pub time_limits: [TicketTimeLimit; 8],
/// Ticket limits
pub limits: [TicketLimit; 8],
}
static_assert!(size_of::<Ticket>() == 0x2A4);
impl Ticket {
/// Decrypts the ticket title key using the appropriate common key
pub fn decrypt_title_key(&self) -> Result<KeyBytes> {
let mut iv: KeyBytes = [0; 16];
iv[..8].copy_from_slice(&self.title_id);
@ -153,44 +185,81 @@ impl Ticket {
format!("unknown common key index {}", self.common_key_idx),
))?;
let mut title_key = self.title_key;
aes_decrypt(common_key, iv, &mut title_key);
aes_cbc_decrypt(common_key, &iv, &mut title_key);
Ok(title_key)
}
}
#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
/// Title metadata header
#[derive(Debug, Clone, PartialEq, FromBytes, IntoBytes, Immutable, KnownLayout)]
#[repr(C, align(4))]
pub struct TmdHeader {
/// Signed blob header
pub header: SignedHeader,
/// Signature issuer
pub sig_issuer: [u8; 64],
/// Version
pub version: u8,
/// CA CRL version
pub ca_crl_version: u8,
/// Signer CRL version
pub signer_crl_version: u8,
/// Is vWii title
pub is_vwii: u8,
/// IOS ID
pub ios_id: [u8; 8],
/// Title ID
pub title_id: [u8; 8],
/// Title type
pub title_type: u32,
/// Group ID
pub group_id: U16,
_pad1: [u8; 2],
/// Region
pub region: U16,
/// Ratings
pub ratings: KeyBytes,
_pad2: [u8; 12],
/// IPC mask
pub ipc_mask: [u8; 12],
_pad3: [u8; 18],
/// Access flags
pub access_flags: U32,
/// Title version
pub title_version: U16,
/// Number of contents
pub num_contents: U16,
/// Boot index
pub boot_idx: U16,
/// Minor version (unused)
pub minor_version: U16,
}
static_assert!(size_of::<TmdHeader>() == 0x1E4);
pub const H3_TABLE_SIZE: usize = 0x18000;
/// TMD content metadata
#[derive(Clone, Debug, PartialEq, FromBytes, IntoBytes, Immutable, KnownLayout)]
#[repr(C, align(4))]
pub struct ContentMetadata {
/// Content ID
pub content_id: U32,
/// Content index
pub content_index: U16,
/// Content type
pub content_type: U16,
/// Content size
pub size: U64,
/// Content hash
pub hash: HashBytes,
}
#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
static_assert!(size_of::<ContentMetadata>() == 0x24);
/// Wii partition header.
#[derive(Debug, Clone, PartialEq, FromBytes, IntoBytes, Immutable, KnownLayout)]
#[repr(C, align(4))]
pub struct WiiPartitionHeader {
/// Ticket
pub ticket: Ticket,
tmd_size: U32,
tmd_off: U32,
@ -204,246 +273,269 @@ pub struct WiiPartitionHeader {
static_assert!(size_of::<WiiPartitionHeader>() == 0x2C0);
impl WiiPartitionHeader {
/// TMD size in bytes
pub fn tmd_size(&self) -> u64 { self.tmd_size.get() as u64 }
/// TMD offset in bytes (relative to the partition start)
pub fn tmd_off(&self) -> u64 { (self.tmd_off.get() as u64) << 2 }
/// Certificate chain size in bytes
pub fn cert_chain_size(&self) -> u64 { self.cert_chain_size.get() as u64 }
/// Certificate chain offset in bytes (relative to the partition start)
pub fn cert_chain_off(&self) -> u64 { (self.cert_chain_off.get() as u64) << 2 }
/// H3 table offset in bytes (relative to the partition start)
pub fn h3_table_off(&self) -> u64 { (self.h3_table_off.get() as u64) << 2 }
/// H3 table size in bytes (always H3_TABLE_SIZE)
pub fn h3_table_size(&self) -> u64 { H3_TABLE_SIZE as u64 }
/// Data offset in bytes (relative to the partition start)
pub fn data_off(&self) -> u64 { (self.data_off.get() as u64) << 2 }
/// Data size in bytes
pub fn data_size(&self) -> u64 { (self.data_size.get() as u64) << 2 }
}
pub struct PartitionWii {
io: Box<dyn BlockIO>,
pub(crate) struct PartitionReaderWii {
preloader: Arc<Preloader>,
partition: PartitionInfo,
block: Block,
block_buf: Box<[u8]>,
block_idx: u32,
sector_buf: Box<[u8; SECTOR_SIZE]>,
sector: u32,
pos: u64,
verify: bool,
raw_tmd: Box<[u8]>,
raw_cert_chain: Box<[u8]>,
raw_h3_table: Box<[u8]>,
options: PartitionOptions,
sector_group: Option<SectorGroup>,
meta: Option<PartitionMeta>,
}
impl Clone for PartitionWii {
impl Clone for PartitionReaderWii {
fn clone(&self) -> Self {
Self {
io: self.io.clone(),
preloader: self.preloader.clone(),
partition: self.partition.clone(),
block: Block::default(),
block_buf: <u8>::new_box_slice_zeroed(self.block_buf.len()),
block_idx: u32::MAX,
sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed(),
sector: u32::MAX,
pos: 0,
verify: self.verify,
raw_tmd: self.raw_tmd.clone(),
raw_cert_chain: self.raw_cert_chain.clone(),
raw_h3_table: self.raw_h3_table.clone(),
options: self.options.clone(),
sector_group: None,
meta: self.meta.clone(),
}
}
}
impl PartitionWii {
impl PartitionReaderWii {
pub fn new(
inner: Box<dyn BlockIO>,
disc_header: Box<DiscHeader>,
preloader: Arc<Preloader>,
partition: &PartitionInfo,
options: &OpenOptions,
options: &PartitionOptions,
) -> Result<Box<Self>> {
let block_size = inner.block_size();
let mut reader = PartitionGC::new(inner, disc_header)?;
// Read TMD, cert chain, and H3 table
let offset = partition.start_sector as u64 * SECTOR_SIZE as u64;
reader
.seek(SeekFrom::Start(offset + partition.header.tmd_off()))
.context("Seeking to TMD offset")?;
let raw_tmd: Box<[u8]> = read_box_slice(&mut reader, partition.header.tmd_size() as usize)
.context("Reading TMD")?;
reader
.seek(SeekFrom::Start(offset + partition.header.cert_chain_off()))
.context("Seeking to cert chain offset")?;
let raw_cert_chain: Box<[u8]> =
read_box_slice(&mut reader, partition.header.cert_chain_size() as usize)
.context("Reading cert chain")?;
reader
.seek(SeekFrom::Start(offset + partition.header.h3_table_off()))
.context("Seeking to H3 table offset")?;
let raw_h3_table: Box<[u8]> =
read_box_slice(&mut reader, H3_TABLE_SIZE).context("Reading H3 table")?;
Ok(Box::new(Self {
io: reader.into_inner(),
let mut reader = Self {
preloader,
partition: partition.clone(),
block: Block::default(),
block_buf: <u8>::new_box_slice_zeroed(block_size as usize),
block_idx: u32::MAX,
sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed(),
sector: u32::MAX,
pos: 0,
verify: options.validate_hashes,
raw_tmd,
raw_cert_chain,
raw_h3_table,
}))
options: options.clone(),
sector_group: None,
meta: None,
};
if options.validate_hashes {
// Ensure we cache the H3 table
reader.meta()?;
}
Ok(Box::new(reader))
}
#[inline]
pub fn len(&self) -> u64 { self.partition.data_size() }
}
impl Read for PartitionWii {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let part_sector = (self.pos / SECTOR_DATA_SIZE as u64) as u32;
impl BufRead for PartitionReaderWii {
fn fill_buf(&mut self) -> io::Result<&[u8]> {
let (part_sector, sector_offset) = if self.partition.has_hashes {
(
(self.pos / SECTOR_DATA_SIZE as u64) as u32,
(self.pos % SECTOR_DATA_SIZE as u64) as usize,
)
} else {
((self.pos / SECTOR_SIZE as u64) as u32, (self.pos % SECTOR_SIZE as u64) as usize)
};
let abs_sector = self.partition.data_start_sector + part_sector;
if abs_sector >= self.partition.data_end_sector {
return Ok(0);
}
let block_idx =
(abs_sector as u64 * SECTOR_SIZE as u64 / self.block_buf.len() as u64) as u32;
// Read new block if necessary
if block_idx != self.block_idx {
self.block =
self.io.read_block(self.block_buf.as_mut(), block_idx, Some(&self.partition))?;
self.block_idx = block_idx;
return Ok(&[]);
}
// Decrypt sector if necessary
if abs_sector != self.sector {
self.block.decrypt(
self.sector_buf.as_mut(),
self.block_buf.as_ref(),
abs_sector,
&self.partition,
)?;
if self.verify {
verify_hashes(self.sector_buf.as_ref(), part_sector, self.raw_h3_table.as_ref())?;
let group_idx = part_sector / 64;
let group_sector = part_sector % 64;
let max_groups =
(self.partition.data_end_sector - self.partition.data_start_sector).div_ceil(64);
let request = SectorGroupRequest {
group_idx,
partition_idx: Some(self.partition.index as u8),
mode: if self.options.validate_hashes {
PartitionEncryption::ForceDecrypted
} else {
PartitionEncryption::ForceDecryptedNoHashes
},
force_rehash: false,
};
// Load sector group
let (sector_group, updated) =
fetch_sector_group(request, max_groups, &mut self.sector_group, &self.preloader)?;
if updated && self.options.validate_hashes {
if let Some(h3_table) = self.meta.as_ref().and_then(|m| m.raw_h3_table.as_deref()) {
verify_hashes(
array_ref![sector_group.data, 0, SECTOR_GROUP_SIZE],
group_idx,
h3_table,
)?;
}
self.sector = abs_sector;
}
let offset = (self.pos % SECTOR_DATA_SIZE as u64) as usize;
let len = min(buf.len(), SECTOR_DATA_SIZE - offset);
buf[..len]
.copy_from_slice(&self.sector_buf[HASHES_SIZE + offset..HASHES_SIZE + offset + len]);
self.pos += len as u64;
Ok(len)
// Read from sector group buffer
let consecutive_sectors = sector_group.consecutive_sectors(group_sector);
if consecutive_sectors == 0 {
return Ok(&[]);
}
let group_sector_offset = group_sector as usize * SECTOR_SIZE;
if self.partition.has_hashes {
// Read until end of sector (avoid the next hash block)
let offset = group_sector_offset + HASHES_SIZE + sector_offset;
let end = group_sector_offset + SECTOR_SIZE;
Ok(&sector_group.data[offset..end])
} else {
// Read until end of sector group (no hashes)
let offset = group_sector_offset + sector_offset;
let end = (group_sector + consecutive_sectors) as usize * SECTOR_SIZE;
Ok(&sector_group.data[offset..end])
}
}
#[inline]
fn consume(&mut self, amt: usize) { self.pos += amt as u64; }
}
impl Seek for PartitionWii {
impl_read_for_bufread!(PartitionReaderWii);
impl Seek for PartitionReaderWii {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
self.pos = match pos {
SeekFrom::Start(v) => v,
SeekFrom::End(_) => {
return Err(io::Error::new(
io::ErrorKind::Unsupported,
"WiiPartitionReader: SeekFrom::End is not supported".to_string(),
));
}
SeekFrom::End(v) => self.len().saturating_add_signed(v),
SeekFrom::Current(v) => self.pos.saturating_add_signed(v),
};
Ok(self.pos)
}
fn stream_position(&mut self) -> io::Result<u64> { Ok(self.pos) }
}
#[inline(always)]
pub(crate) fn as_digest(slice: &[u8; 20]) -> digest::Output<Sha1> { (*slice).into() }
fn verify_hashes(buf: &[u8; SECTOR_GROUP_SIZE], group_idx: u32, h3_table: &[u8]) -> io::Result<()> {
for sector in 0..64 {
let buf = array_ref![buf, sector * SECTOR_SIZE, SECTOR_SIZE];
let part_sector = group_idx * 64 + sector as u32;
let (cluster, sector) = div_rem(part_sector as usize, 8);
let (group, sub_group) = div_rem(cluster, 8);
fn verify_hashes(buf: &[u8; SECTOR_SIZE], part_sector: u32, h3_table: &[u8]) -> io::Result<()> {
let (cluster, sector) = div_rem(part_sector as usize, 8);
let (group, sub_group) = div_rem(cluster, 8);
// H0 hashes
for i in 0..31 {
let mut hash = Sha1::new();
hash.update(array_ref![buf, (i + 1) * 0x400, 0x400]);
let expected = as_digest(array_ref![buf, i * 20, 20]);
let output = hash.finalize();
if output != expected {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Invalid H0 hash! (block {:?}) {:x}\n\texpected {:x}", i, output, expected),
));
// H0 hashes
for i in 0..31 {
let expected = array_ref![buf, i * 20, 20];
let output = sha1_hash(array_ref![buf, (i + 1) * 0x400, 0x400]);
if output != *expected {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Invalid H0 hash! (block {i})"),
));
}
}
}
// H1 hash
{
let mut hash = Sha1::new();
hash.update(array_ref![buf, 0, 0x26C]);
let expected = as_digest(array_ref![buf, 0x280 + sector * 20, 20]);
let output = hash.finalize();
if output != expected {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Invalid H1 hash! (subgroup {:?}) {:x}\n\texpected {:x}",
sector, output, expected
),
));
// H1 hash
{
let expected = array_ref![buf, 0x280 + sector * 20, 20];
let output = sha1_hash(array_ref![buf, 0, 0x26C]);
if output != *expected {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Invalid H1 hash! (sector {sector})",),
));
}
}
}
// H2 hash
{
let mut hash = Sha1::new();
hash.update(array_ref![buf, 0x280, 0xA0]);
let expected = as_digest(array_ref![buf, 0x340 + sub_group * 20, 20]);
let output = hash.finalize();
if output != expected {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Invalid H2 hash! (group {:?}) {:x}\n\texpected {:x}",
sub_group, output, expected
),
));
// H2 hash
{
let expected = array_ref![buf, 0x340 + sub_group * 20, 20];
let output = sha1_hash(array_ref![buf, 0x280, 0xA0]);
if output != *expected {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Invalid H2 hash! (subgroup {sub_group})"),
));
}
}
}
// H3 hash
{
let mut hash = Sha1::new();
hash.update(array_ref![buf, 0x340, 0xA0]);
let expected = as_digest(array_ref![h3_table, group * 20, 20]);
let output = hash.finalize();
if output != expected {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Invalid H3 hash! {:x}\n\texpected {:x}", output, expected),
));
// H3 hash
{
let expected = array_ref![h3_table, group * 20, 20];
let output = sha1_hash(array_ref![buf, 0x340, 0xA0]);
if output != *expected {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Invalid H3 hash! (group {group})"),
));
}
}
}
Ok(())
}
impl PartitionBase for PartitionWii {
fn meta(&mut self) -> Result<Box<PartitionMeta>> {
self.seek(SeekFrom::Start(0)).context("Seeking to partition header")?;
impl PartitionReader for PartitionReaderWii {
fn is_wii(&self) -> bool { true }
fn meta(&mut self) -> Result<PartitionMeta> {
if let Some(meta) = &self.meta {
return Ok(meta.clone());
}
self.rewind().context("Seeking to partition header")?;
let mut meta = read_part_meta(self, true)?;
meta.raw_ticket = Some(Box::from(self.partition.header.ticket.as_bytes()));
meta.raw_tmd = Some(self.raw_tmd.clone());
meta.raw_cert_chain = Some(self.raw_cert_chain.clone());
meta.raw_h3_table = Some(self.raw_h3_table.clone());
meta.raw_ticket = Some(Arc::from(self.partition.header.ticket.as_bytes()));
// Read TMD, cert chain, and H3 table
let mut reader = PartitionReaderGC::new(self.preloader.clone(), u64::MAX)?;
let offset = self.partition.start_sector as u64 * SECTOR_SIZE as u64;
meta.raw_tmd = if self.partition.header.tmd_size() != 0 {
reader
.seek(SeekFrom::Start(offset + self.partition.header.tmd_off()))
.context("Seeking to TMD offset")?;
Some(
read_arc_slice::<u8, _>(&mut reader, self.partition.header.tmd_size() as usize)
.context("Reading TMD")?,
)
} else {
None
};
meta.raw_cert_chain = if self.partition.header.cert_chain_size() != 0 {
reader
.seek(SeekFrom::Start(offset + self.partition.header.cert_chain_off()))
.context("Seeking to cert chain offset")?;
Some(
read_arc_slice::<u8, _>(
&mut reader,
self.partition.header.cert_chain_size() as usize,
)
.context("Reading cert chain")?,
)
} else {
None
};
meta.raw_h3_table = if self.partition.has_hashes {
reader
.seek(SeekFrom::Start(offset + self.partition.header.h3_table_off()))
.context("Seeking to H3 table offset")?;
Some(read_arc::<[u8; H3_TABLE_SIZE], _>(&mut reader).context("Reading H3 table")?)
} else {
None
};
self.meta = Some(meta.clone());
Ok(meta)
}
fn open_file(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream> {
assert_eq!(node.kind(), NodeKind::File);
self.new_window(node.offset(true), node.length())
}
fn ideal_buffer_size(&self) -> usize { SECTOR_DATA_SIZE }
}
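
For reference, the hash tree that verify_hashes walks: each sector's 0x7C00-byte data area is split into 31 blocks of 0x400 bytes, each hashed into one of the sector's 31 H0 entries; 8 sectors form a subgroup whose H0 tables are hashed into H1 entries; 8 subgroups form a group whose H1 tables are hashed into H2 entries; and each group's H2 table is hashed into the partition-wide H3 table. A small sketch of the slot math, mirroring the div_rem calls above (the helper name is illustrative):

// Given a partition-relative sector index, return the (H3, H2, H1) slot
// indices used to look up expected hashes. Matches verify_hashes above.
fn hash_slots(part_sector: u32) -> (usize, usize, usize) {
    let (cluster, sector) = div_rem(part_sector as usize, 8); // H1 slot within the subgroup
    let (group, sub_group) = div_rem(cluster, 8); // H2 slot within the group, H3 slot in the table
    (group, sub_group, sector)
}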

257 nod/src/disc/writer.rs Normal file
View File

@ -0,0 +1,257 @@
use std::{
collections::VecDeque,
io,
io::{BufRead, Read},
};
use bytes::{Bytes, BytesMut};
use dyn_clone::DynClone;
use crate::{
Error, Result, ResultContext,
common::PartitionInfo,
disc::{
SECTOR_SIZE,
reader::DiscReader,
wii::{HASHES_SIZE, SECTOR_DATA_SIZE},
},
util::{aes::decrypt_sector_b2b, array_ref, array_ref_mut, lfg::LaggedFibonacci},
write::{DataCallback, DiscFinalization, DiscWriterWeight, ProcessOptions},
};
/// A trait for writing disc images.
pub trait DiscWriter: DynClone {
/// Processes the disc writer to completion.
///
/// The data callback will be called, in order, for each block of data to write to the output
/// file. The callback should write all data before returning, or return an error if writing
/// fails.
fn process(
&self,
data_callback: &mut DataCallback,
options: &ProcessOptions,
) -> Result<DiscFinalization>;
/// Returns the progress upper bound for the disc writer.
///
/// For most formats, this has no relation to the written disc size, but can be used to display
/// progress.
fn progress_bound(&self) -> u64;
/// Returns the weight of the disc writer.
///
/// This can help determine the number of threads to dedicate for output processing, and may
/// differ based on the format's configuration, such as whether compression is enabled.
fn weight(&self) -> DiscWriterWeight;
}
dyn_clone::clone_trait_object!(DiscWriter);
#[derive(Default)]
pub struct BlockResult<T> {
/// Input block index
pub block_idx: u32,
/// Input disc data (before processing)
pub disc_data: Bytes,
/// Output block data (after processing). If None, the disc data is used.
pub block_data: Bytes,
/// Output metadata
pub meta: T,
}
pub trait BlockProcessor: Clone + Send {
type BlockMeta;
fn process_block(&mut self, block_idx: u32) -> io::Result<BlockResult<Self::BlockMeta>>;
}
pub fn read_block(reader: &mut DiscReader, block_size: usize) -> io::Result<(Bytes, Bytes)> {
let initial_block = reader.fill_buf_internal()?;
if initial_block.len() >= block_size {
// Happy path: we have a full block that we can cheaply slice
let data = initial_block.slice(0..block_size);
reader.consume(block_size);
return Ok((data.clone(), data));
} else if initial_block.is_empty() {
return Err(io::Error::from(io::ErrorKind::UnexpectedEof));
}
reader.consume(initial_block.len());
// Combine smaller blocks into a new buffer
let mut buf = BytesMut::zeroed(block_size);
let mut len = initial_block.len();
buf[..len].copy_from_slice(initial_block.as_ref());
drop(initial_block);
while len < block_size {
let read = reader.read(&mut buf[len..])?;
if read == 0 {
break;
}
len += read;
}
// The block data is full size, padded with zeroes
let block_data = buf.freeze();
// The disc data is the actual data read, without padding
let disc_data = block_data.slice(0..len);
Ok((block_data, disc_data))
}
/// Process blocks in parallel, ensuring that they are written in order.
pub(crate) fn par_process<P, T>(
mut processor: P,
block_count: u32,
num_threads: usize,
mut callback: impl FnMut(BlockResult<T>) -> Result<()>,
) -> Result<()>
where
T: Send,
P: BlockProcessor<BlockMeta = T>,
{
if num_threads == 0 {
// Fall back to single-threaded processing
for block_idx in 0..block_count {
let block = processor
.process_block(block_idx)
.with_context(|| format!("Failed to process block {block_idx}"))?;
callback(block)?;
}
return Ok(());
}
std::thread::scope(|s| {
let (block_tx, block_rx) = crossbeam_channel::bounded(block_count as usize);
for block_idx in 0..block_count {
block_tx.send(block_idx).unwrap();
}
drop(block_tx); // Disconnect channel
let (result_tx, result_rx) = crossbeam_channel::bounded(0);
// Spawn threads to process blocks
for _ in 0..num_threads - 1 {
let block_rx = block_rx.clone();
let result_tx = result_tx.clone();
let mut processor = processor.clone();
s.spawn(move || {
while let Ok(block_idx) = block_rx.recv() {
let result = processor
.process_block(block_idx)
.with_context(|| format!("Failed to process block {block_idx}"));
let failed = result.is_err(); // Stop processing if an error occurs
if result_tx.send(result).is_err() || failed {
break;
}
}
});
}
// Last iteration moves instead of cloning
s.spawn(move || {
while let Ok(block_idx) = block_rx.recv() {
let result = processor
.process_block(block_idx)
.with_context(|| format!("Failed to process block {block_idx}"));
let failed = result.is_err(); // Stop processing if an error occurs
if result_tx.send(result).is_err() || failed {
break;
}
}
});
// Main thread processes results
let mut current_block = 0;
let mut out_of_order = VecDeque::<BlockResult<T>>::new();
while let Ok(result) = result_rx.recv() {
let result = result?;
if result.block_idx == current_block {
callback(result)?;
current_block += 1;
// Check if any out of order blocks can be written
while out_of_order.front().is_some_and(|r| r.block_idx == current_block) {
callback(out_of_order.pop_front().unwrap())?;
current_block += 1;
}
} else {
// Insert sorted
match out_of_order.binary_search_by_key(&result.block_idx, |r| r.block_idx) {
Ok(_) => Err(Error::Other(format!("Unexpected duplicate block {}", result.block_idx)))?,
Err(idx) => out_of_order.insert(idx, result),
}
}
}
Ok(())
})
}
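// Example (illustrative): draining a disc through `par_process` with the pass-through
// processor sketched above. The callback runs on the calling thread, so the output
// buffer needs no synchronization; the thread count 4 is arbitrary.
fn copy_blocks(reader: &DiscReader, out: &mut Vec<u8>) -> Result<()> {
    let block_size = SECTOR_SIZE;
    let block_count = reader.disc_size().div_ceil(block_size as u64) as u32;
    let processor = PassthroughProcessor { inner: reader.clone(), block_size };
    par_process(processor, block_count, 4, |block| {
        out.extend_from_slice(block.disc_data.as_ref());
        Ok(())
    })
}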
/// The determined block type.
pub enum CheckBlockResult {
Normal,
Zeroed,
Junk,
}
/// Check if a block is zeroed or junk data.
pub(crate) fn check_block(
buf: &[u8],
decrypted_block: &mut [u8],
input_position: u64,
partition_info: &[PartitionInfo],
lfg: &mut LaggedFibonacci,
disc_id: [u8; 4],
disc_num: u8,
) -> io::Result<CheckBlockResult> {
let start_sector = (input_position / SECTOR_SIZE as u64) as u32;
let end_sector = ((input_position + buf.len() as u64) / SECTOR_SIZE as u64) as u32;
if let Some(partition) = partition_info.iter().find(|p| {
p.has_hashes && start_sector >= p.data_start_sector && end_sector < p.data_end_sector
}) {
if input_position % SECTOR_SIZE as u64 != 0 {
return Err(io::Error::other("Partition block not aligned to sector boundary"));
}
if buf.len() % SECTOR_SIZE != 0 {
return Err(io::Error::other("Partition block not a multiple of sector size"));
}
let block = if partition.has_encryption {
if decrypted_block.len() < buf.len() {
return Err(io::Error::other("Decrypted block buffer too small"));
}
for i in 0..buf.len() / SECTOR_SIZE {
decrypt_sector_b2b(
array_ref![buf, SECTOR_SIZE * i, SECTOR_SIZE],
array_ref_mut![decrypted_block, SECTOR_SIZE * i, SECTOR_SIZE],
&partition.key,
);
}
&decrypted_block[..buf.len()]
} else {
buf
};
if sector_data_iter(block).all(|sector_data| sector_data.iter().all(|&b| b == 0)) {
return Ok(CheckBlockResult::Zeroed);
}
let partition_start = partition.data_start_sector as u64 * SECTOR_SIZE as u64;
let partition_offset =
((input_position - partition_start) / SECTOR_SIZE as u64) * SECTOR_DATA_SIZE as u64;
if sector_data_iter(block).enumerate().all(|(i, sector_data)| {
let sector_offset = partition_offset + i as u64 * SECTOR_DATA_SIZE as u64;
lfg.check_sector_chunked(sector_data, disc_id, disc_num, sector_offset)
== sector_data.len()
}) {
return Ok(CheckBlockResult::Junk);
}
} else {
if buf.iter().all(|&b| b == 0) {
return Ok(CheckBlockResult::Zeroed);
}
if lfg.check_sector_chunked(buf, disc_id, disc_num, input_position) == buf.len() {
return Ok(CheckBlockResult::Junk);
}
}
Ok(CheckBlockResult::Normal)
}
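// Example (illustrative sketch): deciding whether a block needs to be stored.
// `must_store` is a hypothetical helper; zeroed and junk blocks can be dropped
// because readers regenerate them from the disc ID and position alone.
fn must_store(
    buf: &[u8],
    pos: u64,
    partitions: &[PartitionInfo],
    disc_id: [u8; 4],
    disc_num: u8,
) -> io::Result<bool> {
    let mut lfg = LaggedFibonacci::default();
    let mut decrypted = vec![0u8; buf.len()];
    Ok(matches!(
        check_block(buf, &mut decrypted, pos, partitions, &mut lfg, disc_id, disc_num)?,
        CheckBlockResult::Normal
    ))
}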
#[inline]
fn sector_data_iter(buf: &[u8]) -> impl Iterator<Item = &[u8; SECTOR_DATA_SIZE]> {
buf.chunks_exact(SECTOR_SIZE).map(|chunk| (&chunk[HASHES_SIZE..]).try_into().unwrap())
}

View File

@ -1,170 +0,0 @@
//! Disc file system types
use std::{borrow::Cow, ffi::CStr, mem::size_of};
use encoding_rs::SHIFT_JIS;
use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes};
use crate::{static_assert, Result};
/// File system node kind.
#[derive(Clone, Debug, PartialEq)]
pub enum NodeKind {
/// Node is a file.
File,
/// Node is a directory.
Directory,
/// Invalid node kind. (Should not normally occur)
Invalid,
}
/// An individual file system node.
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct Node {
kind: u8,
// u24 big-endian
name_offset: [u8; 3],
pub(crate) offset: U32,
length: U32,
}
static_assert!(size_of::<Node>() == 12);
impl Node {
/// File system node kind.
pub fn kind(&self) -> NodeKind {
match self.kind {
0 => NodeKind::File,
1 => NodeKind::Directory,
_ => NodeKind::Invalid,
}
}
/// Whether the node is a file.
pub fn is_file(&self) -> bool { self.kind == 0 }
/// Whether the node is a directory.
pub fn is_dir(&self) -> bool { self.kind == 1 }
/// Offset in the string table to the filename.
pub fn name_offset(&self) -> u32 {
u32::from_be_bytes([0, self.name_offset[0], self.name_offset[1], self.name_offset[2]])
}
/// For files, this is the partition offset of the file data. (Wii: >> 2)
///
/// For directories, this is the parent node index in the FST.
pub fn offset(&self, is_wii: bool) -> u64 {
if is_wii && self.kind == 0 {
self.offset.get() as u64 * 4
} else {
self.offset.get() as u64
}
}
/// For files, this is the byte size of the file.
///
/// For directories, this is the child end index in the FST.
///
/// Number of child files and directories recursively is `length - offset`.
pub fn length(&self) -> u64 { self.length.get() as u64 }
}
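// Example (illustrative): recovering a file's absolute byte range within a partition.
// `file_range` is a hypothetical helper; on Wii, file offsets are stored shifted
// right by 2, which `offset(true)` undoes.
fn file_range(node: &Node, is_wii: bool) -> Option<(u64, u64)> {
    node.is_file().then(|| {
        let start = node.offset(is_wii);
        (start, start + node.length())
    })
}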
/// A view into the file system table (FST).
pub struct Fst<'a> {
/// The nodes in the FST.
pub nodes: &'a [Node],
/// The string table containing all file and directory names.
pub string_table: &'a [u8],
}
impl<'a> Fst<'a> {
/// Create a new FST view from a buffer.
pub fn new(buf: &'a [u8]) -> Result<Self, &'static str> {
let Some(root_node) = Node::ref_from_prefix(buf) else {
return Err("FST root node not found");
};
// String table starts after the last node
let string_base = root_node.length() * size_of::<Node>() as u64;
if string_base >= buf.len() as u64 {
return Err("FST string table out of bounds");
}
let (node_buf, string_table) = buf.split_at(string_base as usize);
let nodes = Node::slice_from(node_buf).unwrap();
Ok(Self { nodes, string_table })
}
/// Iterate over the nodes in the FST.
pub fn iter(&self) -> FstIter { FstIter { fst: self, idx: 1 } }
/// Get the name of a node.
pub fn get_name(&self, node: &Node) -> Result<Cow<str>, String> {
let name_buf = self.string_table.get(node.name_offset() as usize..).ok_or_else(|| {
format!(
"FST: name offset {} out of bounds (string table size: {})",
node.name_offset(),
self.string_table.len()
)
})?;
let c_string = CStr::from_bytes_until_nul(name_buf).map_err(|_| {
format!("FST: name at offset {} not null-terminated", node.name_offset())
})?;
let (decoded, _, errors) = SHIFT_JIS.decode(c_string.to_bytes());
if errors {
return Err(format!("FST: Failed to decode name at offset {}", node.name_offset()));
}
Ok(decoded)
}
/// Finds a particular file or directory by path.
pub fn find(&self, path: &str) -> Option<(usize, &Node)> {
let mut split = path.trim_matches('/').split('/');
let mut current = split.next()?;
let mut idx = 1;
let mut stop_at = None;
while let Some(node) = self.nodes.get(idx) {
if self.get_name(node).as_ref().map_or(false, |name| name.eq_ignore_ascii_case(current))
{
if let Some(next) = split.next() {
current = next;
} else {
return Some((idx, node));
}
// Descend into directory
idx += 1;
stop_at = Some(node.length() as usize + idx);
} else if node.is_dir() {
// Skip directory
idx = node.length() as usize;
} else {
// Skip file
idx += 1;
}
if let Some(stop) = stop_at {
if idx >= stop {
break;
}
}
}
None
}
}
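// Example (illustrative): looking up a file in the FST and reading its size.
// `banner_size` is a hypothetical helper; `fst_buf` is assumed to hold the raw FST
// from a disc partition, and matching is case-insensitive per `find` above.
fn banner_size(fst_buf: &[u8]) -> Option<u64> {
    let fst = Fst::new(fst_buf).ok()?;
    let (_, node) = fst.find("/opening.bnr")?;
    node.is_file().then(|| node.length())
}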
/// Iterator over the nodes in an FST.
pub struct FstIter<'a> {
fst: &'a Fst<'a>,
idx: usize,
}
impl<'a> Iterator for FstIter<'a> {
type Item = (usize, &'a Node, Result<Cow<'a, str>, String>);
fn next(&mut self) -> Option<Self::Item> {
let idx = self.idx;
let node = self.fst.nodes.get(idx)?;
let name = self.fst.get_name(node);
self.idx += 1;
Some((idx, node, name))
}
}

View File

@ -1,85 +1,69 @@
use std::{fs, io, io::Read, path::Path};
use dyn_clone::DynClone;
use crate::{
    Error, Result, ResultContext,
    common::{Format, KeyBytes, MagicBytes, PartitionInfo},
    disc::{
        DiscHeader, GCN_MAGIC, SECTOR_SIZE, WII_MAGIC,
        wii::{HASHES_SIZE, SECTOR_DATA_SIZE},
    },
    io::{
        split::SplitFileReader,
        wia::{WIAException, WIAExceptionList},
    },
    read::{DiscMeta, DiscStream},
    util::{
        aes::decrypt_sector,
        array_ref, array_ref_mut,
        lfg::LaggedFibonacci,
        read::{read_at, read_from},
    },
};
/// Block reader trait for reading disc images.
pub trait BlockReader: DynClone + Send {
    /// Reads a block from the disc image containing the specified sector.
    fn read_block(&mut self, out: &mut [u8], sector: u32) -> io::Result<Block>;
    /// The block size used for processing. Must be a multiple of the sector size (0x8000).
    fn block_size(&self) -> u32;
    /// Returns extra metadata included in the disc file format, if any.
    fn meta(&self) -> DiscMeta;
}
dyn_clone::clone_trait_object!(BlockReader);
/// Creates a new [`BlockReader`] instance from a stream.
pub fn new(mut stream: Box<dyn DiscStream>) -> Result<Box<dyn BlockReader>> {
let io: Box<dyn BlockReader> =
match detect_stream(stream.as_mut()).context("Detecting file type")? {
Some(Format::Iso) => crate::io::iso::BlockReaderISO::new(stream)?,
Some(Format::Ciso) => crate::io::ciso::BlockReaderCISO::new(stream)?,
Some(Format::Gcz) => {
#[cfg(feature = "compress-zlib")]
{
crate::io::gcz::BlockReaderGCZ::new(stream)?
}
#[cfg(not(feature = "compress-zlib"))]
return Err(Error::DiscFormat("GCZ support is disabled".to_string()));
}
Some(Format::Nfs) => {
return Err(Error::DiscFormat("NFS requires a filesystem path".to_string()));
}
Some(Format::Wbfs) => crate::io::wbfs::BlockReaderWBFS::new(stream)?,
Some(Format::Wia | Format::Rvz) => crate::io::wia::BlockReaderWIA::new(stream)?,
Some(Format::Tgc) => crate::io::tgc::BlockReaderTGC::new(stream)?,
None => return Err(Error::DiscFormat("Unknown disc format".to_string())),
};
check_block_size(io.as_ref())?;
Ok(io)
}
/// Creates a new [`BlockReader`] instance from a filesystem path.
pub fn open(filename: &Path) -> Result<Box<dyn BlockReader>> {
let path_result = fs::canonicalize(filename);
if let Err(err) = path_result {
return Err(Error::Io(format!("Failed to open {}", filename.display()), err));
@ -92,40 +76,79 @@ pub fn open(filename: &Path) -> Result<Box<dyn BlockIO>> {
if !meta.unwrap().is_file() {
return Err(Error::DiscFormat(format!("Input is not a file: {}", filename.display())));
}
let mut stream = Box::new(SplitFileReader::new(filename)?);
let io: Box<dyn BlockReader> = match detect_stream(stream.as_mut())
.context("Detecting file type")?
{
Some(Format::Iso) => crate::io::iso::BlockReaderISO::new(stream)?,
Some(Format::Ciso) => crate::io::ciso::BlockReaderCISO::new(stream)?,
Some(Format::Gcz) => {
#[cfg(feature = "compress-zlib")]
{
crate::io::gcz::BlockReaderGCZ::new(stream)?
}
#[cfg(not(feature = "compress-zlib"))]
return Err(Error::DiscFormat("GCZ support is disabled".to_string()));
}
Some(Format::Nfs) => match path.parent() {
Some(parent) if parent.is_dir() => {
crate::io::nfs::BlockReaderNFS::new(path.parent().unwrap())?
}
_ => {
return Err(Error::DiscFormat("Failed to locate NFS parent directory".to_string()));
}
},
Some(Format::Tgc) => crate::io::tgc::BlockReaderTGC::new(stream)?,
Some(Format::Wbfs) => crate::io::wbfs::BlockReaderWBFS::new(stream)?,
Some(Format::Wia | Format::Rvz) => crate::io::wia::BlockReaderWIA::new(stream)?,
None => return Err(Error::DiscFormat("Unknown disc format".to_string())),
};
check_block_size(io.as_ref())?;
Ok(io)
}
pub const CISO_MAGIC: MagicBytes = *b"CISO";
pub const GCZ_MAGIC: MagicBytes = [0x01, 0xC0, 0x0B, 0xB1];
pub const NFS_MAGIC: MagicBytes = *b"EGGS";
pub const TGC_MAGIC: MagicBytes = [0xAE, 0x0F, 0x38, 0xA2];
pub const WBFS_MAGIC: MagicBytes = *b"WBFS";
pub const WIA_MAGIC: MagicBytes = *b"WIA\x01";
pub const RVZ_MAGIC: MagicBytes = *b"RVZ\x01";
pub fn detect<R>(stream: &mut R) -> io::Result<Option<Format>>
where R: Read + ?Sized {
match read_from(stream) {
Ok(ref magic) => Ok(detect_internal(magic)),
Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => Ok(None),
Err(e) => Err(e),
}
}
fn detect_stream(stream: &mut dyn DiscStream) -> io::Result<Option<Format>> {
match read_at(stream, 0) {
Ok(ref magic) => Ok(detect_internal(magic)),
Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => Ok(None),
Err(e) => Err(e),
}
}
fn detect_internal(data: &[u8; 0x20]) -> Option<Format> {
match *array_ref!(data, 0, 4) {
CISO_MAGIC => Some(Format::Ciso),
GCZ_MAGIC => Some(Format::Gcz),
NFS_MAGIC => Some(Format::Nfs),
TGC_MAGIC => Some(Format::Tgc),
WBFS_MAGIC => Some(Format::Wbfs),
WIA_MAGIC => Some(Format::Wia),
RVZ_MAGIC => Some(Format::Rvz),
_ if *array_ref!(data, 0x18, 4) == WII_MAGIC || *array_ref!(data, 0x1C, 4) == GCN_MAGIC => {
Some(Format::Iso)
}
_ => None,
}
}
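// Example (illustrative): probing a stream's container format without opening it
// fully. `detect_file` is a hypothetical helper; any `Read` source works, a `File`
// is assumed here.
fn detect_file(path: &Path) -> io::Result<Option<Format>> {
    let mut file = fs::File::open(path)?;
    detect(&mut file)
}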
fn check_block_size(io: &dyn BlockReader) -> Result<()> {
if io.block_size() % SECTOR_SIZE as u32 != 0 {
return Err(Error::DiscFormat(format!(
"Block size {} is not a multiple of sector size {}",
@ -133,219 +156,270 @@ pub fn open(filename: &Path) -> Result<Box<dyn BlockIO>> {
SECTOR_SIZE
)));
}
Ok(())
}
/// A block of sectors within a disc image.
#[derive(Debug, Clone, Default)]
pub struct Block {
/// The starting sector of the block.
pub sector: u32,
/// The number of sectors in the block.
pub count: u32,
/// The block kind.
pub kind: BlockKind,
/// Any hash exceptions for the block.
pub hash_exceptions: Box<[WIAExceptionList]>,
/// The duration of I/O operations, if available.
pub io_duration: Option<std::time::Duration>,
}
impl Block {
/// Creates a new block from a block of sectors.
#[inline]
pub fn new(block_idx: u32, block_size: u32, kind: BlockKind) -> Self {
let sectors_per_block = block_size / SECTOR_SIZE as u32;
Self {
sector: block_idx * sectors_per_block,
count: sectors_per_block,
kind,
hash_exceptions: Default::default(),
io_duration: None,
}
}
/// Creates a new block from a single sector.
#[inline]
pub fn sector(sector: u32, kind: BlockKind) -> Self {
Self { sector, count: 1, kind, hash_exceptions: Default::default(), io_duration: None }
}
/// Creates a new block from a range of sectors.
#[inline]
pub fn sectors(sector: u32, count: u32, kind: BlockKind) -> Self {
Self { sector, count, kind, hash_exceptions: Default::default(), io_duration: None }
}
/// Returns whether the block contains the specified sector.
#[inline]
pub fn contains(&self, sector: u32) -> bool {
sector >= self.sector && sector < self.sector + self.count
}
/// Returns an error if the block does not contain the specified sector.
pub fn ensure_contains(&self, sector: u32) -> io::Result<()> {
if !self.contains(sector) {
return Err(io::Error::other(format!(
"Sector {} not in block range {}-{}",
sector,
self.sector,
self.sector + self.count
)));
}
Ok(())
}
/// Decrypts block data in-place. The decrypted data can be accessed using
/// [`partition_data`](Block::partition_data).
pub(crate) fn decrypt_block(&self, data: &mut [u8], key: Option<KeyBytes>) -> io::Result<()> {
match self.kind {
BlockKind::None => {}
BlockKind::Raw => {
if let Some(key) = key {
for i in 0..self.count as usize {
decrypt_sector(array_ref_mut![data, i * SECTOR_SIZE, SECTOR_SIZE], &key);
}
}
}
BlockKind::PartDecrypted { .. } => {
// no-op
}
BlockKind::Junk => {
// unsupported, used for DirectDiscReader
data.fill(0);
}
BlockKind::Zero => data.fill(0),
}
Ok(())
}
/// Copies a sector's raw data to the output buffer. Returns whether the sector is encrypted
/// and whether it has hashes.
pub(crate) fn copy_sector(
&self,
out: &mut [u8; SECTOR_SIZE],
data: &[u8],
abs_sector: u32,
disc_header: &DiscHeader,
partition: Option<&PartitionInfo>,
) -> io::Result<(bool, bool)> {
let mut encrypted = false;
let mut has_hashes = false;
match self.kind {
BlockKind::None => {}
BlockKind::Raw => {
*out = *self.sector_buf(data, abs_sector)?;
if partition.is_some_and(|p| p.has_encryption) {
encrypted = true;
}
if partition.is_some_and(|p| p.has_hashes) {
has_hashes = true;
}
}
BlockKind::PartDecrypted { hash_block } => {
if hash_block {
*out = *self.sector_buf(data, abs_sector)?;
has_hashes = partition.is_some_and(|p| p.has_hashes);
} else {
*array_ref_mut![out, HASHES_SIZE, SECTOR_DATA_SIZE] =
*self.sector_data_buf(data, abs_sector)?;
}
}
BlockKind::Junk => generate_junk_sector(out, abs_sector, partition, disc_header),
BlockKind::Zero => out.fill(0),
}
Ok((encrypted, has_hashes))
}
/// Returns a sector's data from the block buffer.
pub(crate) fn sector_buf<'a>(
&self,
data: &'a [u8],
abs_sector: u32,
) -> io::Result<&'a [u8; SECTOR_SIZE]> {
self.ensure_contains(abs_sector)?;
let block_offset = ((abs_sector - self.sector) * SECTOR_SIZE as u32) as usize;
Ok(array_ref!(data, block_offset, SECTOR_SIZE))
}
/// Returns a sector's partition data (excluding hashes) from the block buffer.
pub(crate) fn sector_data_buf<'a>(
&self,
data: &'a [u8],
abs_sector: u32,
) -> io::Result<&'a [u8; SECTOR_DATA_SIZE]> {
self.ensure_contains(abs_sector)?;
let block_offset = ((abs_sector - self.sector) * SECTOR_DATA_SIZE as u32) as usize;
Ok(array_ref!(data, block_offset, SECTOR_DATA_SIZE))
}
/// Returns raw data from the block buffer, starting at the specified position.
pub(crate) fn data<'a>(&self, data: &'a [u8], pos: u64) -> io::Result<&'a [u8]> {
if self.kind == BlockKind::None {
return Ok(&[]);
}
self.ensure_contains((pos / SECTOR_SIZE as u64) as u32)?;
let offset = (pos - self.sector as u64 * SECTOR_SIZE as u64) as usize;
let end = self.count as usize * SECTOR_SIZE;
Ok(&data[offset..end])
}
/// Returns partition data (excluding hashes) from the block buffer, starting at the specified
/// position within the partition.
///
/// If the block does not contain hashes, this will return the full block data. Otherwise, this
/// will return only the corresponding sector's data, ending at the sector boundary, to avoid
/// reading into the next sector's hash block.
pub(crate) fn partition_data<'a>(
&self,
data: &'a [u8],
pos: u64,
data_start_sector: u32,
partition_has_hashes: bool,
) -> io::Result<&'a [u8]> {
let block_has_hashes = match self.kind {
BlockKind::Raw => partition_has_hashes,
BlockKind::PartDecrypted { hash_block, .. } => hash_block && partition_has_hashes,
BlockKind::Junk | BlockKind::Zero => false,
BlockKind::None => return Ok(&[]),
};
let (part_sector, sector_offset) = if partition_has_hashes {
((pos / SECTOR_DATA_SIZE as u64) as u32, (pos % SECTOR_DATA_SIZE as u64) as usize)
} else {
((pos / SECTOR_SIZE as u64) as u32, (pos % SECTOR_SIZE as u64) as usize)
};
let abs_sector = part_sector + data_start_sector;
self.ensure_contains(abs_sector)?;
let block_sector = (abs_sector - self.sector) as usize;
if block_has_hashes {
let offset = block_sector * SECTOR_SIZE + HASHES_SIZE + sector_offset;
let end = (block_sector + 1) * SECTOR_SIZE; // end of sector
Ok(&data[offset..end])
} else if partition_has_hashes {
let offset = block_sector * SECTOR_DATA_SIZE + sector_offset;
let end = self.count as usize * SECTOR_DATA_SIZE; // end of block
Ok(&data[offset..end])
} else {
let offset = block_sector * SECTOR_SIZE + sector_offset;
let end = self.count as usize * SECTOR_SIZE; // end of block
Ok(&data[offset..end])
}
}
pub(crate) fn append_hash_exceptions(
&self,
abs_sector: u32,
group_sector: u32,
out: &mut Vec<WIAException>,
) -> io::Result<()> {
self.ensure_contains(abs_sector)?;
let block_sector = abs_sector - self.sector;
let group = (block_sector / 64) as usize;
let base_offset = ((block_sector % 64) as usize * HASHES_SIZE) as u16;
let new_base_offset = (group_sector * HASHES_SIZE as u32) as u16;
out.extend(self.hash_exceptions.get(group).iter().flat_map(|list| {
list.iter().filter_map(|exception| {
let offset = exception.offset.get();
if offset >= base_offset && offset < base_offset + HASHES_SIZE as u16 {
let new_offset = (offset - base_offset) + new_base_offset;
Some(WIAException { offset: new_offset.into(), hash: exception.hash })
} else {
None
}
})
}));
Ok(())
}
}
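// Example (illustrative): a sector loop that reuses the cached block while the cursor
// stays inside it, fetching a new one only on a miss. Names are hypothetical; `buf`
// must be `reader.block_size()` bytes.
fn read_sectors(reader: &mut dyn BlockReader, buf: &mut [u8], sectors: &[u32]) -> io::Result<()> {
    let mut block = Block::default();
    for &sector in sectors {
        if block.kind == BlockKind::None || !block.contains(sector) {
            block = reader.read_block(buf, sector)?;
        }
        // Access the sector's data here, e.g. via the accessors defined above.
    }
    Ok(())
}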
/// The block kind.
#[derive(Debug, Copy, Clone, PartialEq, Default)]
pub enum BlockKind {
/// Empty block, likely end of disc
#[default]
None,
/// Raw data or encrypted Wii partition data
Raw,
/// Decrypted Wii partition data
PartDecrypted {
/// Whether the sector has its hash block intact
hash_block: bool,
},
/// Wii partition junk data
Junk,
/// All zeroes
Zero,
}
/// Generates junk data for a single sector.
pub fn generate_junk_sector(
    out: &mut [u8; SECTOR_SIZE],
    abs_sector: u32,
    partition: Option<&PartitionInfo>,
    disc_header: &DiscHeader,
) {
    let (pos, offset) = if partition.is_some_and(|p| p.has_hashes) {
        let sector = abs_sector - partition.unwrap().data_start_sector;
        (sector as u64 * SECTOR_DATA_SIZE as u64, HASHES_SIZE)
    } else {
        (abs_sector as u64 * SECTOR_SIZE as u64, 0)
    };
    out[..offset].fill(0);
    let mut lfg = LaggedFibonacci::default();
    lfg.fill_sector_chunked(
        &mut out[offset..],
        *array_ref![disc_header.game_id, 0, 4],
        disc_header.disc_num,
        pos,
    );
}

View File

@ -1,30 +1,43 @@
use std::{
    io,
    io::{Seek, SeekFrom},
    mem::size_of,
    sync::Arc,
};
use bytes::{BufMut, Bytes, BytesMut};
use zerocopy::{FromBytes, FromZeros, Immutable, IntoBytes, KnownLayout, little_endian::*};
use crate::{
    Error, Result, ResultContext,
    common::{Compression, Format, MagicBytes},
    disc::{
        SECTOR_SIZE,
        reader::DiscReader,
        writer::{
            BlockProcessor, BlockResult, CheckBlockResult, DiscWriter, check_block, par_process,
            read_block,
        },
    },
    io::{
        block::{Block, BlockKind, BlockReader, CISO_MAGIC},
        nkit::{JunkBits, NKitHeader},
    },
    read::{DiscMeta, DiscStream},
    util::{
        array_ref,
        digest::DigestManager,
        lfg::LaggedFibonacci,
        read::{box_to_bytes, read_arc_at},
        static_assert,
    },
    write::{DataCallback, DiscFinalization, DiscWriterWeight, FormatOptions, ProcessOptions},
};
pub const CISO_MAP_SIZE: usize = SECTOR_SIZE - 8;
/// CISO header (little endian)
#[derive(Clone, Debug, PartialEq, FromBytes, IntoBytes, Immutable, KnownLayout)]
#[repr(C, align(4))]
struct CISOHeader {
magic: MagicBytes,
@ -35,25 +48,24 @@ struct CISOHeader {
static_assert!(size_of::<CISOHeader>() == SECTOR_SIZE);
#[derive(Clone)]
pub struct BlockReaderCISO {
inner: Box<dyn DiscStream>,
header: Arc<CISOHeader>,
block_map: Arc<[u16; CISO_MAP_SIZE]>,
nkit_header: Option<NKitHeader>,
}
impl BlockReaderCISO {
pub fn new(mut inner: Box<dyn DiscStream>) -> Result<Box<Self>> {
// Read header
let header: CISOHeader = read_from(&mut inner).context("Reading CISO header")?;
let header: Arc<CISOHeader> =
read_arc_at(inner.as_mut(), 0).context("Reading CISO header")?;
if header.magic != CISO_MAGIC {
return Err(Error::DiscFormat("Invalid CISO magic".to_string()));
}
// Build block map
let mut block_map = <[u16; CISO_MAP_SIZE]>::new_box_zeroed()?;
let mut block = 0u16;
for (presence, out) in header.block_present.iter().zip(block_map.iter_mut()) {
if *presence == 1 {
@ -64,59 +76,54 @@ impl DiscIOCISO {
}
}
let file_size = SECTOR_SIZE as u64 + block as u64 * header.block_size.get() as u64;
let len = inner.stream_len().context("Determining stream length")?;
if file_size > len {
return Err(Error::DiscFormat(format!(
"CISO file size mismatch: expected at least {} bytes, got {}",
file_size, len
)));
}
// Read NKit header if present (after CISO data)
let nkit_header = if len > file_size + 12 {
NKitHeader::try_read_from(inner.as_mut(), file_size, header.block_size.get(), true)
} else {
None
};
Ok(Box::new(Self { inner, header, block_map: Arc::from(block_map), nkit_header }))
}
}
impl BlockReader for BlockReaderCISO {
fn read_block(&mut self, out: &mut [u8], sector: u32) -> io::Result<Block> {
let block_size = self.header.block_size.get();
let block_idx = ((sector as u64 * SECTOR_SIZE as u64) / block_size as u64) as u32;
if block_idx >= CISO_MAP_SIZE as u32 {
// Out of bounds
return Ok(Block::new(block_idx, block_size, BlockKind::None));
}
// Find the block in the map
let phys_block = self.block_map[block_idx as usize];
if phys_block == u16::MAX {
// Check if block is junk data
if self.nkit_header.as_ref().and_then(|h| h.is_junk_block(block_idx)).unwrap_or(false) {
return Ok(Block::new(block_idx, block_size, BlockKind::Junk));
};
// Otherwise, read zeroes
return Ok(Block::new(block_idx, block_size, BlockKind::Zero));
}
// Read block
let file_offset = size_of::<CISOHeader>() as u64 + phys_block as u64 * block_size as u64;
self.inner.read_exact_at(out, file_offset)?;
Ok(Block::new(block_idx, block_size, BlockKind::Raw))
}
fn block_size(&self) -> u32 { self.header.block_size.get() }
fn meta(&self) -> DiscMeta {
let mut result = DiscMeta {
@ -130,3 +137,187 @@ impl BlockIO for DiscIOCISO {
result
}
}
struct BlockProcessorCISO {
inner: DiscReader,
block_size: u32,
decrypted_block: Box<[u8]>,
lfg: LaggedFibonacci,
disc_id: [u8; 4],
disc_num: u8,
}
impl Clone for BlockProcessorCISO {
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
block_size: self.block_size,
decrypted_block: <[u8]>::new_box_zeroed_with_elems(self.block_size as usize).unwrap(),
lfg: LaggedFibonacci::default(),
disc_id: self.disc_id,
disc_num: self.disc_num,
}
}
}
impl BlockProcessor for BlockProcessorCISO {
type BlockMeta = CheckBlockResult;
fn process_block(&mut self, block_idx: u32) -> io::Result<BlockResult<Self::BlockMeta>> {
let block_size = self.block_size as usize;
let input_position = block_idx as u64 * block_size as u64;
self.inner.seek(SeekFrom::Start(input_position))?;
let (block_data, disc_data) = read_block(&mut self.inner, block_size)?;
// Check if block is zeroed or junk
let result = match check_block(
disc_data.as_ref(),
&mut self.decrypted_block,
input_position,
self.inner.partitions(),
&mut self.lfg,
self.disc_id,
self.disc_num,
)? {
CheckBlockResult::Normal => {
BlockResult { block_idx, disc_data, block_data, meta: CheckBlockResult::Normal }
}
CheckBlockResult::Zeroed => BlockResult {
block_idx,
disc_data,
block_data: Bytes::new(),
meta: CheckBlockResult::Zeroed,
},
CheckBlockResult::Junk => BlockResult {
block_idx,
disc_data,
block_data: Bytes::new(),
meta: CheckBlockResult::Junk,
},
};
Ok(result)
}
}
#[derive(Clone)]
pub struct DiscWriterCISO {
inner: DiscReader,
block_size: u32,
block_count: u32,
disc_size: u64,
}
pub const DEFAULT_BLOCK_SIZE: u32 = 0x200000; // 2 MiB
impl DiscWriterCISO {
pub fn new(inner: DiscReader, options: &FormatOptions) -> Result<Box<dyn DiscWriter>> {
if options.format != Format::Ciso {
return Err(Error::DiscFormat("Invalid format for CISO writer".to_string()));
}
if options.compression != Compression::None {
return Err(Error::DiscFormat("CISO does not support compression".to_string()));
}
let block_size = DEFAULT_BLOCK_SIZE;
let disc_size = inner.disc_size();
let block_count = disc_size.div_ceil(block_size as u64) as u32;
if block_count > CISO_MAP_SIZE as u32 {
return Err(Error::DiscFormat(format!(
"CISO block count exceeds maximum: {} > {}",
block_count, CISO_MAP_SIZE
)));
}
Ok(Box::new(Self { inner, block_size, block_count, disc_size }))
}
}
impl DiscWriter for DiscWriterCISO {
fn process(
&self,
data_callback: &mut DataCallback,
options: &ProcessOptions,
) -> Result<DiscFinalization> {
data_callback(BytesMut::zeroed(SECTOR_SIZE).freeze(), 0, self.disc_size)
.context("Failed to write header")?;
// Determine junk data values
let disc_header = self.inner.header();
let disc_id = *array_ref![disc_header.game_id, 0, 4];
let disc_num = disc_header.disc_num;
// Create hashers
let digest = DigestManager::new(options);
let block_size = self.block_size;
let mut junk_bits = JunkBits::new(block_size);
let mut input_position = 0;
let mut block_count = 0;
let mut header = CISOHeader::new_box_zeroed()?;
header.magic = CISO_MAGIC;
header.block_size = block_size.into();
par_process(
BlockProcessorCISO {
inner: self.inner.clone(),
block_size,
decrypted_block: <[u8]>::new_box_zeroed_with_elems(block_size as usize).unwrap(),
lfg: LaggedFibonacci::default(),
disc_id,
disc_num,
},
self.block_count,
options.processor_threads,
|block| -> Result<()> {
// Update hashers
let disc_data_len = block.disc_data.len() as u64;
digest.send(block.disc_data);
// Check if block is zeroed or junk
match block.meta {
CheckBlockResult::Normal => {
header.block_present[block.block_idx as usize] = 1;
block_count += 1;
}
CheckBlockResult::Zeroed => {}
CheckBlockResult::Junk => {
junk_bits.set(block.block_idx, true);
}
}
input_position += disc_data_len;
data_callback(block.block_data, input_position, self.disc_size)
.with_context(|| format!("Failed to write block {}", block.block_idx))?;
Ok(())
},
)?;
// Collect hash results
let digest_results = digest.finish();
let mut nkit_header = NKitHeader {
version: 2,
size: Some(self.disc_size),
crc32: None,
md5: None,
sha1: None,
xxh64: None,
junk_bits: Some(junk_bits),
encrypted: true,
};
nkit_header.apply_digests(&digest_results);
// Write NKit header after data
let mut buffer = BytesMut::new().writer();
nkit_header.write_to(&mut buffer).context("Writing NKit header")?;
data_callback(buffer.into_inner().freeze(), self.disc_size, self.disc_size)
.context("Failed to write NKit header")?;
let header = Bytes::from(box_to_bytes(header));
let mut finalization = DiscFinalization { header, ..Default::default() };
finalization.apply_digests(&digest_results);
Ok(finalization)
}
fn progress_bound(&self) -> u64 { self.disc_size }
fn weight(&self) -> DiscWriterWeight { DiscWriterWeight::Medium }
}
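// Example (illustrative sketch): driving the CISO writer into a memory buffer. It
// assumes `DataCallback` is a `FnMut(Bytes, u64, u64) -> io::Result<()>` closure
// alias matching the calls above, and that `FormatOptions` implements `Default`.
// `process` first emits a zeroed placeholder sector, so the finalized header is
// patched in at offset 0 afterwards.
fn write_ciso(reader: DiscReader, out: &mut Vec<u8>) -> Result<()> {
    let options = FormatOptions { format: Format::Ciso, ..Default::default() };
    let writer = DiscWriterCISO::new(reader, &options)?;
    let mut callback = |data: Bytes, _progress: u64, _total: u64| -> io::Result<()> {
        out.extend_from_slice(data.as_ref());
        Ok(())
    };
    let finalization = writer.process(&mut callback, &ProcessOptions::default())?;
    out[..finalization.header.len()].copy_from_slice(finalization.header.as_ref());
    Ok(())
}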

View File

@ -1,30 +1,35 @@
use std::{
    io,
    io::{Seek, SeekFrom},
    mem::size_of,
    sync::Arc,
};
use adler::adler32_slice;
use bytes::{BufMut, Bytes, BytesMut};
use zerocopy::{FromBytes, FromZeros, Immutable, IntoBytes, KnownLayout, little_endian::*};
use crate::{
    Error, Result, ResultContext,
    common::{Compression, Format, MagicBytes},
    disc::{
        SECTOR_SIZE,
        reader::DiscReader,
        writer::{BlockProcessor, BlockResult, DiscWriter, par_process, read_block},
    },
    io::block::{Block, BlockKind, BlockReader, GCZ_MAGIC},
    read::{DiscMeta, DiscStream},
    util::{
        compress::{Compressor, DecompressionKind, Decompressor},
        digest::DigestManager,
        read::{read_arc_slice_at, read_at},
        static_assert,
    },
    write::{DataCallback, DiscFinalization, DiscWriterWeight, FormatOptions, ProcessOptions},
};
/// GCZ header (little endian)
#[derive(Clone, Debug, PartialEq, FromBytes, IntoBytes, Immutable, KnownLayout)]
#[repr(C, align(4))]
struct GCZHeader {
magic: MagicBytes,
@ -37,34 +42,34 @@ struct GCZHeader {
static_assert!(size_of::<GCZHeader>() == 32);
pub struct BlockReaderGCZ {
inner: Box<dyn DiscStream>,
header: GCZHeader,
block_map: Arc<[U64]>,
block_hashes: Arc<[U32]>,
block_buf: Box<[u8]>,
data_offset: u64,
decompressor: Decompressor,
}
impl Clone for BlockReaderGCZ {
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
header: self.header.clone(),
block_map: self.block_map.clone(),
block_hashes: self.block_hashes.clone(),
block_buf: <[u8]>::new_box_zeroed_with_elems(self.block_buf.len()).unwrap(),
data_offset: self.data_offset,
decompressor: self.decompressor.clone(),
}
}
}
impl BlockReaderGCZ {
pub fn new(mut inner: Box<dyn DiscStream>) -> Result<Box<Self>> {
// Read header
let header: GCZHeader = read_from(&mut inner).context("Reading GCZ header")?;
let header: GCZHeader = read_at(inner.as_mut(), 0).context("Reading GCZ header")?;
if header.magic != GCZ_MAGIC {
return Err(Error::DiscFormat("Invalid GCZ magic".to_string()));
}
@ -72,114 +77,117 @@ impl DiscIOGCZ {
// Read block map and hashes
let block_count = header.block_count.get();
let block_map =
read_box_slice(&mut inner, block_count as usize).context("Reading GCZ block map")?;
let block_hashes =
read_box_slice(&mut inner, block_count as usize).context("Reading GCZ block hashes")?;
read_arc_slice_at(inner.as_mut(), block_count as usize, size_of::<GCZHeader>() as u64)
.context("Reading GCZ block map")?;
let block_hashes = read_arc_slice_at(
inner.as_mut(),
block_count as usize,
size_of::<GCZHeader>() as u64 + block_count as u64 * 8,
)
.context("Reading GCZ block hashes")?;
// header + block_count * (u64 + u32)
let data_offset = size_of::<GCZHeader>() as u64 + block_count as u64 * 12;
let block_buf = <[u8]>::new_box_zeroed_with_elems(header.block_size.get() as usize)?;
let decompressor = Decompressor::new(DecompressionKind::Deflate);
Ok(Box::new(Self {
inner,
header,
block_map,
block_hashes,
block_buf,
data_offset,
decompressor,
}))
}
}
impl BlockReader for BlockReaderGCZ {
fn read_block(&mut self, out: &mut [u8], sector: u32) -> io::Result<Block> {
let block_size = self.header.block_size.get();
let block_idx = ((sector as u64 * SECTOR_SIZE as u64) / block_size as u64) as u32;
if block_idx >= self.header.block_count.get() {
// Out of bounds
return Ok(Block::new(block_idx, block_size, BlockKind::None));
}
// Find block offset and size
let mut file_offset = self.block_map[block_idx as usize].get();
let mut compressed = true;
if file_offset & (1 << 63) != 0 {
file_offset &= !(1 << 63);
compressed = false;
}
let compressed_size = ((self
.block_map
.get(block_idx as usize + 1)
.unwrap_or(&self.header.compressed_size)
.get()
& !(1 << 63))
- file_offset) as usize;
if compressed_size > block_size as usize {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Compressed block size exceeds block size: {} > {}",
compressed_size, block_size
),
));
} else if !compressed && compressed_size != block_size as usize {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Uncompressed block size does not match block size: {} != {}",
compressed_size, block_size
),
));
}
// Read block
self.inner.read_exact_at(
&mut self.block_buf[..compressed_size],
self.data_offset + file_offset,
)?;
// Verify block checksum
let checksum = adler32_slice(&self.block_buf[..compressed_size]);
let expected_checksum = self.block_hashes[block_idx as usize].get();
if checksum != expected_checksum {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Block checksum mismatch: {:#010x} != {:#010x}",
checksum, expected_checksum
"Block {} checksum mismatch: {:#010x} != {:#010x}",
block_idx, checksum, expected_checksum
),
));
}
if compressed {
// Decompress block
let out_len = self.decompressor.decompress(&self.block_buf[..compressed_size], out)?;
if out_len != block_size as usize {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Deflate decompression failed: {:?} (in: {}, out: {})",
status, in_size, out_size
"Block {} decompression failed: in: {}, out: {}",
block_idx, compressed_size, out_len
),
));
}
} else {
// Copy uncompressed block
out.copy_from_slice(self.block_buf.as_ref());
}
Ok(Block::new(block_idx, block_size, BlockKind::Raw))
}
fn block_size(&self) -> u32 { self.header.block_size.get() }
fn meta(&self) -> DiscMeta {
DiscMeta {
format: Format::Gcz,
compression: Compression::Deflate(0),
block_size: Some(self.header.block_size.get()),
lossless: true,
disc_size: Some(self.header.disc_size.get()),
@ -187,3 +195,170 @@ impl BlockIO for DiscIOGCZ {
}
}
}
struct BlockProcessorGCZ {
inner: DiscReader,
header: GCZHeader,
compressor: Compressor,
}
impl Clone for BlockProcessorGCZ {
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
header: self.header.clone(),
compressor: self.compressor.clone(),
}
}
}
struct BlockMetaGCZ {
is_compressed: bool,
block_hash: u32,
}
impl BlockProcessor for BlockProcessorGCZ {
type BlockMeta = BlockMetaGCZ;
fn process_block(&mut self, block_idx: u32) -> io::Result<BlockResult<Self::BlockMeta>> {
let block_size = self.header.block_size.get();
self.inner.seek(SeekFrom::Start(block_idx as u64 * block_size as u64))?;
let (mut block_data, disc_data) = read_block(&mut self.inner, block_size as usize)?;
// Try to compress block
let is_compressed = if self.compressor.compress(&block_data)? {
println!("Compressed block {} to {}", block_idx, self.compressor.buffer.len());
block_data = Bytes::copy_from_slice(self.compressor.buffer.as_slice());
true
} else {
false
};
let block_hash = adler32_slice(block_data.as_ref());
Ok(BlockResult {
block_idx,
disc_data,
block_data,
meta: BlockMetaGCZ { is_compressed, block_hash },
})
}
}
#[derive(Clone)]
pub struct DiscWriterGCZ {
inner: DiscReader,
header: GCZHeader,
compression: Compression,
}
pub const DEFAULT_BLOCK_SIZE: u32 = 0x8000; // 32 KiB
// Level 0 will be converted to the default level in [`Compression::validate_level`]
pub const DEFAULT_COMPRESSION: Compression = Compression::Deflate(0);
impl DiscWriterGCZ {
pub fn new(inner: DiscReader, options: &FormatOptions) -> Result<Box<dyn DiscWriter>> {
if options.format != Format::Gcz {
return Err(Error::DiscFormat("Invalid format for GCZ writer".to_string()));
}
if !matches!(options.compression, Compression::Deflate(_)) {
return Err(Error::DiscFormat(format!(
"Unsupported compression for GCZ: {:?}",
options.compression
)));
}
let block_size = options.block_size;
if block_size < SECTOR_SIZE as u32 || block_size % SECTOR_SIZE as u32 != 0 {
return Err(Error::DiscFormat("Invalid block size for GCZ".to_string()));
}
let disc_header = inner.header();
let disc_size = inner.disc_size();
let block_count = disc_size.div_ceil(block_size as u64) as u32;
// Generate header
let header = GCZHeader {
magic: GCZ_MAGIC,
disc_type: if disc_header.is_wii() { 1 } else { 0 }.into(),
compressed_size: 0.into(), // Written when finalized
disc_size: disc_size.into(),
block_size: block_size.into(),
block_count: block_count.into(),
};
Ok(Box::new(Self { inner, header, compression: options.compression }))
}
}
impl DiscWriter for DiscWriterGCZ {
fn process(
&self,
data_callback: &mut DataCallback,
options: &ProcessOptions,
) -> Result<DiscFinalization> {
let disc_size = self.header.disc_size.get();
let block_size = self.header.block_size.get();
let block_count = self.header.block_count.get();
// Create hashers
let digest = DigestManager::new(options);
// Generate block map and hashes
let mut block_map = <[U64]>::new_box_zeroed_with_elems(block_count as usize)?;
let mut block_hashes = <[U32]>::new_box_zeroed_with_elems(block_count as usize)?;
let header_data_size = size_of::<GCZHeader>()
+ size_of_val(block_map.as_ref())
+ size_of_val(block_hashes.as_ref());
let mut header_data = BytesMut::with_capacity(header_data_size);
header_data.put_slice(self.header.as_bytes());
header_data.resize(header_data_size, 0);
data_callback(header_data.freeze(), 0, disc_size).context("Failed to write GCZ header")?;
let mut input_position = 0;
let mut data_position = 0;
par_process(
BlockProcessorGCZ {
inner: self.inner.clone(),
header: self.header.clone(),
compressor: Compressor::new(self.compression, block_size as usize),
},
block_count,
options.processor_threads,
|block| {
// Update hashers
input_position += block.disc_data.len() as u64;
digest.send(block.disc_data);
// Update block map and hash
let uncompressed_bit = (!block.meta.is_compressed as u64) << 63;
block_map[block.block_idx as usize] = (data_position | uncompressed_bit).into();
block_hashes[block.block_idx as usize] = block.meta.block_hash.into();
// Write block data
data_position += block.block_data.len() as u64;
data_callback(block.block_data, input_position, disc_size)
.with_context(|| format!("Failed to write block {}", block.block_idx))?;
Ok(())
},
)?;
// Write updated header, block map and hashes
let mut header = self.header.clone();
header.compressed_size = data_position.into();
let mut header_data = BytesMut::with_capacity(header_data_size);
header_data.extend_from_slice(header.as_bytes());
header_data.extend_from_slice(block_map.as_bytes());
header_data.extend_from_slice(block_hashes.as_bytes());
let mut finalization =
DiscFinalization { header: header_data.freeze(), ..Default::default() };
finalization.apply_digests(&digest.finish());
Ok(finalization)
}
fn progress_bound(&self) -> u64 { self.header.disc_size.get() }
fn weight(&self) -> DiscWriterWeight { DiscWriterWeight::Heavy }
}

View File

@ -1,65 +1,89 @@
use std::{io, io::BufRead};
use crate::{
Result, ResultContext,
common::Format,
disc::{SECTOR_SIZE, reader::DiscReader, writer::DiscWriter},
io::block::{Block, BlockKind, BlockReader},
read::{DiscMeta, DiscStream},
util::digest::DigestManager,
write::{DataCallback, DiscFinalization, DiscWriterWeight, ProcessOptions},
};
#[derive(Clone)]
pub struct BlockReaderISO {
inner: Box<dyn DiscStream>,
disc_size: u64,
}
impl BlockReaderISO {
pub fn new(mut inner: Box<dyn DiscStream>) -> Result<Box<Self>> {
let disc_size = inner.stream_len().context("Determining stream length")?;
Ok(Box::new(Self { inner, disc_size }))
}
}
impl BlockReader for BlockReaderISO {
fn read_block(&mut self, out: &mut [u8], sector: u32) -> io::Result<Block> {
let pos = sector as u64 * SECTOR_SIZE as u64;
if pos >= self.disc_size {
// End of file
return Ok(Block::sector(sector, BlockKind::None));
}
if pos + SECTOR_SIZE as u64 > self.disc_size {
// If the last block is not a full sector, fill the rest with zeroes
let read = (self.disc_size - pos) as usize;
self.inner.read_exact_at(&mut out[..read], pos)?;
out[read..].fill(0);
} else {
self.inner.read_exact_at(out, pos)?;
}
Ok(Block::sector(sector, BlockKind::Raw))
}
fn block_size(&self) -> u32 { SECTOR_SIZE as u32 }
fn meta(&self) -> DiscMeta {
DiscMeta {
format: Format::Iso,
lossless: true,
disc_size: Some(self.disc_size),
..Default::default()
}
}
}
impl DiscWriter for DiscReader {
fn process(
&self,
data_callback: &mut DataCallback,
options: &ProcessOptions,
) -> Result<DiscFinalization> {
let mut reader = self.clone();
let digest = DigestManager::new(options);
loop {
let pos = reader.position();
let data = reader
.fill_buf_internal()
.with_context(|| format!("Reading disc data at offset {pos}"))?;
let len = data.len();
if len == 0 {
break;
}
// Update hashers
digest.send(data.clone());
data_callback(data, pos + len as u64, reader.disc_size())
.context("Failed to write disc data")?;
reader.consume(len);
}
let mut finalization = DiscFinalization::default();
finalization.apply_digests(&digest.finish());
Ok(finalization)
}
fn progress_bound(&self) -> u64 { self.disc_size() }
fn weight(&self) -> DiscWriterWeight { DiscWriterWeight::Light }
}

View File

@ -1,7 +1,5 @@
//! Disc file format related logic (CISO, NFS, WBFS, WIA, etc.)
use std::fmt;
pub(crate) mod block;
pub(crate) mod ciso;
#[cfg(feature = "compress-zlib")]
@ -13,128 +11,3 @@ pub(crate) mod split;
pub(crate) mod tgc;
pub(crate) mod wbfs;
pub(crate) mod wia;
/// SHA-1 hash bytes
pub(crate) type HashBytes = [u8; 20];
/// AES key bytes
pub(crate) type KeyBytes = [u8; 16];
/// Magic bytes
pub(crate) type MagicBytes = [u8; 4];
/// The disc file format.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum Format {
/// ISO / GCM (GameCube master disc)
#[default]
Iso,
/// CISO (Compact ISO)
Ciso,
/// GCZ
Gcz,
/// NFS (Wii U VC)
Nfs,
/// RVZ
Rvz,
/// WBFS
Wbfs,
/// WIA
Wia,
/// TGC
Tgc,
}
impl fmt::Display for Format {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Format::Iso => write!(f, "ISO"),
Format::Ciso => write!(f, "CISO"),
Format::Gcz => write!(f, "GCZ"),
Format::Nfs => write!(f, "NFS"),
Format::Rvz => write!(f, "RVZ"),
Format::Wbfs => write!(f, "WBFS"),
Format::Wia => write!(f, "WIA"),
Format::Tgc => write!(f, "TGC"),
}
}
}
/// The disc file format's compression algorithm.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum Compression {
/// No compression
#[default]
None,
/// BZIP2
Bzip2,
/// Deflate (GCZ only)
Deflate,
/// LZMA
Lzma,
/// LZMA2
Lzma2,
/// Purge (WIA only)
Purge,
/// Zstandard
Zstandard,
}
impl fmt::Display for Compression {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Compression::None => write!(f, "None"),
Compression::Bzip2 => write!(f, "BZIP2"),
Compression::Deflate => write!(f, "Deflate"),
Compression::Lzma => write!(f, "LZMA"),
Compression::Lzma2 => write!(f, "LZMA2"),
Compression::Purge => write!(f, "Purge"),
Compression::Zstandard => write!(f, "Zstandard"),
}
}
}
/// Extra metadata about the underlying disc file format.
#[derive(Debug, Clone, Default)]
pub struct DiscMeta {
/// The disc file format.
pub format: Format,
/// The format's compression algorithm.
pub compression: Compression,
/// If the format uses blocks, the block size in bytes.
pub block_size: Option<u32>,
/// Whether Wii partitions are stored decrypted in the format.
pub decrypted: bool,
/// Whether the format omits Wii partition data hashes.
pub needs_hash_recovery: bool,
/// Whether the format supports recovering the original disc data losslessly.
pub lossless: bool,
/// The original disc's size in bytes, if stored by the format.
pub disc_size: Option<u64>,
/// The original disc's CRC32 hash, if stored by the format.
pub crc32: Option<u32>,
/// The original disc's MD5 hash, if stored by the format.
pub md5: Option<[u8; 16]>,
/// The original disc's SHA-1 hash, if stored by the format.
pub sha1: Option<[u8; 20]>,
/// The original disc's XXH64 hash, if stored by the format.
pub xxhash64: Option<u64>,
}
/// Encrypts data in-place using AES-128-CBC with the given key and IV.
#[inline(always)]
pub(crate) fn aes_encrypt(key: &KeyBytes, iv: KeyBytes, data: &mut [u8]) {
use aes::cipher::{block_padding::NoPadding, BlockEncryptMut, KeyIvInit};
<cbc::Encryptor<aes::Aes128>>::new(key.into(), &aes::Block::from(iv))
.encrypt_padded_mut::<NoPadding>(data, data.len())
.unwrap(); // Safe: using NoPadding
}
/// Decrypts data in-place using AES-128-CBC with the given key and IV.
#[inline(always)]
pub(crate) fn aes_decrypt(key: &KeyBytes, iv: KeyBytes, data: &mut [u8]) {
use aes::cipher::{block_padding::NoPadding, BlockDecryptMut, KeyIvInit};
<cbc::Decryptor<aes::Aes128>>::new(key.into(), &aes::Block::from(iv))
.decrypt_padded_mut::<NoPadding>(data)
.unwrap(); // Safe: using NoPadding
}
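
These helpers (relocated in this change; the new NFS call site uses `util::aes::aes_cbc_decrypt`) are easy to sanity-check with a round-trip, since CBC with `NoPadding` only requires the buffer length to be a multiple of the 16-byte AES block. A test-module sketch, not part of the crate:

```rust
#[cfg(test)]
mod tests {
    use super::{aes_decrypt, aes_encrypt};

    #[test]
    fn aes_cbc_roundtrip() {
        let key = [0u8; 16];
        let iv = [1u8; 16];
        let mut data = *b"sixteen byte blk"; // exactly one AES block
        let orig = data;
        aes_encrypt(&key, iv, &mut data);
        assert_ne!(data, orig); // ciphertext differs from plaintext
        aes_decrypt(&key, iv, &mut data);
        assert_eq!(data, orig); // decryption restores the original
    }
}
```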

View File

@ -1,37 +1,36 @@
use std::{
fs::File,
io,
io::{BufReader, Read, Seek, SeekFrom},
io::{BufReader, Read},
mem::size_of,
path::{Component, Path, PathBuf},
sync::Arc,
};
use zerocopy::{big_endian::U32, AsBytes, FromBytes, FromZeroes};
use zerocopy::{FromBytes, FromZeros, Immutable, IntoBytes, KnownLayout, big_endian::U32};
use crate::{
Error, Result, ResultContext,
common::{Format, KeyBytes, MagicBytes},
disc::SECTOR_SIZE,
io::{
aes_decrypt,
block::{Block, BlockIO, PartitionInfo},
block::{Block, BlockKind, BlockReader, NFS_MAGIC},
split::SplitFileReader,
Format, KeyBytes, MagicBytes,
},
static_assert,
util::read::read_from,
DiscMeta, Error, Result, ResultContext,
read::{DiscMeta, DiscStream},
util::{aes::aes_cbc_decrypt, array_ref_mut, read::read_arc, static_assert},
};
pub const NFS_MAGIC: MagicBytes = *b"EGGS";
pub const NFS_END_MAGIC: MagicBytes = *b"SGGE";
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[derive(Clone, Debug, PartialEq, FromBytes, IntoBytes, Immutable, KnownLayout)]
#[repr(C, align(4))]
struct LBARange {
start_sector: U32,
num_sectors: U32,
}
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[derive(Clone, Debug, PartialEq, FromBytes, IntoBytes, Immutable, KnownLayout)]
#[repr(C, align(4))]
struct NFSHeader {
magic: MagicBytes,
@ -84,19 +83,19 @@ impl NFSHeader {
}
#[derive(Clone)]
pub struct DiscIONFS {
pub struct BlockReaderNFS {
inner: SplitFileReader,
header: NFSHeader,
header: Arc<NFSHeader>,
raw_size: u64,
disc_size: u64,
key: KeyBytes,
}
impl DiscIONFS {
impl BlockReaderNFS {
pub fn new(directory: &Path) -> Result<Box<Self>> {
let mut disc_io = Box::new(Self {
inner: SplitFileReader::empty(),
header: NFSHeader::new_zeroed(),
header: Arc::new(NFSHeader::new_zeroed()),
raw_size: 0,
disc_size: 0,
key: [0; 16],
@ -106,42 +105,28 @@ impl DiscIONFS {
}
}
impl BlockIO for DiscIONFS {
fn read_block_internal(
&mut self,
out: &mut [u8],
sector: u32,
partition: Option<&PartitionInfo>,
) -> io::Result<Block> {
impl BlockReader for BlockReaderNFS {
fn read_block(&mut self, out: &mut [u8], sector: u32) -> io::Result<Block> {
// Calculate physical sector
let phys_sector = self.header.phys_sector(sector);
if phys_sector == u32::MAX {
// Logical zero sector
return Ok(Block::Zero);
return Ok(Block::sector(sector, BlockKind::Raw));
}
// Read sector
let offset = size_of::<NFSHeader>() as u64 + phys_sector as u64 * SECTOR_SIZE as u64;
self.inner.seek(SeekFrom::Start(offset))?;
self.inner.read_exact(out)?;
self.inner.read_exact_at(out, offset)?;
// Decrypt
let iv_bytes = sector.to_be_bytes();
#[rustfmt::skip]
let iv: KeyBytes = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
iv_bytes[0], iv_bytes[1], iv_bytes[2], iv_bytes[3],
];
aes_decrypt(&self.key, iv, out);
let mut iv = [0u8; 0x10];
*array_ref_mut!(iv, 12, 4) = sector.to_be_bytes();
aes_cbc_decrypt(&self.key, &iv, out);
if partition.is_some() {
Ok(Block::PartDecrypted { has_hashes: true })
} else {
Ok(Block::Raw)
}
Ok(Block::sector(sector, BlockKind::PartDecrypted { hash_block: true }))
}
fn block_size_internal(&self) -> u32 { SECTOR_SIZE as u32 }
fn block_size(&self) -> u32 { SECTOR_SIZE as u32 }
fn meta(&self) -> DiscMeta {
DiscMeta { format: Format::Nfs, decrypted: true, ..Default::default() }
@ -171,7 +156,7 @@ fn get_nfs(directory: &Path, num: u32) -> Result<PathBuf> {
}
}
impl DiscIONFS {
impl BlockReaderNFS {
pub fn load_files(&mut self, directory: &Path) -> Result<()> {
{
// Load key file
@ -192,7 +177,7 @@ impl DiscIONFS {
let resolved_path = key_path.unwrap();
File::open(resolved_path.as_path())
.map_err(|v| Error::Io(format!("Failed to open {}", resolved_path.display()), v))?
.read(&mut self.key)
.read_exact(&mut self.key)
.map_err(|v| Error::Io(format!("Failed to read {}", resolved_path.display()), v))?;
}
@ -204,7 +189,7 @@ impl DiscIONFS {
let mut file = BufReader::new(
File::open(&path).with_context(|| format!("Opening file {}", path.display()))?,
);
let header: NFSHeader = read_from(&mut file)
let header: Arc<NFSHeader> = read_arc(&mut file)
.with_context(|| format!("Reading NFS header from file {}", path.display()))?;
header.validate()?;
// log::debug!("{:?}", header);
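
The rewritten IV construction above is the whole of the NFS sector cipher setup: 12 zero bytes followed by the sector index in big-endian. The same logic without the `array_ref_mut!` macro, as a standalone sketch:

```rust
/// Build the AES-CBC IV for an NFS sector (equivalent to the macro-based code above).
fn nfs_sector_iv(sector: u32) -> [u8; 16] {
    let mut iv = [0u8; 16];
    iv[12..16].copy_from_slice(&sector.to_be_bytes());
    iv
}

// nfs_sector_iv(1) == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
```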

View File

@ -1,13 +1,12 @@
use std::{
io,
io::{Read, Seek, SeekFrom},
};
use std::io::{self, Read, Seek, Write};
use tracing::warn;
use crate::{
common::MagicBytes,
disc::DL_DVD_SIZE,
io::MagicBytes,
util::read::{read_from, read_u16_be, read_u32_be, read_u64_be, read_vec},
DiscMeta,
read::{DiscMeta, DiscStream},
util::read::{read_at, read_from, read_u16_be, read_u32_be, read_u64_be, read_vec},
};
#[allow(unused)]
@ -56,33 +55,50 @@ const fn calc_header_size(version: u8, flags: u16, key_len: u32) -> usize {
size
}
#[allow(unused)]
#[derive(Debug, Clone)]
pub struct NKitHeader {
pub version: u8,
pub flags: u16,
pub size: Option<u64>,
pub crc32: Option<u32>,
pub md5: Option<[u8; 16]>,
pub sha1: Option<[u8; 20]>,
pub xxhash64: Option<u64>,
pub xxh64: Option<u64>,
/// Bitstream of blocks that are junk data
pub junk_bits: Option<Vec<u8>>,
pub block_size: u32,
pub junk_bits: Option<JunkBits>,
pub encrypted: bool,
}
impl Default for NKitHeader {
fn default() -> Self {
Self {
version: 2,
size: None,
crc32: None,
md5: None,
sha1: None,
xxh64: None,
junk_bits: None,
encrypted: false,
}
}
}
const VERSION_PREFIX: [u8; 6] = *b"NKIT v";
impl NKitHeader {
pub fn try_read_from<R>(reader: &mut R, block_size: u32, has_junk_bits: bool) -> Option<Self>
where R: Read + Seek + ?Sized {
let magic: MagicBytes = read_from(reader).ok()?;
pub fn try_read_from(
reader: &mut dyn DiscStream,
pos: u64,
block_size: u32,
has_junk_bits: bool,
) -> Option<Self> {
let magic: MagicBytes = read_at(reader, pos).ok()?;
if magic == *b"NKIT" {
reader.seek(SeekFrom::Current(-4)).ok()?;
match NKitHeader::read_from(reader, block_size, has_junk_bits) {
let mut reader = ReadAdapter::new(reader, pos);
match NKitHeader::read_from(&mut reader, block_size, has_junk_bits) {
Ok(header) => Some(header),
Err(e) => {
log::warn!("Failed to read NKit header: {}", e);
warn!("Failed to read NKit header: {}", e);
None
}
}
@ -136,25 +152,20 @@ impl NKitHeader {
let sha1 = (flags & NKitHeaderFlags::Sha1 as u16 != 0)
.then(|| read_from::<[u8; 20], _>(&mut inner))
.transpose()?;
let xxhash64 = (flags & NKitHeaderFlags::Xxhash64 as u16 != 0)
let xxh64 = (flags & NKitHeaderFlags::Xxhash64 as u16 != 0)
.then(|| read_u64_be(&mut inner))
.transpose()?;
let junk_bits = if has_junk_bits {
let n = DL_DVD_SIZE.div_ceil(block_size as u64).div_ceil(8);
Some(read_vec(reader, n as usize)?)
} else {
None
};
let junk_bits =
if has_junk_bits { Some(JunkBits::read_from(reader, block_size)?) } else { None };
Ok(Self { version, flags, size, crc32, md5, sha1, xxhash64, junk_bits, block_size })
let encrypted = flags & NKitHeaderFlags::Encrypted as u16 != 0;
Ok(Self { version, size, crc32, md5, sha1, xxh64, junk_bits, encrypted })
}
pub fn is_junk_block(&self, block: u32) -> Option<bool> {
self.junk_bits
.as_ref()
.and_then(|v| v.get((block / 8) as usize))
.map(|&b| b & (1 << (7 - (block & 7))) != 0)
self.junk_bits.as_ref().map(|v| v.get(block))
}
pub fn apply(&self, meta: &mut DiscMeta) {
@ -164,6 +175,160 @@ impl NKitHeader {
meta.crc32 = self.crc32;
meta.md5 = self.md5;
meta.sha1 = self.sha1;
meta.xxhash64 = self.xxhash64;
meta.xxh64 = self.xxh64;
}
fn calc_flags(&self) -> u16 {
let mut flags = 0;
if self.size.is_some() {
flags |= NKitHeaderFlags::Size as u16;
}
if self.crc32.is_some() {
flags |= NKitHeaderFlags::Crc32 as u16;
}
if self.md5.is_some() {
flags |= NKitHeaderFlags::Md5 as u16;
}
if self.sha1.is_some() {
flags |= NKitHeaderFlags::Sha1 as u16;
}
if self.xxh64.is_some() {
flags |= NKitHeaderFlags::Xxhash64 as u16;
}
if self.encrypted {
flags |= NKitHeaderFlags::Encrypted as u16;
}
flags
}
pub fn write_to<W>(&self, w: &mut W) -> io::Result<()>
where W: Write + ?Sized {
w.write_all(&VERSION_PREFIX)?;
w.write_all(&[b'0' + self.version])?;
let flags = self.calc_flags();
match self.version {
1 => {}
2 => {
let header_size = calc_header_size(self.version, flags, 0) as u16;
w.write_all(&header_size.to_be_bytes())?;
w.write_all(&flags.to_be_bytes())?;
}
version => {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Unsupported NKit header version: {}", version),
));
}
};
if let Some(size) = self.size {
w.write_all(&size.to_be_bytes())?;
}
if let Some(crc32) = self.crc32 {
w.write_all(&crc32.to_be_bytes())?;
} else if self.version == 1 {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Missing CRC32 in NKit v1 header",
));
}
if let Some(md5) = self.md5 {
w.write_all(&md5)?;
} else if self.version == 1 {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Missing MD5 in NKit v1 header",
));
}
if let Some(sha1) = self.sha1 {
w.write_all(&sha1)?;
} else if self.version == 1 {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Missing SHA1 in NKit v1 header",
));
}
if let Some(xxh64) = self.xxh64 {
w.write_all(&xxh64.to_be_bytes())?;
} else if self.version == 1 {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Missing XXHash64 in NKit header",
));
}
if let Some(junk_bits) = &self.junk_bits {
junk_bits.write_to(w)?;
}
Ok(())
}
}
#[derive(Debug, Clone)]
pub struct JunkBits(Vec<u8>);
impl JunkBits {
pub fn new(block_size: u32) -> Self { Self(vec![0; Self::len(block_size)]) }
pub fn read_from<R>(reader: &mut R, block_size: u32) -> io::Result<Self>
where R: Read + ?Sized {
Ok(Self(read_vec(reader, Self::len(block_size))?))
}
pub fn write_to<W>(&self, w: &mut W) -> io::Result<()>
where W: Write + ?Sized {
w.write_all(&self.0)
}
pub fn set(&mut self, block: u32, is_junk: bool) {
let Some(byte) = self.0.get_mut((block / 8) as usize) else {
return;
};
if is_junk {
*byte |= 1 << (7 - (block & 7));
} else {
*byte &= !(1 << (7 - (block & 7)));
}
}
pub fn get(&self, block: u32) -> bool {
let Some(&byte) = self.0.get((block / 8) as usize) else {
return false;
};
byte & (1 << (7 - (block & 7))) != 0
}
fn len(block_size: u32) -> usize {
DL_DVD_SIZE.div_ceil(block_size as u64).div_ceil(8) as usize
}
}
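
`JunkBits` packs one flag per block, MSB-first: block 0 maps to bit 7 of byte 0, block 8 to bit 7 of byte 1, and so on. An in-module test sketch (it constructs the tuple directly, which only works inside this module):

```rust
#[cfg(test)]
mod tests {
    use super::JunkBits;

    #[test]
    fn junk_bits_msb_first() {
        let mut bits = JunkBits(vec![0u8; 2]);
        bits.set(9, true); // byte 1, bit 7 - (9 & 7) == bit 6
        assert_eq!(bits.0, vec![0b0000_0000, 0b0100_0000]);
        assert!(bits.get(9));
        assert!(!bits.get(8));
        bits.set(9, false);
        assert!(!bits.get(9));
    }
}
```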
pub struct ReadAdapter<'a> {
reader: &'a mut dyn DiscStream,
pos: u64,
}
impl<'a> ReadAdapter<'a> {
pub fn new(reader: &'a mut dyn DiscStream, offset: u64) -> Self { Self { reader, pos: offset } }
}
impl Read for ReadAdapter<'_> {
fn read(&mut self, _buf: &mut [u8]) -> io::Result<usize> {
Err(io::Error::from(io::ErrorKind::Unsupported))
}
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
self.reader.read_exact_at(buf, self.pos)?;
self.pos += buf.len() as u64;
Ok(())
}
}
impl Seek for ReadAdapter<'_> {
fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
self.pos = match pos {
io::SeekFrom::Start(pos) => pos,
io::SeekFrom::End(v) => self.reader.stream_len()?.saturating_add_signed(v),
io::SeekFrom::Current(v) => self.pos.saturating_add_signed(v),
};
Ok(self.pos)
}
}

View File

@ -1,18 +1,15 @@
use std::{
cmp::min,
fs::File,
io,
io::{BufReader, Read, Seek, SeekFrom},
path::{Path, PathBuf},
};
use crate::{ErrorContext, Result, ResultContext};
use crate::{ErrorContext, Result, ResultContext, read::DiscStream};
#[derive(Debug)]
pub struct SplitFileReader {
files: Vec<Split<PathBuf>>,
open_file: Option<Split<BufReader<File>>>,
pos: u64,
open_file: Option<Split<File>>,
}
#[derive(Debug, Clone)]
@ -59,7 +56,7 @@ fn split_path_3(input: &Path, index: u32) -> PathBuf {
}
impl SplitFileReader {
pub fn empty() -> Self { Self { files: Vec::new(), open_file: None, pos: 0 } }
pub fn empty() -> Self { Self { files: Vec::new(), open_file: Default::default() } }
pub fn new(path: &Path) -> Result<Self> {
let mut files = vec![];
@ -89,7 +86,7 @@ impl SplitFileReader {
break;
}
}
Ok(Self { files, open_file: None, pos: 0 })
Ok(Self { files, open_file: Default::default() })
}
pub fn add(&mut self, path: &Path) -> Result<()> {
@ -103,47 +100,43 @@ impl SplitFileReader {
pub fn len(&self) -> u64 { self.files.last().map_or(0, |f| f.begin + f.size) }
}
impl Read for SplitFileReader {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if self.open_file.is_none() || !self.open_file.as_ref().unwrap().contains(self.pos) {
self.open_file = if let Some(split) = self.files.iter().find(|f| f.contains(self.pos)) {
let mut file = BufReader::new(File::open(&split.inner)?);
// log::info!("Opened file {} at pos {}", split.inner.display(), self.pos);
file.seek(SeekFrom::Start(self.pos - split.begin))?;
Some(Split { inner: file, begin: split.begin, size: split.size })
} else {
None
};
}
let Some(split) = self.open_file.as_mut() else {
return Ok(0);
};
let to_read = min(buf.len(), (split.begin + split.size - self.pos) as usize);
let read = split.inner.read(&mut buf[..to_read])?;
self.pos += read as u64;
Ok(read)
}
}
impl Seek for SplitFileReader {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
self.pos = match pos {
SeekFrom::Start(pos) => pos,
SeekFrom::Current(offset) => self.pos.saturating_add_signed(offset),
SeekFrom::End(offset) => self.len().saturating_add_signed(offset),
};
if let Some(split) = &mut self.open_file {
if split.contains(self.pos) {
// Seek within the open file
split.inner.seek(SeekFrom::Start(self.pos - split.begin))?;
} else {
self.open_file = None;
}
}
Ok(self.pos)
}
}
impl Clone for SplitFileReader {
fn clone(&self) -> Self { Self { files: self.files.clone(), open_file: None, pos: 0 } }
fn clone(&self) -> Self { Self { files: self.files.clone(), open_file: Default::default() } }
}
impl DiscStream for SplitFileReader {
fn read_exact_at(&mut self, mut buf: &mut [u8], mut offset: u64) -> io::Result<()> {
while !buf.is_empty() {
let split = if self.open_file.as_ref().is_none_or(|s| !s.contains(offset)) {
let split = if let Some(split) = self.files.iter().find(|f| f.contains(offset)) {
let file = File::open(&split.inner)?;
Split { inner: file, begin: split.begin, size: split.size }
} else {
return Err(io::Error::from(io::ErrorKind::UnexpectedEof));
};
self.open_file.insert(split)
} else {
self.open_file.as_mut().unwrap()
};
let file_offset = offset - split.begin;
let len = (split.size - file_offset).min(buf.len() as u64) as usize;
let (out, remain) = buf.split_at_mut(len);
#[cfg(unix)]
{
use std::os::unix::fs::FileExt;
split.inner.read_exact_at(out, file_offset)?;
}
#[cfg(not(unix))]
{
use std::io::{Read, Seek, SeekFrom};
split.inner.seek(SeekFrom::Start(file_offset))?;
split.inner.read_exact(out)?
}
buf = remain;
offset += len as u64;
}
Ok(())
}
fn stream_len(&mut self) -> io::Result<u64> { Ok(self.len()) }
}
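
The `#[cfg(unix)]` branch above avoids a seek syscall by using positional reads (`pread(2)` via `std::os::unix::fs::FileExt`), falling back to seek-then-read elsewhere. The same pattern in isolation for a single `File` (a sketch, not the crate's helper):

```rust
use std::{fs::File, io};

fn file_read_exact_at(file: &mut File, buf: &mut [u8], offset: u64) -> io::Result<()> {
    #[cfg(unix)]
    {
        use std::os::unix::fs::FileExt;
        file.read_exact_at(buf, offset) // positional read; does not move the cursor
    }
    #[cfg(not(unix))]
    {
        use std::io::{Read, Seek, SeekFrom};
        file.seek(SeekFrom::Start(offset))?;
        file.read_exact(buf)
    }
}
```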

View File

@ -1,27 +1,35 @@
use std::{
io,
io::{Read, Seek, SeekFrom},
mem::size_of,
path::Path,
io::{BufRead, Read, Seek, SeekFrom},
sync::Arc,
};
use zerocopy::{big_endian::U32, AsBytes, FromBytes, FromZeroes};
use bytes::{BufMut, Bytes, BytesMut};
use zerocopy::{FromBytes, FromZeros, Immutable, IntoBytes, KnownLayout, big_endian::U32};
use crate::{
disc::SECTOR_SIZE,
io::{
block::{Block, BlockIO, PartitionInfo},
split::SplitFileReader,
Format, MagicBytes,
Error, Result, ResultContext,
build::gc::{FileCallback, GCPartitionStream, WriteInfo, WriteKind, insert_junk_data},
common::{Compression, Format, MagicBytes, PartitionKind},
disc::{
BB2_OFFSET, BootHeader, DiscHeader, SECTOR_SIZE,
fst::Fst,
gcn::{read_dol, read_fst},
reader::DiscReader,
writer::DiscWriter,
},
util::read::{read_box_slice, read_from},
DiscHeader, DiscMeta, Error, Node, PartitionHeader, Result, ResultContext,
io::block::{Block, BlockKind, BlockReader, TGC_MAGIC},
read::{DiscMeta, DiscStream, PartitionOptions, PartitionReader},
util::{
Align, array_ref,
read::{read_arc_at, read_arc_slice_at, read_at, read_with_zero_fill},
static_assert,
},
write::{DataCallback, DiscFinalization, DiscWriterWeight, FormatOptions, ProcessOptions},
};
pub const TGC_MAGIC: MagicBytes = [0xae, 0x0f, 0x38, 0xa2];
/// TGC header (big endian)
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[derive(Clone, Debug, PartialEq, FromBytes, IntoBytes, Immutable, KnownLayout)]
#[repr(C, align(4))]
struct TGCHeader {
/// Magic bytes
@ -50,107 +58,278 @@ struct TGCHeader {
banner_offset: U32,
/// Size of the banner
banner_size: U32,
/// Original user data offset in the GCM
gcm_user_offset: U32,
/// Start of user files in the original GCM
gcm_files_start: U32,
}
static_assert!(size_of::<TGCHeader>() == 0x38);
const GCM_HEADER_SIZE: usize = 0x100000;
#[derive(Clone)]
pub struct DiscIOTGC {
inner: SplitFileReader,
header: TGCHeader,
fst: Box<[u8]>,
pub struct BlockReaderTGC {
inner: GCPartitionStream<FileCallbackTGC>,
}
impl DiscIOTGC {
pub fn new(filename: &Path) -> Result<Box<Self>> {
let mut inner = SplitFileReader::new(filename)?;
impl BlockReaderTGC {
pub fn new(mut inner: Box<dyn DiscStream>) -> Result<Box<dyn BlockReader>> {
// Read header
let header: TGCHeader = read_from(&mut inner).context("Reading TGC header")?;
let header: TGCHeader = read_at(inner.as_mut(), 0).context("Reading TGC header")?;
if header.magic != TGC_MAGIC {
return Err(Error::DiscFormat("Invalid TGC magic".to_string()));
}
let disc_size = (header.gcm_files_start.get() + header.user_size.get()) as u64;
// Read FST and adjust offsets
inner
.seek(SeekFrom::Start(header.fst_offset.get() as u64))
.context("Seeking to TGC FST")?;
let mut fst = read_box_slice(&mut inner, header.fst_size.get() as usize)
.context("Reading TGC FST")?;
let root_node = Node::ref_from_prefix(&fst)
.ok_or_else(|| Error::DiscFormat("Invalid TGC FST".to_string()))?;
let node_count = root_node.length() as usize;
let (nodes, _) = Node::mut_slice_from_prefix(&mut fst, node_count)
.ok_or_else(|| Error::DiscFormat("Invalid TGC FST".to_string()))?;
for node in nodes {
// Read GCM header
let raw_header = read_arc_at::<[u8; GCM_HEADER_SIZE], _>(
inner.as_mut(),
header.header_offset.get() as u64,
)
.context("Reading GCM header")?;
let disc_header =
DiscHeader::ref_from_bytes(array_ref![raw_header, 0, size_of::<DiscHeader>()])
.expect("Invalid disc header alignment");
let disc_header = disc_header.clone();
let boot_header =
BootHeader::ref_from_bytes(array_ref![raw_header, BB2_OFFSET, size_of::<BootHeader>()])
.expect("Invalid boot header alignment");
let boot_header = boot_header.clone();
// Read DOL
let raw_dol = read_arc_slice_at::<u8, _>(
inner.as_mut(),
header.dol_size.get() as usize,
header.dol_offset.get() as u64,
)
.context("Reading DOL")?;
// Read FST
let raw_fst = read_arc_slice_at::<u8, _>(
inner.as_mut(),
header.fst_size.get() as usize,
header.fst_offset.get() as u64,
)
.context("Reading FST")?;
let fst = Fst::new(&raw_fst)?;
let mut write_info = Vec::with_capacity(5 + fst.num_files());
write_info.push(WriteInfo {
kind: WriteKind::Static(raw_header, "sys/header.bin"),
size: GCM_HEADER_SIZE as u64,
offset: 0,
});
write_info.push(WriteInfo {
kind: WriteKind::Static(raw_dol, "sys/main.dol"),
size: header.dol_size.get() as u64,
offset: boot_header.dol_offset(false),
});
write_info.push(WriteInfo {
kind: WriteKind::Static(raw_fst.clone(), "sys/fst.bin"),
size: header.fst_size.get() as u64,
offset: boot_header.fst_offset(false),
});
// Collect files
for (_, node, path) in fst.iter() {
if node.is_dir() {
continue;
}
write_info.push(WriteInfo {
kind: WriteKind::File(path),
size: node.length() as u64,
offset: node.offset(false),
});
}
write_info.sort_unstable_by(|a, b| a.offset.cmp(&b.offset).then(a.size.cmp(&b.size)));
let write_info = insert_junk_data(write_info, &boot_header);
let file_callback = FileCallbackTGC::new(inner, raw_fst, header);
let disc_id = *array_ref![disc_header.game_id, 0, 4];
let disc_num = disc_header.disc_num;
Ok(Box::new(Self {
inner: GCPartitionStream::new(
file_callback,
Arc::from(write_info),
disc_size,
disc_id,
disc_num,
),
}))
}
}
impl BlockReader for BlockReaderTGC {
fn read_block(&mut self, out: &mut [u8], sector: u32) -> io::Result<Block> {
let count = (out.len() / SECTOR_SIZE) as u32;
self.inner.set_position(sector as u64 * SECTOR_SIZE as u64);
let read = read_with_zero_fill(&mut self.inner, out)?;
Ok(Block::sectors(sector, count, if read == 0 { BlockKind::None } else { BlockKind::Raw }))
}
fn block_size(&self) -> u32 { SECTOR_SIZE as u32 }
fn meta(&self) -> DiscMeta {
DiscMeta { format: Format::Tgc, disc_size: Some(self.inner.len()), ..Default::default() }
}
}
#[derive(Clone)]
struct FileCallbackTGC {
inner: Box<dyn DiscStream>,
fst: Arc<[u8]>,
header: TGCHeader,
}
impl FileCallbackTGC {
fn new(inner: Box<dyn DiscStream>, fst: Arc<[u8]>, header: TGCHeader) -> Self {
Self { inner, fst, header }
}
}
impl FileCallback for FileCallbackTGC {
fn read_file(&mut self, out: &mut [u8], name: &str, offset: u64) -> io::Result<()> {
let fst = Fst::new(&self.fst).map_err(io::Error::other)?;
let (_, node) = fst.find(name).ok_or_else(|| {
io::Error::new(io::ErrorKind::NotFound, format!("File not found in FST: {}", name))
})?;
if !node.is_file() {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Path is a directory: {}", name),
));
}
// Calculate file offset in TGC
let file_start = (node.offset(false) as u32 - self.header.gcm_files_start.get())
+ self.header.user_offset.get();
self.inner.read_exact_at(out, file_start as u64 + offset)?;
Ok(())
}
}
#[derive(Clone)]
pub struct DiscWriterTGC {
inner: Box<dyn PartitionReader>,
header: TGCHeader,
header_data: Bytes,
output_size: u64,
}
impl DiscWriterTGC {
pub fn new(reader: DiscReader, options: &FormatOptions) -> Result<Box<dyn DiscWriter>> {
if options.format != Format::Tgc {
return Err(Error::DiscFormat("Invalid format for TGC writer".to_string()));
}
if options.compression != Compression::None {
return Err(Error::DiscFormat("TGC does not support compression".to_string()));
}
let mut inner =
reader.open_partition_kind(PartitionKind::Data, &PartitionOptions::default())?;
// Read GCM header
let mut raw_header = <[u8; GCM_HEADER_SIZE]>::new_box_zeroed()?;
inner.read_exact(raw_header.as_mut()).context("Reading GCM header")?;
let boot_header =
BootHeader::ref_from_bytes(array_ref![raw_header, BB2_OFFSET, size_of::<BootHeader>()])
.expect("Invalid boot header alignment");
// Read DOL
let raw_dol = read_dol(inner.as_mut(), boot_header, false)?;
let raw_fst = read_fst(inner.as_mut(), boot_header, false)?;
// Parse FST
let fst = Fst::new(&raw_fst)?;
let mut gcm_files_start = u32::MAX;
for (_, node, _) in fst.iter() {
if node.is_file() {
node.offset = node.offset - header.gcm_user_offset
+ (header.user_offset - header.header_offset);
let start = node.offset(false) as u32;
if start < gcm_files_start {
gcm_files_start = start;
}
}
}
Ok(Box::new(Self { inner, header, fst }))
// Layout system files
let gcm_header_offset = SECTOR_SIZE as u32;
let fst_offset = gcm_header_offset + GCM_HEADER_SIZE as u32;
let dol_offset = (fst_offset + boot_header.fst_size.get()).align_up(32);
let user_size =
boot_header.user_offset.get() + boot_header.user_size.get() - gcm_files_start;
let user_end = (dol_offset + raw_dol.len() as u32 + user_size).align_up(SECTOR_SIZE as u32);
let user_offset = user_end - user_size;
let header = TGCHeader {
magic: TGC_MAGIC,
version: 0.into(),
header_offset: gcm_header_offset.into(),
header_size: (GCM_HEADER_SIZE as u32).into(),
fst_offset: fst_offset.into(),
fst_size: boot_header.fst_size,
fst_max_size: boot_header.fst_max_size,
dol_offset: dol_offset.into(),
dol_size: (raw_dol.len() as u32).into(),
user_offset: user_offset.into(),
user_size: user_size.into(),
banner_offset: 0.into(),
banner_size: 0.into(),
gcm_files_start: gcm_files_start.into(),
};
let mut buffer = BytesMut::with_capacity(user_offset as usize);
buffer.put_slice(header.as_bytes());
buffer.put_bytes(0, gcm_header_offset as usize - buffer.len());
// Write GCM header
buffer.put_slice(raw_header.as_ref());
buffer.put_bytes(0, fst_offset as usize - buffer.len());
// Write FST
buffer.put_slice(raw_fst.as_ref());
buffer.put_bytes(0, dol_offset as usize - buffer.len());
// Write DOL
buffer.put_slice(raw_dol.as_ref());
buffer.put_bytes(0, user_offset as usize - buffer.len());
let header_data = buffer.freeze();
Ok(Box::new(Self { inner, header, header_data, output_size: user_end as u64 }))
}
}
impl BlockIO for DiscIOTGC {
fn read_block_internal(
&mut self,
out: &mut [u8],
block: u32,
_partition: Option<&PartitionInfo>,
) -> io::Result<Block> {
let offset = self.header.header_offset.get() as u64 + block as u64 * SECTOR_SIZE as u64;
let total_size = self.inner.len();
if offset >= total_size {
// End of file
return Ok(Block::Zero);
impl DiscWriter for DiscWriterTGC {
fn process(
&self,
data_callback: &mut DataCallback,
_options: &ProcessOptions,
) -> Result<DiscFinalization> {
let mut data_position = self.header.user_offset.get() as u64;
data_callback(self.header_data.clone(), data_position, self.output_size)
.context("Failed to write TGC header")?;
// Write user data serially
let mut inner = self.inner.clone();
inner
.seek(SeekFrom::Start(self.header.gcm_files_start.get() as u64))
.context("Seeking to GCM files start")?;
loop {
// TODO use DiscReader::fill_buf_internal
let buf = inner
.fill_buf()
.with_context(|| format!("Reading disc data at offset {data_position}"))?;
let len = buf.len();
if len == 0 {
break;
}
data_position += len as u64;
data_callback(Bytes::copy_from_slice(buf), data_position, self.output_size)
.context("Failed to write disc data")?;
inner.consume(len);
}
self.inner.seek(SeekFrom::Start(offset))?;
if offset + SECTOR_SIZE as u64 > total_size {
// If the last block is not a full sector, fill the rest with zeroes
let read = (total_size - offset) as usize;
self.inner.read_exact(&mut out[..read])?;
out[read..].fill(0);
} else {
self.inner.read_exact(out)?;
}
// Adjust internal GCM header
if block == 0 {
let partition_header = PartitionHeader::mut_from(
&mut out[size_of::<DiscHeader>()
..size_of::<DiscHeader>() + size_of::<PartitionHeader>()],
)
.unwrap();
partition_header.dol_offset = self.header.dol_offset - self.header.header_offset;
partition_header.fst_offset = self.header.fst_offset - self.header.header_offset;
}
// Copy modified FST to output
if offset + out.len() as u64 > self.header.fst_offset.get() as u64
&& offset < self.header.fst_offset.get() as u64 + self.header.fst_size.get() as u64
{
let out_offset = (self.header.fst_offset.get() as u64).saturating_sub(offset) as usize;
let fst_offset = offset.saturating_sub(self.header.fst_offset.get() as u64) as usize;
let copy_len =
(out.len() - out_offset).min(self.header.fst_size.get() as usize - fst_offset);
out[out_offset..out_offset + copy_len]
.copy_from_slice(&self.fst[fst_offset..fst_offset + copy_len]);
}
Ok(Block::Raw)
Ok(DiscFinalization::default())
}
fn block_size_internal(&self) -> u32 { SECTOR_SIZE as u32 }
fn progress_bound(&self) -> u64 { self.output_size }
fn meta(&self) -> DiscMeta {
DiscMeta {
format: Format::Tgc,
lossless: true,
disc_size: Some(self.inner.len() - self.header.header_offset.get() as u64),
..Default::default()
}
}
fn weight(&self) -> DiscWriterWeight { DiscWriterWeight::Light }
}
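
The layout math above rounds offsets up with the crate's `Align` helper; for power-of-two alignments this is the usual mask trick. A free-function sketch (the crate exposes it as a trait method, and its exact signature may differ):

```rust
/// Round `x` up to the next multiple of `align`, which must be a power of two.
fn align_up(x: u32, align: u32) -> u32 {
    debug_assert!(align.is_power_of_two());
    (x + align - 1) & !(align - 1)
}

// align_up(0x1234, 32) == 0x1240; align_up(0x8000, 0x8000) == 0x8000
```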

View File

@ -1,33 +1,47 @@
use std::{
io,
io::{Read, Seek, SeekFrom},
io::{Seek, SeekFrom},
mem::size_of,
path::Path,
sync::Arc,
};
use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes};
use bytes::{BufMut, Bytes, BytesMut};
use zerocopy::{FromBytes, FromZeros, Immutable, IntoBytes, KnownLayout, big_endian::*};
use crate::{
io::{
block::{Block, BlockIO, PartitionInfo},
nkit::NKitHeader,
split::SplitFileReader,
DiscMeta, Format, MagicBytes,
},
util::read::{read_box_slice, read_from},
Error, Result, ResultContext,
common::{Compression, Format, MagicBytes},
disc::{
SECTOR_SIZE,
reader::DiscReader,
writer::{
BlockProcessor, BlockResult, CheckBlockResult, DiscWriter, check_block, par_process,
read_block,
},
},
io::{
block::{Block, BlockKind, BlockReader, WBFS_MAGIC},
nkit::{JunkBits, NKitHeader},
},
read::{DiscMeta, DiscStream},
util::{
array_ref,
digest::DigestManager,
lfg::LaggedFibonacci,
read::{read_arc_slice_at, read_at, read_box_slice_at},
},
write::{DataCallback, DiscFinalization, DiscWriterWeight, FormatOptions, ProcessOptions},
};
pub const WBFS_MAGIC: MagicBytes = *b"WBFS";
#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[derive(Debug, Clone, PartialEq, FromBytes, IntoBytes, Immutable, KnownLayout)]
#[repr(C, align(4))]
struct WBFSHeader {
magic: MagicBytes,
num_sectors: U32,
sector_size_shift: u8,
block_size_shift: u8,
_pad: [u8; 2],
version: u8,
_pad: u8,
}
impl WBFSHeader {
@ -35,44 +49,31 @@ impl WBFSHeader {
fn block_size(&self) -> u32 { 1 << self.block_size_shift }
// fn align_lba(&self, x: u32) -> u32 { (x + self.sector_size() - 1) & !(self.sector_size() - 1) }
//
// fn num_wii_sectors(&self) -> u32 {
// (self.num_sectors.get() / SECTOR_SIZE as u32) * self.sector_size()
// }
//
// fn max_wii_sectors(&self) -> u32 { NUM_WII_SECTORS }
//
// fn num_wbfs_sectors(&self) -> u32 {
// self.num_wii_sectors() >> (self.wbfs_sector_size_shift - 15)
// }
fn max_blocks(&self) -> u32 { NUM_WII_SECTORS >> (self.block_size_shift - 15) }
}
const DISC_HEADER_SIZE: usize = 0x100;
const NUM_WII_SECTORS: u32 = 143432 * 2; // Double layer discs
const NKIT_HEADER_OFFSET: u64 = 0x10000;
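
Worked numbers for the shift-based size math above, using the 2 MiB block size that `DEFAULT_BLOCK_SIZE` below selects (a sketch of the arithmetic only):

```rust
#[test]
fn wbfs_size_math() {
    const NUM_WII_SECTORS: u32 = 143432 * 2; // 286864 sectors on a double-layer disc
    let block_size_shift = 21u8; // 1 << 21 == 0x200000 (2 MiB)
    assert_eq!(1u32 << block_size_shift, 0x200000);
    // max_blocks() == NUM_WII_SECTORS >> (block_size_shift - 15)
    assert_eq!(NUM_WII_SECTORS >> (block_size_shift - 15), 4482);
}
```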
#[derive(Clone)]
pub struct DiscIOWBFS {
inner: SplitFileReader,
pub struct BlockReaderWBFS {
inner: Box<dyn DiscStream>,
/// WBFS header
header: WBFSHeader,
/// Map of Wii LBAs to WBFS LBAs
block_map: Box<[U16]>,
block_map: Arc<[U16]>,
/// Optional NKit header
nkit_header: Option<NKitHeader>,
}
impl DiscIOWBFS {
pub fn new(filename: &Path) -> Result<Box<Self>> {
let mut inner = SplitFileReader::new(filename)?;
let header: WBFSHeader = read_from(&mut inner).context("Reading WBFS header")?;
impl BlockReaderWBFS {
pub fn new(mut inner: Box<dyn DiscStream>) -> Result<Box<Self>> {
let header: WBFSHeader = read_at(inner.as_mut(), 0).context("Reading WBFS header")?;
if header.magic != WBFS_MAGIC {
return Err(Error::DiscFormat("Invalid WBFS magic".to_string()));
}
let file_len = inner.len();
let file_len = inner.stream_len().context("Determining stream length")?;
let expected_file_len = header.num_sectors.get() as u64 * header.sector_size() as u64;
if file_len != expected_file_len {
return Err(Error::DiscFormat(format!(
@ -81,9 +82,12 @@ impl DiscIOWBFS {
)));
}
let disc_table: Box<[u8]> =
read_box_slice(&mut inner, header.sector_size() as usize - size_of::<WBFSHeader>())
.context("Reading WBFS disc table")?;
let disc_table: Box<[u8]> = read_box_slice_at(
inner.as_mut(),
header.sector_size() as usize - size_of::<WBFSHeader>(),
size_of::<WBFSHeader>() as u64,
)
.context("Reading WBFS disc table")?;
if disc_table[0] != 1 {
return Err(Error::DiscFormat("WBFS doesn't contain a disc".to_string()));
}
@ -92,52 +96,54 @@ impl DiscIOWBFS {
}
// Read WBFS LBA map
inner
.seek(SeekFrom::Start(header.sector_size() as u64 + DISC_HEADER_SIZE as u64))
.context("Seeking to WBFS LBA table")?; // Skip header
let block_map: Box<[U16]> = read_box_slice(&mut inner, header.max_blocks() as usize)
.context("Reading WBFS LBA table")?;
let block_map: Arc<[U16]> = read_arc_slice_at(
inner.as_mut(),
header.max_blocks() as usize,
header.sector_size() as u64 + DISC_HEADER_SIZE as u64,
)
.context("Reading WBFS LBA table")?;
// Read NKit header if present (always at 0x10000)
inner.seek(SeekFrom::Start(0x10000)).context("Seeking to NKit header")?;
let nkit_header = NKitHeader::try_read_from(&mut inner, header.block_size(), true);
let nkit_header = NKitHeader::try_read_from(
inner.as_mut(),
NKIT_HEADER_OFFSET,
header.block_size(),
true,
);
Ok(Box::new(Self { inner, header, block_map, nkit_header }))
}
}
impl BlockIO for DiscIOWBFS {
fn read_block_internal(
&mut self,
out: &mut [u8],
block: u32,
_partition: Option<&PartitionInfo>,
) -> io::Result<Block> {
impl BlockReader for BlockReaderWBFS {
fn read_block(&mut self, out: &mut [u8], sector: u32) -> io::Result<Block> {
let block_size = self.header.block_size();
if block >= self.header.max_blocks() {
return Ok(Block::Zero);
let block_idx = ((sector as u64 * SECTOR_SIZE as u64) / block_size as u64) as u32;
if block_idx >= self.header.max_blocks() {
// Out of bounds
return Ok(Block::new(block_idx, block_size, BlockKind::None));
}
// Find the block in the map
let phys_block = self.block_map[block as usize].get();
let phys_block = self.block_map[block_idx as usize].get();
if phys_block == 0 {
// Check if block is junk data
if self.nkit_header.as_ref().and_then(|h| h.is_junk_block(block)).unwrap_or(false) {
return Ok(Block::Junk);
if self.nkit_header.as_ref().and_then(|h| h.is_junk_block(block_idx)).unwrap_or(false) {
return Ok(Block::new(block_idx, block_size, BlockKind::Junk));
}
// Otherwise, read zeroes
return Ok(Block::Zero);
return Ok(Block::new(block_idx, block_size, BlockKind::Zero));
}
// Read block
let block_start = block_size as u64 * phys_block as u64;
self.inner.seek(SeekFrom::Start(block_start))?;
self.inner.read_exact(out)?;
Ok(Block::Raw)
self.inner.read_exact_at(out, block_start)?;
Ok(Block::new(block_idx, block_size, BlockKind::Raw))
}
fn block_size_internal(&self) -> u32 { self.header.block_size() }
fn block_size(&self) -> u32 { self.header.block_size() }
fn meta(&self) -> DiscMeta {
let mut result = DiscMeta {
@ -151,3 +157,228 @@ impl BlockIO for DiscIOWBFS {
result
}
}
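
`read_block` above converts an absolute Wii sector into a WBFS block index by byte position, so with a 2 MiB block size, 64 consecutive sectors share one block. A small check of that mapping (a sketch):

```rust
#[test]
fn sector_to_wbfs_block() {
    const SECTOR_SIZE: u64 = 0x8000; // Wii sector size (32 KiB)
    let block_size: u64 = 0x200000; // 0x200000 / 0x8000 == 64 sectors per block
    assert_eq!((0 * SECTOR_SIZE) / block_size, 0);
    assert_eq!((63 * SECTOR_SIZE) / block_size, 0);
    assert_eq!((64 * SECTOR_SIZE) / block_size, 1);
    assert_eq!((100 * SECTOR_SIZE) / block_size, 1);
}
```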
struct BlockProcessorWBFS {
inner: DiscReader,
header: WBFSHeader,
decrypted_block: Box<[u8]>,
lfg: LaggedFibonacci,
disc_id: [u8; 4],
disc_num: u8,
}
impl Clone for BlockProcessorWBFS {
fn clone(&self) -> Self {
let block_size = self.header.block_size() as usize;
Self {
inner: self.inner.clone(),
header: self.header.clone(),
decrypted_block: <[u8]>::new_box_zeroed_with_elems(block_size).unwrap(),
lfg: LaggedFibonacci::default(),
disc_id: self.disc_id,
disc_num: self.disc_num,
}
}
}
impl BlockProcessor for BlockProcessorWBFS {
type BlockMeta = CheckBlockResult;
fn process_block(&mut self, block_idx: u32) -> io::Result<BlockResult<Self::BlockMeta>> {
let block_size = self.header.block_size() as usize;
let input_position = block_idx as u64 * block_size as u64;
self.inner.seek(SeekFrom::Start(input_position))?;
let (block_data, disc_data) = read_block(&mut self.inner, block_size)?;
// Check if block is zeroed or junk
let result = match check_block(
disc_data.as_ref(),
&mut self.decrypted_block,
input_position,
self.inner.partitions(),
&mut self.lfg,
self.disc_id,
self.disc_num,
)? {
CheckBlockResult::Normal => {
BlockResult { block_idx, disc_data, block_data, meta: CheckBlockResult::Normal }
}
CheckBlockResult::Zeroed => BlockResult {
block_idx,
disc_data,
block_data: Bytes::new(),
meta: CheckBlockResult::Zeroed,
},
CheckBlockResult::Junk => BlockResult {
block_idx,
disc_data,
block_data: Bytes::new(),
meta: CheckBlockResult::Junk,
},
};
Ok(result)
}
}
#[derive(Clone)]
pub struct DiscWriterWBFS {
inner: DiscReader,
header: WBFSHeader,
disc_table: Box<[u8]>,
block_count: u16,
}
pub const DEFAULT_BLOCK_SIZE: u32 = 0x200000; // 2 MiB
impl DiscWriterWBFS {
pub fn new(mut inner: DiscReader, options: &FormatOptions) -> Result<Box<dyn DiscWriter>> {
if options.format != Format::Wbfs {
return Err(Error::DiscFormat("Invalid format for WBFS writer".to_string()));
}
if options.compression != Compression::None {
return Err(Error::DiscFormat("WBFS does not support compression".to_string()));
}
let block_size = options.block_size;
if block_size < SECTOR_SIZE as u32 || block_size % SECTOR_SIZE as u32 != 0 {
return Err(Error::DiscFormat("Invalid block size for WBFS".to_string()));
}
let sector_size = 512u32;
let disc_size = inner.disc_size();
let block_count = disc_size.div_ceil(block_size as u64);
if block_count > u16::MAX as u64 {
return Err(Error::DiscFormat("Block size too small".to_string()));
}
let block_count = block_count as u16;
// Create header
let header = WBFSHeader {
magic: WBFS_MAGIC,
num_sectors: 0.into(), // Written during finalization
sector_size_shift: sector_size.trailing_zeros() as u8,
block_size_shift: block_size.trailing_zeros() as u8,
version: 1,
_pad: 0,
};
// Create disc table
let mut disc_table =
<[u8]>::new_box_zeroed_with_elems(sector_size as usize - size_of::<WBFSHeader>())?;
disc_table[0] = 1;
let mut header_size = size_of::<WBFSHeader>();
header_size += size_of_val(disc_table.as_ref());
header_size += DISC_HEADER_SIZE;
header_size += header.max_blocks() as usize * size_of::<U16>();
if header_size > block_size as usize {
return Err(Error::Other("WBFS info too large for block".to_string()));
}
inner.rewind().context("Seeking to start")?;
Ok(Box::new(Self { inner, header, disc_table, block_count }))
}
}
impl DiscWriter for DiscWriterWBFS {
fn process(
&self,
data_callback: &mut DataCallback,
options: &ProcessOptions,
) -> Result<DiscFinalization> {
let block_size = self.header.block_size();
let max_blocks = self.header.max_blocks();
let mut block_map = <[U16]>::new_box_zeroed_with_elems(max_blocks as usize)?;
let disc_size = self.inner.disc_size();
let mut header_data = BytesMut::with_capacity(block_size as usize);
header_data.put_slice(self.header.as_bytes());
header_data.put_slice(self.disc_table.as_ref());
header_data.put_slice(&self.inner.header().as_bytes()[..DISC_HEADER_SIZE]);
header_data.put_slice(block_map.as_bytes());
header_data.resize(block_size as usize, 0);
data_callback(header_data.freeze(), 0, disc_size).context("Failed to write header")?;
// Determine junk data values
let disc_header = self.inner.header();
let disc_id = *array_ref![disc_header.game_id, 0, 4];
let disc_num = disc_header.disc_num;
// Create hashers
let digest = DigestManager::new(options);
let mut junk_bits = JunkBits::new(block_size);
let mut input_position = 0;
let mut phys_block = 1;
par_process(
BlockProcessorWBFS {
inner: self.inner.clone(),
header: self.header.clone(),
decrypted_block: <[u8]>::new_box_zeroed_with_elems(block_size as usize).unwrap(),
lfg: LaggedFibonacci::default(),
disc_id,
disc_num,
},
self.block_count as u32,
options.processor_threads,
|block| -> Result<()> {
// Update hashers
let disc_data_len = block.disc_data.len() as u64;
digest.send(block.disc_data);
// Check if block is zeroed or junk
match block.meta {
CheckBlockResult::Normal => {
block_map[block.block_idx as usize] = phys_block.into();
phys_block += 1;
}
CheckBlockResult::Zeroed => {}
CheckBlockResult::Junk => {
junk_bits.set(block.block_idx, true);
}
}
input_position += disc_data_len;
data_callback(block.block_data.clone(), input_position, disc_size)
.with_context(|| format!("Failed to write block {}", block.block_idx))?;
Ok(())
},
)?;
// Collect hash results
let digest_results = digest.finish();
let mut nkit_header = NKitHeader {
version: 2,
size: Some(disc_size),
crc32: None,
md5: None,
sha1: None,
xxh64: None,
junk_bits: Some(junk_bits),
encrypted: true,
};
nkit_header.apply_digests(&digest_results);
// Update header
let mut header = self.header.clone();
header.num_sectors = (((phys_block as u64 * header.block_size() as u64)
/ header.sector_size() as u64) as u32)
.into();
let mut header_data = BytesMut::with_capacity(block_size as usize);
header_data.put_slice(header.as_bytes());
header_data.put_slice(&self.disc_table);
header_data.put_slice(&self.inner.header().as_bytes()[..DISC_HEADER_SIZE]);
header_data.put_slice(block_map.as_bytes());
header_data.resize(NKIT_HEADER_OFFSET as usize, 0);
let mut w = header_data.writer();
nkit_header.write_to(&mut w).context("Writing NKit header")?;
let header_data = w.into_inner().freeze();
let mut finalization = DiscFinalization { header: header_data, ..Default::default() };
finalization.apply_digests(&digest_results);
Ok(finalization)
}
fn progress_bound(&self) -> u64 { self.inner.disc_size() }
fn weight(&self) -> DiscWriterWeight { DiscWriterWeight::Medium }
}

File diff suppressed because it is too large

View File

@ -1,16 +1,18 @@
#![allow(clippy::new_ret_no_self)]
#![warn(missing_docs)]
//! Library for traversing & reading Nintendo Optical Disc (GameCube and Wii) images.
//! Library for reading and writing Nintendo Optical Disc (GameCube and Wii) images.
//!
//! Originally based on the C++ library [nod](https://github.com/AxioDL/nod),
//! but does not currently support authoring.
//! but with extended format support and many additional features.
//!
//! Currently supported file formats:
//! - ISO (GCM)
//! - WIA / RVZ
//! - WBFS (+ NKit 2 lossless)
//! - CISO (+ NKit 2 lossless)
//! - NFS (Wii U VC)
//! - NFS (Wii U VC, read-only)
//! - GCZ
//! - TGC
//!
//! # Examples
//!
@ -19,17 +21,21 @@
//! ```no_run
//! use std::io::Read;
//!
//! use nod::{
//! common::PartitionKind,
//! read::{DiscOptions, DiscReader, PartitionOptions},
//! };
//!
//! // Open a disc image and the first data partition.
//! let disc = nod::Disc::new("path/to/file.iso")
//! .expect("Failed to open disc");
//! let mut partition = disc.open_partition_kind(nod::PartitionKind::Data)
//! let disc =
//! DiscReader::new("path/to/file.iso", &DiscOptions::default()).expect("Failed to open disc");
//! let mut partition = disc
//! .open_partition_kind(PartitionKind::Data, &PartitionOptions::default())
//! .expect("Failed to open data partition");
//!
//! // Read partition metadata and the file system table.
//! let meta = partition.meta()
//! .expect("Failed to read partition metadata");
//! let fst = meta.fst()
//! .expect("File system table is invalid");
//! let meta = partition.meta().expect("Failed to read partition metadata");
//! let fst = meta.fst().expect("File system table is invalid");
//!
//! // Find a file by path and read it into a string.
//! if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
@ -46,36 +52,109 @@
//! Converting a disc image to raw ISO:
//!
//! ```no_run
//! // Enable `rebuild_encryption` to ensure the output is a valid ISO.
//! let options = nod::OpenOptions { rebuild_encryption: true, ..Default::default() };
//! let mut disc = nod::Disc::new_with_options("path/to/file.rvz", &options)
//! .expect("Failed to open disc");
//! use nod::read::{DiscOptions, DiscReader, PartitionEncryption};
//!
//! // Read directly from the open disc and write to the output file.
//! let mut out = std::fs::File::create("output.iso")
//! .expect("Failed to create output file");
//! std::io::copy(&mut disc, &mut out)
//! .expect("Failed to write data");
//! let options = DiscOptions {
//! partition_encryption: PartitionEncryption::Original,
//! // Use 4 threads to preload data as the disc is read. This can speed up sequential reads,
//! // especially when the disc image format uses compression.
//! preloader_threads: 4,
//! };
//! // Open a disc image.
//! let mut disc = DiscReader::new("path/to/file.rvz", &options).expect("Failed to open disc");
//!
//! // Create a new output file.
//! let mut out = std::fs::File::create("output.iso").expect("Failed to create output file");
//! // Read directly from the DiscReader and write to the output file.
//! // NOTE: Any copy method that accepts `Read` and `Write` can be used here,
//! // such as `std::io::copy`. This example utilizes `BufRead` for efficiency,
//! // since `DiscReader` has its own internal buffer.
//! nod::util::buf_copy(&mut disc, &mut out).expect("Failed to write data");
//! ```
//!
//! Converting a disc image to RVZ:
//!
//! ```no_run
//! use std::{
//! fs::File,
//! io::{Seek, Write},
//! };
//!
//! use nod::{
//! common::{Compression, Format},
//! read::{DiscOptions, DiscReader, PartitionEncryption},
//! write::{DiscWriter, DiscWriterWeight, FormatOptions, ProcessOptions},
//! };
//!
//! let open_options = DiscOptions {
//! partition_encryption: PartitionEncryption::Original,
//! // Use 4 threads to preload data as the disc is read. This can speed up sequential reads,
//! // especially when the disc image format uses compression.
//! preloader_threads: 4,
//! };
//! // Open a disc image.
//! let disc = DiscReader::new("path/to/file.iso", &open_options).expect("Failed to open disc");
//! // Create a new output file.
//! let mut output_file = File::create("output.rvz").expect("Failed to create output file");
//!
//! let options = FormatOptions {
//! format: Format::Rvz,
//! compression: Compression::Zstandard(19),
//! block_size: Format::Rvz.default_block_size(),
//! };
//! // Create a disc writer with the desired output format.
//! let mut writer = DiscWriter::new(disc, &options).expect("Failed to create writer");
//!
//! // Ideally we'd base this on the actual number of CPUs available.
//! // This is just an example.
//! let num_threads = match writer.weight() {
//! DiscWriterWeight::Light => 0,
//! DiscWriterWeight::Medium => 4,
//! DiscWriterWeight::Heavy => 12,
//! };
//! let process_options = ProcessOptions {
//! processor_threads: num_threads,
//! // Enable checksum calculation for the _original_ disc data.
//! // Digests will be stored in the output file for verification, if supported.
//! // They will also be returned in the finalization result.
//! digest_crc32: true,
//! digest_md5: false, // MD5 is slow, skip it
//! digest_sha1: true,
//! digest_xxh64: true,
//! };
//! // Start processing the disc image.
//! let finalization = writer
//! .process(
//! |data, _progress, _total| {
//! output_file.write_all(data.as_ref())?;
//! // One could display progress here, if desired.
//! Ok(())
//! },
//! &process_options,
//! )
//! .expect("Failed to process disc image");
//!
//! // Some disc writers calculate data during processing.
//! // If the finalization returns header data, seek to the beginning of the file and write it.
//! if !finalization.header.is_empty() {
//! output_file.rewind().expect("Failed to seek");
//! output_file.write_all(finalization.header.as_ref()).expect("Failed to write header");
//! }
//! output_file.flush().expect("Failed to flush output file");
//!
//! // Display the calculated digests.
//! println!("CRC32: {:08X}", finalization.crc32.unwrap());
//! // ...
//! ```
use std::{
io::{Read, Seek},
path::Path,
};
pub use disc::{
ApploaderHeader, DiscHeader, DolHeader, PartitionBase, PartitionHeader, PartitionKind,
PartitionMeta, BI2_SIZE, BOOT_SIZE, SECTOR_SIZE,
};
pub use fst::{Fst, Node, NodeKind};
pub use io::{block::PartitionInfo, Compression, DiscMeta, Format};
pub use streams::ReadStream;
mod disc;
mod fst;
mod io;
mod streams;
mod util;
// [WIP] Disc image building is incomplete and not yet exposed.
pub(crate) mod build;
pub mod common;
pub mod disc;
pub(crate) mod io;
pub mod read;
pub mod util;
pub mod write;
/// Error types for nod.
#[derive(thiserror::Error, Debug)]
@ -84,7 +163,7 @@ pub enum Error {
#[error("disc format error: {0}")]
DiscFormat(String),
/// A general I/O error.
#[error("I/O error: {0}")]
#[error("{0}")]
Io(String, #[source] std::io::Error),
/// An unknown error.
#[error("error: {0}")]
@ -92,13 +171,25 @@ pub enum Error {
}
impl From<&str> for Error {
#[inline]
fn from(s: &str) -> Error { Error::Other(s.to_string()) }
}
impl From<String> for Error {
#[inline]
fn from(s: String) -> Error { Error::Other(s) }
}
impl From<zerocopy::AllocError> for Error {
#[inline]
fn from(_: zerocopy::AllocError) -> Error {
Error::Io(
"allocation failed".to_string(),
std::io::Error::from(std::io::ErrorKind::OutOfMemory),
)
}
}
/// Helper result type for [`Error`].
pub type Result<T, E = Error> = core::result::Result<T, E>;
@ -109,6 +200,7 @@ pub trait ErrorContext {
}
impl ErrorContext for std::io::Error {
#[inline]
fn context(self, context: impl Into<String>) -> Error { Error::Io(context.into(), self) }
}
@ -125,81 +217,45 @@ pub trait ResultContext<T> {
impl<T, E> ResultContext<T> for Result<T, E>
where E: ErrorContext
{
#[inline]
fn context(self, context: impl Into<String>) -> Result<T> {
self.map_err(|e| e.context(context))
}
#[inline]
fn with_context<F>(self, f: F) -> Result<T>
where F: FnOnce() -> String {
self.map_err(|e| e.context(f()))
}
}
/// Options for opening a disc image.
#[derive(Default, Debug, Clone)]
pub struct OpenOptions {
/// Wii: Rebuild partition data encryption and hashes if the underlying format stores data
/// decrypted or with hashes removed. (e.g. WIA/RVZ, NFS)
pub rebuild_encryption: bool,
/// Wii: Validate partition data hashes while reading the disc image.
pub validate_hashes: bool,
pub(crate) trait IoErrorContext {
fn io_context(self, context: impl Into<String>) -> std::io::Error;
}
/// An open disc image and read stream.
///
/// This is the primary entry point for reading disc images.
pub struct Disc {
reader: disc::reader::DiscReader,
options: OpenOptions,
}
impl Disc {
/// Opens a disc image from a file path.
pub fn new<P: AsRef<Path>>(path: P) -> Result<Disc> {
Disc::new_with_options(path, &OpenOptions::default())
}
/// Opens a disc image from a file path with custom options.
pub fn new_with_options<P: AsRef<Path>>(path: P, options: &OpenOptions) -> Result<Disc> {
let io = io::block::open(path.as_ref())?;
let reader = disc::reader::DiscReader::new(io, options)?;
Ok(Disc { reader, options: options.clone() })
}
/// The disc's primary header.
pub fn header(&self) -> &DiscHeader { self.reader.header() }
/// Returns extra metadata included in the disc file format, if any.
pub fn meta(&self) -> DiscMeta { self.reader.meta() }
/// The disc's size in bytes, or an estimate if not stored by the format.
pub fn disc_size(&self) -> u64 { self.reader.disc_size() }
/// A list of Wii partitions on the disc.
///
/// **GameCube**: This will return an empty slice.
pub fn partitions(&self) -> &[PartitionInfo] { self.reader.partitions() }
/// Opens a decrypted partition read stream for the specified partition index.
///
/// **GameCube**: `index` must always be 0.
pub fn open_partition(&self, index: usize) -> Result<Box<dyn PartitionBase>> {
self.reader.open_partition(index, &self.options)
}
/// Opens a decrypted partition read stream for the first partition matching
/// the specified kind.
///
/// **GameCube**: `kind` must always be [`PartitionKind::Data`].
pub fn open_partition_kind(&self, kind: PartitionKind) -> Result<Box<dyn PartitionBase>> {
self.reader.open_partition_kind(kind, &self.options)
impl IoErrorContext for std::io::Error {
#[inline]
fn io_context(self, context: impl Into<String>) -> std::io::Error {
std::io::Error::new(self.kind(), self.context(context))
}
}
impl Read for Disc {
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { self.reader.read(buf) }
pub(crate) trait IoResultContext<T> {
fn io_context(self, context: impl Into<String>) -> std::io::Result<T>;
fn io_with_context<F>(self, f: F) -> std::io::Result<T>
where F: FnOnce() -> String;
}
impl Seek for Disc {
fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result<u64> { self.reader.seek(pos) }
impl<T> IoResultContext<T> for std::io::Result<T> {
#[inline]
fn io_context(self, context: impl Into<String>) -> std::io::Result<T> {
self.map_err(|e| e.io_context(context))
}
#[inline]
fn io_with_context<F>(self, f: F) -> std::io::Result<T>
where F: FnOnce() -> String {
self.map_err(|e| e.io_context(f()))
}
}
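
These crate-internal helpers attach a human-readable message while preserving the original `io::ErrorKind`. A hypothetical caller (sketch; `read_magic` is not part of the crate):

```rust
use std::{fs::File, io::Read};

// Assumes `use crate::IoResultContext;` is in scope (the trait is crate-internal).
fn read_magic(file: &mut File) -> std::io::Result<[u8; 4]> {
    let mut magic = [0u8; 4];
    file.read_exact(&mut magic).io_context("Reading magic bytes")?;
    Ok(magic)
}
```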

nod/src/read.rs (new file, 506 lines)
View File

@ -0,0 +1,506 @@
//! [`DiscReader`] and associated types.
use std::{
io::{self, BufRead, Read, Seek},
path::Path,
sync::{Arc, Mutex},
};
use dyn_clone::DynClone;
use zerocopy::FromBytes;
use crate::{
Result,
common::{Compression, Format, PartitionInfo, PartitionKind},
disc,
disc::{
ApploaderHeader, BB2_OFFSET, BI2_SIZE, BOOT_SIZE, BootHeader, DebugHeader, DiscHeader,
DolHeader,
fst::{Fst, Node},
wii::{ContentMetadata, H3_TABLE_SIZE, REGION_SIZE, Ticket, TmdHeader},
},
io::block,
util::{WindowedReader, array_ref},
};
/// Wii partition encryption mode.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Hash)]
pub enum PartitionEncryption {
/// Partition encryption and hashes are rebuilt to match its original state,
/// if necessary. This is used for converting or verifying a disc image.
#[default]
Original,
/// Partition data will be encrypted if reading a decrypted disc image.
/// Modifies the disc header to mark partition data as encrypted.
ForceEncrypted,
/// Partition data will be decrypted if reading an encrypted disc image.
/// Modifies the disc header to mark partition data as decrypted.
ForceDecrypted,
/// Partition data will be decrypted if reading an encrypted disc image.
/// Modifies the disc header to mark partition data as decrypted.
/// Hashes are removed from the partition data.
ForceDecryptedNoHashes,
}
/// Options for opening a disc image.
#[derive(Default, Debug, Clone)]
pub struct DiscOptions {
/// Wii: Partition encryption mode. This affects how partition data appears when
/// reading directly from [`DiscReader`], and can be used to convert between
/// encrypted and decrypted disc images.
pub partition_encryption: PartitionEncryption,
/// Number of threads to use for preloading data as the disc is read. This
/// is particularly useful when reading the disc image sequentially, as it
/// can perform decompression and rebuilding in parallel with the main
/// read thread. The default value of 0 disables preloading.
pub preloader_threads: usize,
}
/// Options for opening a partition.
#[derive(Default, Debug, Clone)]
pub struct PartitionOptions {
/// Wii: Validate data hashes while reading the partition, if available.
pub validate_hashes: bool,
}
/// Trait for reading disc images.
///
/// Disc images are read in blocks, often in the hundred kilobyte to several megabyte range,
/// making the standard [`Read`] and [`Seek`] traits a poor fit for this use case. This trait
/// provides a simplified interface for reading disc images, with a focus on large, random
/// access reads.
///
/// For multithreading support, an implementation must be [`Send`] and [`Clone`].
/// [`Sync`] is _not_ required: the stream will be cloned if used in multiple threads.
///
/// Rather than implement this trait directly, you'll likely use one of the following
/// [`DiscReader`] functions:
/// - [`DiscReader::new`]: to open a disc image from a file path.
/// - [`DiscReader::new_stream`]: when you can provide a [`Box<dyn DiscStream>`].
/// - [`DiscReader::new_from_cloneable_read`]: when you can provide a [`Read`] + [`Seek`] +
/// [`Clone`] stream.
/// - [`DiscReader::new_from_non_cloneable_read`]: when you can provide a [`Read`] + [`Seek`]
/// stream. (Accesses will be synchronized, limiting multithreaded performance.)
pub trait DiscStream: DynClone + Send {
/// Reads the exact number of bytes required to fill `buf` from the given offset.
fn read_exact_at(&mut self, buf: &mut [u8], offset: u64) -> io::Result<()>;
/// Returns the length of the stream in bytes.
fn stream_len(&mut self) -> io::Result<u64>;
}
dyn_clone::clone_trait_object!(DiscStream);
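
A minimal hand-rolled implementation over in-memory bytes, to show the trait contract (a sketch; the blanket impl below already covers any `AsRef<[u8]> + Send + Clone` type, so this is for illustration only):

```rust
use std::{io, sync::Arc};

#[derive(Clone)]
struct MemStream(Arc<[u8]>);

impl DiscStream for MemStream {
    fn read_exact_at(&mut self, buf: &mut [u8], offset: u64) -> io::Result<()> {
        match (offset as usize).checked_add(buf.len()) {
            Some(end) if end <= self.0.len() => {
                buf.copy_from_slice(&self.0[offset as usize..end]);
                Ok(())
            }
            _ => Err(io::Error::from(io::ErrorKind::UnexpectedEof)),
        }
    }

    fn stream_len(&mut self) -> io::Result<u64> { Ok(self.0.len() as u64) }
}
```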
impl<T> DiscStream for T
where T: AsRef<[u8]> + Send + Clone
{
fn read_exact_at(&mut self, buf: &mut [u8], offset: u64) -> io::Result<()> {
let data = self.as_ref();
let len = data.len() as u64;
let end = offset + buf.len() as u64;
if offset >= len || end > len {
return Err(io::Error::from(io::ErrorKind::UnexpectedEof));
}
buf.copy_from_slice(&data[offset as usize..end as usize]);
Ok(())
}
fn stream_len(&mut self) -> io::Result<u64> { Ok(self.as_ref().len() as u64) }
}
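
Thanks to this blanket impl, a disc image already loaded into memory can be handed straight to `DiscReader::new_stream`; a hedged sketch (the path is a placeholder):

```rust
use nod::read::{DiscOptions, DiscReader};

let data: Vec<u8> = std::fs::read("path/to/file.iso").expect("Failed to read file");
let disc = DiscReader::new_stream(Box::new(data), &DiscOptions::default())
    .expect("Failed to open disc");
```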
#[derive(Debug, Clone)]
pub(crate) struct CloneableStream<T>(pub T)
where T: Read + Seek + Clone + Send;
impl<T> CloneableStream<T>
where T: Read + Seek + Clone + Send
{
pub fn new(stream: T) -> Self { Self(stream) }
}
impl<T> DiscStream for CloneableStream<T>
where T: Read + Seek + Clone + Send
{
fn read_exact_at(&mut self, buf: &mut [u8], offset: u64) -> io::Result<()> {
self.0.seek(io::SeekFrom::Start(offset))?;
self.0.read_exact(buf)
}
fn stream_len(&mut self) -> io::Result<u64> { self.0.seek(io::SeekFrom::End(0)) }
}
#[derive(Debug)]
pub(crate) struct NonCloneableStream<T>(pub Arc<Mutex<T>>)
where T: Read + Seek + Send;
impl<T> Clone for NonCloneableStream<T>
where T: Read + Seek + Send
{
fn clone(&self) -> Self { Self(self.0.clone()) }
}
impl<T> NonCloneableStream<T>
where T: Read + Seek + Send
{
pub fn new(stream: T) -> Self { Self(Arc::new(Mutex::new(stream))) }
fn lock(&self) -> io::Result<std::sync::MutexGuard<'_, T>> {
self.0.lock().map_err(|_| io::Error::other("NonCloneableStream mutex poisoned"))
}
}
impl<T> DiscStream for NonCloneableStream<T>
where T: Read + Seek + Send
{
fn read_exact_at(&mut self, buf: &mut [u8], offset: u64) -> io::Result<()> {
let mut stream = self.lock()?;
stream.seek(io::SeekFrom::Start(offset))?;
stream.read_exact(buf)
}
fn stream_len(&mut self) -> io::Result<u64> {
let mut stream = self.lock()?;
stream.seek(io::SeekFrom::End(0))
}
}
/// An open disc image and read stream.
///
/// This is the primary entry point for reading disc images.
#[derive(Clone)]
#[repr(transparent)]
pub struct DiscReader(disc::reader::DiscReader);
impl DiscReader {
/// Opens a disc image from a file path.
pub fn new<P: AsRef<Path>>(path: P, options: &DiscOptions) -> Result<DiscReader> {
let io = block::open(path.as_ref())?;
let inner = disc::reader::DiscReader::new(io, options)?;
Ok(DiscReader(inner))
}
/// Opens a disc image from a [`DiscStream`]. This allows low-overhead, multithreaded
/// access to disc images stored in memory, archives, or other non-file sources.
///
/// See [`DiscStream`] for more information.
pub fn new_stream(stream: Box<dyn DiscStream>, options: &DiscOptions) -> Result<DiscReader> {
let io = block::new(stream)?;
let inner = disc::reader::DiscReader::new(io, options)?;
Ok(DiscReader(inner))
}
/// Opens a disc image from a [`Read`] + [`Seek`] stream that can be cloned.
///
/// The stream will be cloned for each thread that reads from it, allowing for multithreaded
/// access (e.g. for preloading blocks during reading or parallel block processing during
/// conversion).
pub fn new_from_cloneable_read<R>(stream: R, options: &DiscOptions) -> Result<DiscReader>
where R: Read + Seek + Clone + Send + 'static {
Self::new_stream(Box::new(CloneableStream::new(stream)), options)
}
/// Opens a disc image from a [`Read`] + [`Seek`] stream that cannot be cloned.
///
/// Multithreaded accesses will be synchronized, which will limit performance (e.g. for
/// preloading blocks during reading or parallel block processing during conversion).
pub fn new_from_non_cloneable_read<R>(stream: R, options: &DiscOptions) -> Result<DiscReader>
where R: Read + Seek + Send + 'static {
Self::new_stream(Box::new(NonCloneableStream::new(stream)), options)
}
/// Detects the format of a disc image from a read stream.
#[inline]
pub fn detect<R>(stream: &mut R) -> io::Result<Option<Format>>
where R: Read + ?Sized {
block::detect(stream)
}
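// Usage sketch: probe a file's format without constructing a DiscReader.
// "image.rvz" is a placeholder path.
//
// let mut file = std::io::BufReader::new(std::fs::File::open("image.rvz")?);
// match DiscReader::detect(&mut file)? {
//     Some(format) => println!("detected {:?}", format),
//     None => eprintln!("unrecognized disc image format"),
// }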
/// The disc's primary header.
#[inline]
pub fn header(&self) -> &DiscHeader { self.0.header() }
/// The Wii disc's region information.
///
/// **GameCube**: This will return `None`.
#[inline]
pub fn region(&self) -> Option<&[u8; REGION_SIZE]> { self.0.region() }
/// Returns extra metadata included in the disc file format, if any.
#[inline]
pub fn meta(&self) -> DiscMeta { self.0.meta() }
/// The disc's size in bytes, or an estimate if not stored by the format.
#[inline]
pub fn disc_size(&self) -> u64 { self.0.disc_size() }
/// A list of Wii partitions on the disc.
///
/// **GameCube**: This will return an empty slice.
#[inline]
pub fn partitions(&self) -> &[PartitionInfo] { self.0.partitions() }
/// Opens a decrypted partition read stream for the specified partition index.
///
/// **GameCube**: `index` must always be 0.
#[inline]
pub fn open_partition(
&self,
index: usize,
options: &PartitionOptions,
) -> Result<Box<dyn PartitionReader>> {
self.0.open_partition(index, options)
}
/// Opens a decrypted partition read stream for the first partition matching
/// the specified kind.
///
/// **GameCube**: `kind` must always be [`PartitionKind::Data`].
#[inline]
pub fn open_partition_kind(
&self,
kind: PartitionKind,
options: &PartitionOptions,
) -> Result<Box<dyn PartitionReader>> {
self.0.open_partition_kind(kind, options)
}
pub(crate) fn into_inner(self) -> disc::reader::DiscReader { self.0 }
}
impl BufRead for DiscReader {
#[inline]
fn fill_buf(&mut self) -> io::Result<&[u8]> { self.0.fill_buf() }
#[inline]
fn consume(&mut self, amt: usize) { self.0.consume(amt) }
}
impl Read for DiscReader {
#[inline]
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { self.0.read(buf) }
}
impl Seek for DiscReader {
#[inline]
fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> { self.0.seek(pos) }
}
/// Extra metadata about the underlying disc file format.
#[derive(Debug, Clone, Default)]
pub struct DiscMeta {
/// The disc file format.
pub format: Format,
/// The format's compression algorithm.
pub compression: Compression,
/// If the format uses blocks, the block size in bytes.
pub block_size: Option<u32>,
/// Whether Wii partitions are stored decrypted in the format.
pub decrypted: bool,
/// Whether the format omits Wii partition data hashes.
pub needs_hash_recovery: bool,
/// Whether the format supports recovering the original disc data losslessly.
pub lossless: bool,
/// The original disc's size in bytes, if stored by the format.
pub disc_size: Option<u64>,
/// The original disc's CRC32 hash, if stored by the format.
pub crc32: Option<u32>,
/// The original disc's MD5 hash, if stored by the format.
pub md5: Option<[u8; 16]>,
/// The original disc's SHA-1 hash, if stored by the format.
pub sha1: Option<[u8; 20]>,
/// The original disc's XXH64 hash, if stored by the format.
pub xxh64: Option<u64>,
}
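// Usage sketch: consumers typically branch on what the container stores, e.g.
// verifying a stored CRC32 only when the format provides one.
//
// let meta = disc.meta();
// if let Some(crc32) = meta.crc32 {
//     println!("stored CRC32: {:08X} (lossless: {})", crc32, meta.lossless);
// }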
/// An open disc partition.
pub trait PartitionReader: DynClone + BufRead + Seek + Send {
/// Whether this is a Wii partition. (GameCube otherwise)
fn is_wii(&self) -> bool;
/// Reads the partition header and file system table.
fn meta(&mut self) -> Result<PartitionMeta>;
}
/// A file reader borrowing a [`PartitionReader`].
pub type FileReader<'a> = WindowedReader<&'a mut dyn PartitionReader>;
/// A file reader owning a [`PartitionReader`].
pub type OwnedFileReader = WindowedReader<Box<dyn PartitionReader>>;
impl dyn PartitionReader + '_ {
/// Seeks the partition stream to the specified file system node
/// and returns a windowed stream.
///
/// # Examples
///
/// Basic usage:
/// ```no_run
/// use std::io::Read;
///
/// use nod::{
/// common::PartitionKind,
/// read::{DiscOptions, DiscReader, PartitionOptions},
/// };
///
/// fn main() -> nod::Result<()> {
/// let disc = DiscReader::new("path/to/file.iso", &DiscOptions::default())?;
/// let mut partition =
/// disc.open_partition_kind(PartitionKind::Data, &PartitionOptions::default())?;
/// let meta = partition.meta()?;
/// let fst = meta.fst()?;
/// if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
/// let mut s = String::new();
/// partition
/// .open_file(node)
/// .expect("Failed to open file stream")
/// .read_to_string(&mut s)
/// .expect("Failed to read file");
/// println!("{}", s);
/// }
/// Ok(())
/// }
/// ```
pub fn open_file(&mut self, node: Node) -> io::Result<FileReader<'_>> {
if !node.is_file() {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Node is not a file".to_string(),
));
}
let is_wii = self.is_wii();
FileReader::new(self, node.offset(is_wii), node.length() as u64)
}
}
impl dyn PartitionReader {
/// Consumes the partition instance and returns a windowed stream.
///
/// # Examples
///
/// ```no_run
/// use std::io::Read;
///
/// use nod::{
/// common::PartitionKind,
/// read::{DiscOptions, DiscReader, OwnedFileReader, PartitionOptions},
/// };
///
/// fn main() -> nod::Result<()> {
/// let disc = DiscReader::new("path/to/file.iso", &DiscOptions::default())?;
/// let mut partition =
/// disc.open_partition_kind(PartitionKind::Data, &PartitionOptions::default())?;
/// let meta = partition.meta()?;
/// let fst = meta.fst()?;
/// if let Some((_, node)) = fst.find("/disc.tgc") {
/// let file: OwnedFileReader = partition
///             .into_open_file(node) // Get an OwnedFileReader
/// .expect("Failed to open file stream");
/// // Open the inner disc image using the owned stream
/// let inner_disc = DiscReader::new_from_cloneable_read(file, &DiscOptions::default())
/// .expect("Failed to open inner disc");
/// // ...
/// }
/// Ok(())
/// }
/// ```
pub fn into_open_file(self: Box<Self>, node: Node) -> io::Result<OwnedFileReader> {
if !node.is_file() {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Node is not a file".to_string(),
));
}
let is_wii = self.is_wii();
OwnedFileReader::new(self, node.offset(is_wii), node.length() as u64)
}
}
dyn_clone::clone_trait_object!(PartitionReader);
/// Extra disc partition data. (DOL, FST, etc.)
#[derive(Clone, Debug)]
pub struct PartitionMeta {
/// Disc and boot header (boot.bin)
pub raw_boot: Arc<[u8; BOOT_SIZE]>,
/// Debug and region information (bi2.bin)
pub raw_bi2: Arc<[u8; BI2_SIZE]>,
/// Apploader (apploader.bin)
pub raw_apploader: Arc<[u8]>,
/// Main binary (main.dol)
pub raw_dol: Arc<[u8]>,
/// File system table (fst.bin)
pub raw_fst: Arc<[u8]>,
/// Ticket (ticket.bin, Wii only)
pub raw_ticket: Option<Arc<[u8]>>,
/// TMD (tmd.bin, Wii only)
pub raw_tmd: Option<Arc<[u8]>>,
/// Certificate chain (cert.bin, Wii only)
pub raw_cert_chain: Option<Arc<[u8]>>,
/// H3 hash table (h3.bin, Wii only)
pub raw_h3_table: Option<Arc<[u8; H3_TABLE_SIZE]>>,
}
impl PartitionMeta {
/// A view into the disc header.
#[inline]
pub fn disc_header(&self) -> &DiscHeader {
DiscHeader::ref_from_bytes(array_ref![self.raw_boot, 0, size_of::<DiscHeader>()])
.expect("Invalid disc header alignment")
}
/// A view into the debug header.
#[inline]
pub fn debug_header(&self) -> &DebugHeader {
DebugHeader::ref_from_bytes(array_ref![
self.raw_boot,
size_of::<DiscHeader>(),
size_of::<DebugHeader>()
])
.expect("Invalid debug header alignment")
}
/// A view into the boot header.
#[inline]
pub fn boot_header(&self) -> &BootHeader {
BootHeader::ref_from_bytes(array_ref![self.raw_boot, BB2_OFFSET, size_of::<BootHeader>()])
.expect("Invalid boot header alignment")
}
/// A view into the apploader header.
#[inline]
pub fn apploader_header(&self) -> &ApploaderHeader {
ApploaderHeader::ref_from_prefix(&self.raw_apploader)
.expect("Invalid apploader alignment")
.0
}
/// A view into the file system table (FST).
#[inline]
pub fn fst(&self) -> Result<Fst<'_>, &'static str> { Fst::new(&self.raw_fst) }
/// A view into the DOL header.
#[inline]
pub fn dol_header(&self) -> &DolHeader {
DolHeader::ref_from_prefix(&self.raw_dol).expect("Invalid DOL alignment").0
}
/// A view into the ticket. (Wii only)
#[inline]
pub fn ticket(&self) -> Option<&Ticket> {
let raw_ticket = self.raw_ticket.as_deref()?;
Some(Ticket::ref_from_bytes(raw_ticket).expect("Invalid ticket alignment"))
}
/// A view into the TMD. (Wii only)
#[inline]
pub fn tmd_header(&self) -> Option<&TmdHeader> {
let raw_tmd = self.raw_tmd.as_deref()?;
Some(TmdHeader::ref_from_prefix(raw_tmd).expect("Invalid TMD alignment").0)
}
/// A view into the TMD content metadata. (Wii only)
#[inline]
pub fn content_metadata(&self) -> Option<&[ContentMetadata]> {
let raw_cmd = &self.raw_tmd.as_deref()?[size_of::<TmdHeader>()..];
Some(<[ContentMetadata]>::ref_from_bytes(raw_cmd).expect("Invalid CMD alignment"))
}
}
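// Usage sketch: the raw_* fields map to the classic extracted file names, so a
// simple extractor can write them out directly. `partition` is an open
// PartitionReader; error handling is elided.
//
// let meta = partition.meta()?;
// std::fs::write("boot.bin", meta.raw_boot.as_ref())?;
// std::fs::write("bi2.bin", meta.raw_bi2.as_ref())?;
// std::fs::write("fst.bin", meta.raw_fst.as_ref())?;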


@ -1,80 +0,0 @@
//! Common stream types
use std::{
io,
io::{Read, Seek, SeekFrom},
};
/// A helper trait for seekable read streams.
pub trait ReadStream: Read + Seek {
/// Creates a windowed read sub-stream with offset and size.
///
/// Seeks underlying stream immediately.
fn new_window(&mut self, offset: u64, size: u64) -> io::Result<SharedWindowedReadStream> {
self.seek(SeekFrom::Start(offset))?;
Ok(SharedWindowedReadStream { base: self.as_dyn(), begin: offset, end: offset + size })
}
/// Retrieves a type-erased reference to the stream.
fn as_dyn(&mut self) -> &mut dyn ReadStream;
}
impl<T> ReadStream for T
where T: Read + Seek
{
fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
}
/// A non-owning window into an existing [`ReadStream`].
pub struct SharedWindowedReadStream<'a> {
/// A reference to the base stream.
pub base: &'a mut dyn ReadStream,
/// The beginning of the window in bytes.
pub begin: u64,
/// The end of the window in bytes.
pub end: u64,
}
impl<'a> SharedWindowedReadStream<'a> {
/// Modifies the current window & seeks to the beginning of the window.
pub fn set_window(&mut self, begin: u64, end: u64) -> io::Result<()> {
self.base.seek(SeekFrom::Start(begin))?;
self.begin = begin;
self.end = end;
Ok(())
}
}
impl<'a> Read for SharedWindowedReadStream<'a> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let pos = self.stream_position()?;
let size = self.end - self.begin;
if pos == size {
return Ok(0);
}
self.base.read(if pos + buf.len() as u64 > size {
&mut buf[..(size - pos) as usize]
} else {
buf
})
}
}
impl<'a> Seek for SharedWindowedReadStream<'a> {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
let result = self.base.seek(match pos {
SeekFrom::Start(p) => SeekFrom::Start(self.begin + p),
SeekFrom::End(p) => SeekFrom::End(self.end as i64 + p),
SeekFrom::Current(_) => pos,
})?;
if result < self.begin || result > self.end {
Err(io::Error::from(io::ErrorKind::UnexpectedEof))
} else {
Ok(result - self.begin)
}
}
fn stream_position(&mut self) -> io::Result<u64> {
Ok(self.base.stream_position()? - self.begin)
}
}

nod/src/util/aes.rs Normal file

@ -0,0 +1,136 @@
use tracing::instrument;
use crate::{
common::KeyBytes,
disc::{
SECTOR_SIZE,
wii::{HASHES_SIZE, SECTOR_DATA_SIZE},
},
util::array_ref,
};
#[cfg(feature = "openssl")]
thread_local! {
static ENC_CIPHER_CTX: std::cell::RefCell<openssl::cipher_ctx::CipherCtx> = {
let cipher = openssl::cipher::Cipher::fetch(None, "AES-128-CBC", None).unwrap();
let mut ctx = openssl::cipher_ctx::CipherCtx::new().unwrap();
ctx.set_padding(false);
ctx.encrypt_init(Some(&cipher), None, None).unwrap();
std::cell::RefCell::new(ctx)
};
static DEC_CIPHER_CTX: std::cell::RefCell<openssl::cipher_ctx::CipherCtx> = {
let cipher = openssl::cipher::Cipher::fetch(None, "AES-128-CBC", None).unwrap();
let mut ctx = openssl::cipher_ctx::CipherCtx::new().unwrap();
ctx.set_padding(false);
ctx.decrypt_init(Some(&cipher), None, None).unwrap();
std::cell::RefCell::new(ctx)
};
}
/// Encrypts data in-place using AES-128-CBC with the given key and IV.
pub fn aes_cbc_encrypt(key: &KeyBytes, iv: &KeyBytes, data: &mut [u8]) {
assert_eq!(data.len() % 16, 0);
#[cfg(not(feature = "openssl"))]
{
use aes::cipher::{BlockModeEncrypt, KeyIvInit, block_padding::NoPadding};
<cbc::Encryptor<aes::Aes128>>::new(key.into(), iv.into())
.encrypt_padded::<NoPadding>(data, data.len())
.unwrap();
}
#[cfg(feature = "openssl")]
ENC_CIPHER_CTX.with_borrow_mut(|ctx| {
ctx.encrypt_init(None, Some(key), Some(iv)).unwrap();
let len = unsafe {
// The openssl crate doesn't provide a safe API for using the same inbuf/outbuf.
// However, this is valid with AES-CBC and no padding. Create an aliased view of
// the input slice to appease the borrow checker.
let input = std::slice::from_raw_parts(data.as_ptr(), data.len());
ctx.cipher_update_unchecked(input, Some(data))
}
.unwrap();
assert_eq!(len, data.len());
});
}
/// Decrypts data in-place using AES-128-CBC with the given key and IV.
pub fn aes_cbc_decrypt(key: &KeyBytes, iv: &KeyBytes, data: &mut [u8]) {
assert_eq!(data.len() % 16, 0);
#[cfg(not(feature = "openssl"))]
{
use aes::cipher::{BlockModeDecrypt, KeyIvInit, block_padding::NoPadding};
<cbc::Decryptor<aes::Aes128>>::new(key.into(), iv.into())
.decrypt_padded::<NoPadding>(data)
.unwrap();
}
#[cfg(feature = "openssl")]
DEC_CIPHER_CTX.with_borrow_mut(|ctx| {
ctx.decrypt_init(None, Some(key), Some(iv)).unwrap();
let len = unsafe {
// The openssl crate doesn't provide a safe API for using the same inbuf/outbuf.
// However, this is valid with AES-CBC and no padding. Create an aliased view of
// the input slice to appease the borrow checker.
let input = std::slice::from_raw_parts(data.as_ptr(), data.len());
ctx.cipher_update_unchecked(input, Some(data))
}
.unwrap();
assert_eq!(len, data.len());
});
}
/// Decrypts data buffer-to-buffer using AES-128-CBC with the given key and IV.
pub fn aes_cbc_decrypt_b2b(key: &KeyBytes, iv: &KeyBytes, data: &[u8], out: &mut [u8]) {
assert_eq!(data.len() % 16, 0);
assert_eq!(data.len(), out.len());
#[cfg(not(feature = "openssl"))]
{
use aes::cipher::{BlockModeDecrypt, KeyIvInit, block_padding::NoPadding};
<cbc::Decryptor<aes::Aes128>>::new(key.into(), iv.into())
.decrypt_padded_b2b::<NoPadding>(data, out)
.unwrap();
}
#[cfg(feature = "openssl")]
DEC_CIPHER_CTX.with_borrow_mut(|ctx| {
ctx.decrypt_init(None, Some(key), Some(iv)).unwrap();
let len = unsafe { ctx.cipher_update_unchecked(data, Some(out)) }.unwrap();
assert_eq!(len, out.len());
});
}
/// Encrypts a Wii partition sector in-place.
#[instrument(skip_all)]
pub fn encrypt_sector(out: &mut [u8; SECTOR_SIZE], key: &KeyBytes) {
aes_cbc_encrypt(key, &[0u8; 16], &mut out[..HASHES_SIZE]);
// Data IV from encrypted hash block
let iv = *array_ref![out, 0x3D0, 16];
aes_cbc_encrypt(key, &iv, &mut out[HASHES_SIZE..]);
}
/// Decrypts a Wii partition sector in-place.
#[instrument(skip_all)]
pub fn decrypt_sector(out: &mut [u8; SECTOR_SIZE], key: &KeyBytes) {
// Data IV from encrypted hash block
let iv = *array_ref![out, 0x3D0, 16];
aes_cbc_decrypt(key, &[0u8; 16], &mut out[..HASHES_SIZE]);
aes_cbc_decrypt(key, &iv, &mut out[HASHES_SIZE..]);
}
/// Decrypts a Wii partition sector buffer-to-buffer.
#[instrument(skip_all)]
pub fn decrypt_sector_b2b(data: &[u8; SECTOR_SIZE], out: &mut [u8; SECTOR_SIZE], key: &KeyBytes) {
// Data IV from encrypted hash block
let iv = *array_ref![data, 0x3D0, 16];
aes_cbc_decrypt_b2b(key, &[0u8; 16], &data[..HASHES_SIZE], &mut out[..HASHES_SIZE]);
aes_cbc_decrypt_b2b(key, &iv, &data[HASHES_SIZE..], &mut out[HASHES_SIZE..]);
}
/// Decrypts Wii partition sector data (excluding hashes) buffer-to-buffer.
#[instrument(skip_all)]
pub fn decrypt_sector_data_b2b(
data: &[u8; SECTOR_SIZE],
out: &mut [u8; SECTOR_DATA_SIZE],
key: &KeyBytes,
) {
// Data IV from encrypted hash block
let iv = *array_ref![data, 0x3D0, 16];
aes_cbc_decrypt_b2b(key, &iv, &data[HASHES_SIZE..], out);
}
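// Round-trip sketch (key and plaintext are arbitrary test values): encrypting
// then decrypting one block with the same key and zero IV is the identity.
//
// let key = [0u8; 16];
// let mut block = *b"0123456789abcdef";
// aes_cbc_encrypt(&key, &[0u8; 16], &mut block);
// aes_cbc_decrypt(&key, &[0u8; 16], &mut block);
// assert_eq!(&block, b"0123456789abcdef");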


@ -1,92 +1,484 @@
use std::io;
use tracing::instrument;
use crate::{
Error, Result,
common::Compression,
io::wia::{WIACompression, WIADisc},
};
pub struct Decompressor {
pub kind: DecompressionKind,
#[allow(unused)] // if compression features are disabled
pub cache: DecompressorCache,
}
impl Clone for Decompressor {
fn clone(&self) -> Self {
Self { kind: self.kind.clone(), cache: DecompressorCache::default() }
}
}
#[derive(Default)]
pub enum DecompressorCache {
#[default]
None,
#[cfg(feature = "compress-zlib")]
Deflate(Box<miniz_oxide::inflate::core::DecompressorOxide>),
#[cfg(feature = "compress-zstd")]
Zstandard(zstd_safe::DCtx<'static>),
}
impl Decompressor {
pub fn new(kind: DecompressionKind) -> Self {
Self { kind, cache: DecompressorCache::default() }
}
#[instrument(name = "Decompressor::decompress", skip_all)]
pub fn decompress(&mut self, buf: &[u8], out: &mut [u8]) -> io::Result<usize> {
match &self.kind {
DecompressionKind::None => {
if buf.len() > out.len() {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Decompressed data too large: {} > {}", buf.len(), out.len()),
));
}
out[..buf.len()].copy_from_slice(buf);
Ok(buf.len())
}
#[cfg(feature = "compress-zlib")]
DecompressionKind::Deflate => {
let decompressor = match &mut self.cache {
DecompressorCache::Deflate(decompressor) => decompressor,
_ => {
self.cache = DecompressorCache::Deflate(Box::new(
miniz_oxide::inflate::core::DecompressorOxide::new(),
));
match &mut self.cache {
DecompressorCache::Deflate(decompressor) => decompressor,
_ => unreachable!(),
}
}
};
decompressor.init();
let (status, in_size, out_size) = miniz_oxide::inflate::core::decompress(
decompressor.as_mut(),
buf,
out,
0,
miniz_oxide::inflate::core::inflate_flags::TINFL_FLAG_PARSE_ZLIB_HEADER
| miniz_oxide::inflate::core::inflate_flags::TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF,
);
match status {
miniz_oxide::inflate::TINFLStatus::Done => Ok(out_size),
_ => Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Deflate decompression status {:?} (in: {}, out: {})",
status, in_size, out_size
),
)),
}
}
#[cfg(feature = "compress-bzip2")]
DecompressionKind::Bzip2 => {
let mut decoder = bzip2::Decompress::new(false);
let status = decoder.decompress(buf, out)?;
match status {
bzip2::Status::StreamEnd => Ok(decoder.total_out() as usize),
_ => Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Bzip2 decompression status {:?}", status),
)),
}
}
#[cfg(feature = "compress-lzma")]
DecompressionKind::Lzma(data) => {
use lzma_util::{lzma_props_decode, new_lzma_decoder};
let mut decoder = new_lzma_decoder(&lzma_props_decode(data)?)?;
let status = decoder.process(buf, out, liblzma::stream::Action::Finish)?;
match status {
liblzma::stream::Status::StreamEnd => Ok(decoder.total_out() as usize),
_ => Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("LZMA decompression status {:?}", status),
)),
}
}
#[cfg(feature = "compress-lzma")]
DecompressionKind::Lzma2(data) => {
use lzma_util::{lzma2_props_decode, new_lzma2_decoder};
let mut decoder = new_lzma2_decoder(&lzma2_props_decode(data)?)?;
let status = decoder.process(buf, out, liblzma::stream::Action::Finish)?;
match status {
liblzma::stream::Status::StreamEnd => Ok(decoder.total_out() as usize),
_ => Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("LZMA2 decompression status {:?}", status),
)),
}
}
#[cfg(feature = "compress-zstd")]
DecompressionKind::Zstandard => {
let ctx = match &mut self.cache {
DecompressorCache::Zstandard(ctx) => ctx,
_ => {
let ctx = zstd_safe::DCtx::create();
self.cache = DecompressorCache::Zstandard(ctx);
match &mut self.cache {
DecompressorCache::Zstandard(ctx) => ctx,
_ => unreachable!(),
}
}
};
ctx.decompress(out, buf).map_err(zstd_util::map_error_code)
}
}
}
pub fn get_content_size(&self, buf: &[u8]) -> io::Result<Option<usize>> {
match &self.kind {
DecompressionKind::None => Ok(Some(buf.len())),
#[cfg(feature = "compress-zstd")]
DecompressionKind::Zstandard => zstd_safe::get_frame_content_size(buf)
.map(|n| n.map(|n| n as usize))
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e.to_string())),
#[allow(unreachable_patterns)] // if compression features are disabled
_ => Ok(None),
}
}
}
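// Usage sketch: decompressing one WIA/RVZ chunk. `disc` is a parsed WIADisc
// header and `chunk`/`out` are placeholder buffers sized by the caller.
//
// let mut decompressor = Decompressor::new(DecompressionKind::from_wia(&disc)?);
// let n = decompressor.decompress(&chunk, &mut out)?;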
#[derive(Debug, Clone)]
pub enum DecompressionKind {
None,
#[cfg(feature = "compress-zlib")]
Deflate,
#[cfg(feature = "compress-bzip2")]
Bzip2,
#[cfg(feature = "compress-lzma")]
Lzma(Box<[u8]>),
#[cfg(feature = "compress-lzma")]
Lzma2(Box<[u8]>),
#[cfg(feature = "compress-zstd")]
Zstandard,
}
impl DecompressionKind {
pub fn from_wia(disc: &WIADisc) -> Result<Self> {
let _data = &disc.compr_data[..disc.compr_data_len as usize];
match disc.compression() {
WIACompression::None => Ok(Self::None),
#[cfg(feature = "compress-bzip2")]
WIACompression::Bzip2 => Ok(Self::Bzip2),
#[cfg(feature = "compress-lzma")]
WIACompression::Lzma => Ok(Self::Lzma(Box::from(_data))),
#[cfg(feature = "compress-lzma")]
WIACompression::Lzma2 => Ok(Self::Lzma2(Box::from(_data))),
#[cfg(feature = "compress-zstd")]
WIACompression::Zstandard => Ok(Self::Zstandard),
comp => Err(Error::DiscFormat(format!("Unsupported WIA/RVZ compression: {:?}", comp))),
}
}
}
pub struct Compressor {
pub kind: Compression,
pub cache: CompressorCache,
pub buffer: Vec<u8>,
}
impl Clone for Compressor {
fn clone(&self) -> Self {
Self {
kind: self.kind,
cache: CompressorCache::default(),
buffer: Vec::with_capacity(self.buffer.capacity()),
}
}
}
#[derive(Default)]
pub enum CompressorCache {
#[default]
None,
#[cfg(feature = "compress-zlib")]
Deflate(Box<miniz_oxide::deflate::core::CompressorOxide>),
#[cfg(feature = "compress-zstd")]
Zstandard(zstd_safe::CCtx<'static>),
}
impl Compressor {
pub fn new(kind: Compression, buffer_size: usize) -> Self {
Self { kind, cache: CompressorCache::default(), buffer: Vec::with_capacity(buffer_size) }
}
/// Compresses the given buffer into the internal `buffer`. The buffer's capacity will not be
/// extended; if the compressed data would exceed it, this function bails and returns `false`.
#[instrument(name = "Compressor::compress", skip_all)]
pub fn compress(&mut self, buf: &[u8]) -> io::Result<bool> {
self.buffer.clear();
match self.kind {
Compression::None => {
if self.buffer.capacity() >= buf.len() {
self.buffer.extend_from_slice(buf);
Ok(true)
} else {
Ok(false)
}
}
#[cfg(feature = "compress-zlib")]
Compression::Deflate(level) => {
let compressor = match &mut self.cache {
CompressorCache::Deflate(compressor) => compressor,
_ => {
self.cache = CompressorCache::Deflate(Box::new(
miniz_oxide::deflate::core::CompressorOxide::new(
miniz_oxide::deflate::core::create_comp_flags_from_zip_params(
level as i32,
15,
0,
),
),
));
match &mut self.cache {
CompressorCache::Deflate(compressor) => compressor,
_ => unreachable!(),
}
}
};
self.buffer.resize(self.buffer.capacity(), 0);
compressor.reset();
let (status, _, out_size) = miniz_oxide::deflate::core::compress(
compressor.as_mut(),
buf,
self.buffer.as_mut_slice(),
miniz_oxide::deflate::core::TDEFLFlush::Finish,
);
self.buffer.truncate(out_size);
Ok(status == miniz_oxide::deflate::core::TDEFLStatus::Done)
}
#[cfg(feature = "compress-bzip2")]
Compression::Bzip2(level) => {
let compression = bzip2::Compression::new(level as u32);
let mut compress = bzip2::Compress::new(compression, 30);
let status = compress.compress_vec(buf, &mut self.buffer, bzip2::Action::Finish)?;
Ok(status == bzip2::Status::StreamEnd)
}
#[cfg(feature = "compress-lzma")]
Compression::Lzma(level) => {
let options = liblzma::stream::LzmaOptions::new_preset(level as u32)?;
let mut encoder = lzma_util::new_lzma_encoder(&options)?;
let status =
encoder.process_vec(buf, &mut self.buffer, liblzma::stream::Action::Finish)?;
Ok(status == liblzma::stream::Status::StreamEnd)
}
#[cfg(feature = "compress-lzma")]
Compression::Lzma2(level) => {
let options = liblzma::stream::LzmaOptions::new_preset(level as u32)?;
let mut encoder = lzma_util::new_lzma2_encoder(&options)?;
let status =
encoder.process_vec(buf, &mut self.buffer, liblzma::stream::Action::Finish)?;
Ok(status == liblzma::stream::Status::StreamEnd)
}
#[cfg(feature = "compress-zstd")]
Compression::Zstandard(level) => {
let ctx = match &mut self.cache {
CompressorCache::Zstandard(compressor) => compressor,
_ => {
let mut ctx = zstd_safe::CCtx::create();
ctx.init(level as i32).map_err(zstd_util::map_error_code)?;
ctx.set_parameter(zstd_safe::CParameter::ContentSizeFlag(true))
.map_err(zstd_util::map_error_code)?;
self.cache = CompressorCache::Zstandard(ctx);
match &mut self.cache {
CompressorCache::Zstandard(compressor) => compressor,
_ => unreachable!(),
}
}
};
match ctx.compress2(&mut self.buffer, buf) {
Ok(_) => Ok(true),
// dstSize_tooSmall
Err(e) if e == -70isize as usize => Ok(false),
Err(e) => Err(zstd_util::map_error_code(e)),
}
}
#[allow(unreachable_patterns)] // if compression is disabled
_ => Err(io::Error::other(format!("Unsupported compression: {:?}", self.kind))),
}
}
}
#[cfg(feature = "compress-lzma")]
pub mod lzma_util {
use std::{
cmp::Ordering,
io::{Error, ErrorKind, Result},
};
use liblzma::stream::{Filters, LzmaOptions, Stream};
use crate::util::{array_ref, array_ref_mut, static_assert};
/// Decodes the LZMA Properties byte (lc/lp/pb).
/// See `lzma_lzma_lclppb_decode` in `liblzma/lzma/lzma_decoder.c`.
pub fn lzma_lclppb_decode(options: &mut LzmaOptions, byte: u8) -> Result<()> {
let mut d = byte as u32;
if d >= (9 * 5 * 5) {
return Err(Error::new(
ErrorKind::InvalidData,
format!("Invalid LZMA props byte: {}", d),
));
}
options.literal_context_bits(d % 9);
d /= 9;
options.position_bits(d / 5);
options.literal_position_bits(d % 5);
Ok(())
}
/// Encodes the LZMA Properties byte (lc/lp/pb).
/// See `lzma_lzma_lclppb_encode` in `liblzma/lzma/lzma_encoder.c`.
pub fn lzma_lclppb_encode(options: &LzmaOptions) -> Result<u8> {
let options = get_options_sys(options);
let byte = (options.pb * 5 + options.lp) * 9 + options.lc;
if byte >= (9 * 5 * 5) {
return Err(Error::new(
ErrorKind::InvalidData,
format!("Invalid LZMA props byte: {}", byte),
));
}
Ok(byte as u8)
}
/// Decodes LZMA properties.
/// See `lzma_lzma_props_decode` in `liblzma/lzma/lzma_decoder.c`.
pub fn lzma_props_decode(props: &[u8]) -> Result<LzmaOptions> {
if props.len() != 5 {
return Err(Error::new(
ErrorKind::InvalidData,
format!("Invalid LZMA props length: {}", props.len()),
));
}
let mut options = LzmaOptions::new();
lzma_lclppb_decode(&mut options, props[0])?;
options.dict_size(u32::from_le_bytes(*array_ref![props, 1, 4]));
Ok(options)
}
/// Encodes LZMA properties.
/// See `lzma_lzma_props_encode` in `liblzma/lzma/lzma_encoder.c`.
pub fn lzma_props_encode(options: &LzmaOptions) -> Result<[u8; 5]> {
let mut props = [0u8; 5];
props[0] = lzma_lclppb_encode(options)?;
*array_ref_mut![props, 1, 4] = get_options_sys(options).dict_size.to_le_bytes();
Ok(props)
}
/// Decodes LZMA2 properties.
/// See `lzma_lzma2_props_decode` in `liblzma/lzma/lzma2_decoder.c`.
pub fn lzma2_props_decode(props: &[u8]) -> Result<LzmaOptions> {
if props.len() != 1 {
return Err(Error::new(
ErrorKind::InvalidData,
format!("Invalid LZMA2 props length: {}", props.len()),
));
}
let d = props[0] as u32;
let mut options = LzmaOptions::new();
options.dict_size(match d.cmp(&40) {
Ordering::Greater => {
return Err(Error::new(
ErrorKind::InvalidData,
format!("Invalid LZMA2 props byte: {}", d),
));
}
Ordering::Equal => u32::MAX,
Ordering::Less => (2 | (d & 1)) << (d / 2 + 11),
});
Ok(options)
}
/// Encodes LZMA2 properties.
/// See `lzma_lzma2_props_encode` in `liblzma/lzma/lzma2_encoder.c`.
pub fn lzma2_props_encode(options: &LzmaOptions) -> Result<[u8; 1]> {
let options = get_options_sys(options);
let mut d = options.dict_size.max(liblzma_sys::LZMA_DICT_SIZE_MIN);
// Round up to the next 2^n - 1 or 2^n + 2^(n - 1) - 1 depending
// on which one is the next:
d -= 1;
d |= d >> 2;
d |= d >> 3;
d |= d >> 4;
d |= d >> 8;
d |= d >> 16;
// Get the highest two bits using the proper encoding:
if d == u32::MAX {
d = 40;
} else {
d = get_dist_slot(d + 1) - 24;
}
Ok([d as u8])
}
/// Creates a new raw LZMA decoder with the given options.
pub fn new_lzma_decoder(options: &LzmaOptions) -> Result<Stream> {
let mut filters = Filters::new();
filters.lzma1(options);
Stream::new_raw_decoder(&filters).map_err(Error::from)
}
/// Creates a new raw LZMA encoder with the given options.
pub fn new_lzma_encoder(options: &LzmaOptions) -> Result<Stream> {
let mut filters = Filters::new();
filters.lzma1(options);
Stream::new_raw_encoder(&filters).map_err(Error::from)
}
/// Creates a new raw LZMA2 decoder with the given options.
pub fn new_lzma2_decoder(options: &LzmaOptions) -> Result<Stream> {
let mut filters = Filters::new();
filters.lzma2(options);
Stream::new_raw_decoder(&filters).map_err(Error::from)
}
/// Creates a new raw LZMA2 encoder with the given options.
pub fn new_lzma2_encoder(options: &LzmaOptions) -> Result<Stream> {
let mut filters = Filters::new();
filters.lzma2(options);
Stream::new_raw_encoder(&filters).map_err(Error::from)
}
/// liblzma does not expose any accessors for `LzmaOptions`, so we have to
/// cast it into the internal `lzma_options_lzma` struct.
#[inline]
fn get_options_sys(options: &LzmaOptions) -> &liblzma_sys::lzma_options_lzma {
static_assert!(size_of::<LzmaOptions>() == size_of::<liblzma_sys::lzma_options_lzma>());
unsafe { &*(options as *const LzmaOptions as *const liblzma_sys::lzma_options_lzma) }
}
/// See `get_dist_slot` in `liblzma/lzma/fastpos.h`.
fn get_dist_slot(dist: u32) -> u32 {
if dist <= 4 {
dist
} else {
let i = dist.leading_zeros() ^ 31;
(i + i) + ((dist >> (i - 1)) & 1)
}
}
}
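// Sketch: the props helpers are inverses, so a round-trip preserves lc/lp/pb
// and the dictionary size (preset 6 is an arbitrary example).
//
// let options = liblzma::stream::LzmaOptions::new_preset(6)?;
// let props = lzma_util::lzma_props_encode(&options)?;
// let decoded = lzma_util::lzma_props_decode(&props)?;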
#[cfg(feature = "compress-zstd")]
mod zstd_util {
use std::io;
pub fn map_error_code(code: usize) -> io::Error {
io::Error::other(zstd_safe::get_error_name(code))
}
}

nod/src/util/digest.rs Normal file

@ -0,0 +1,276 @@
use std::{thread, thread::JoinHandle};
use bytes::Bytes;
use crossbeam_channel::Sender;
use digest::Digest;
use tracing::instrument;
use crate::{
common::HashBytes,
io::nkit::NKitHeader,
write::{DiscFinalization, ProcessOptions},
};
/// Hashes a byte slice with SHA-1.
#[instrument(skip_all)]
pub fn sha1_hash(buf: &[u8]) -> HashBytes {
#[cfg(feature = "openssl")]
{
// The one-shot openssl::sha::sha1 ends up being much slower
let mut hasher = openssl::sha::Sha1::new();
hasher.update(buf);
hasher.finish()
}
#[cfg(not(feature = "openssl"))]
{
use sha1::Digest;
HashBytes::from(sha1::Sha1::digest(buf))
}
}
/// Hashes a byte slice with XXH64.
#[allow(unused_braces)] // https://github.com/rust-lang/rust/issues/116347
#[instrument(skip_all)]
pub fn xxh64_hash(buf: &[u8]) -> u64 { xxhash_rust::xxh64::xxh64(buf, 0) }
pub type DigestThread = (Sender<Bytes>, JoinHandle<DigestResult>);
pub fn digest_thread<H>() -> DigestThread
where H: Hasher + Send + 'static {
let (tx, rx) = crossbeam_channel::bounded::<Bytes>(1);
let handle = thread::Builder::new()
.name(format!("Digest {}", H::NAME))
.spawn(move || {
let mut hasher = H::new();
while let Ok(data) = rx.recv() {
hasher.update(data.as_ref());
}
hasher.finalize()
})
.expect("Failed to spawn digest thread");
(tx, handle)
}
pub struct DigestManager {
threads: Vec<DigestThread>,
}
impl DigestManager {
pub fn new(options: &ProcessOptions) -> Self {
let mut threads = Vec::new();
if options.digest_crc32 {
threads.push(digest_thread::<crc32fast::Hasher>());
}
if options.digest_md5 {
#[cfg(feature = "openssl")]
threads.push(digest_thread::<openssl_util::HasherMD5>());
#[cfg(not(feature = "openssl"))]
threads.push(digest_thread::<md5::Md5>());
}
if options.digest_sha1 {
#[cfg(feature = "openssl")]
threads.push(digest_thread::<openssl_util::HasherSHA1>());
#[cfg(not(feature = "openssl"))]
threads.push(digest_thread::<sha1::Sha1>());
}
if options.digest_xxh64 {
threads.push(digest_thread::<xxhash_rust::xxh64::Xxh64>());
}
DigestManager { threads }
}
#[instrument(name = "DigestManager::send", skip_all)]
pub fn send(&self, data: Bytes) {
let mut sent = 0usize;
// Non-blocking send to all threads
for (idx, (tx, _)) in self.threads.iter().enumerate() {
if tx.try_send(data.clone()).is_ok() {
sent |= 1 << idx;
}
}
// Blocking send to any remaining threads
for (idx, (tx, _)) in self.threads.iter().enumerate() {
if sent & (1 << idx) == 0 {
tx.send(data.clone()).expect("Failed to send data to digest thread");
}
}
}
#[instrument(name = "DigestManager::finish", skip_all)]
pub fn finish(self) -> DigestResults {
let mut results = DigestResults { crc32: None, md5: None, sha1: None, xxh64: None };
for (tx, handle) in self.threads {
drop(tx); // Close channel
match handle.join().unwrap() {
DigestResult::Crc32(v) => results.crc32 = Some(v),
DigestResult::Md5(v) => results.md5 = Some(v),
DigestResult::Sha1(v) => results.sha1 = Some(v),
DigestResult::Xxh64(v) => results.xxh64 = Some(v),
}
}
results
}
}
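// Usage sketch: feed chunks to all enabled digest threads, then collect.
// `options` is a ProcessOptions with the digest_* flags set; `chunk` is a
// placeholder bytes::Bytes.
//
// let manager = DigestManager::new(&options);
// manager.send(chunk.clone());
// let results = manager.finish(); // results.crc32 / md5 / sha1 / xxh64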
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DigestResult {
Crc32(u32),
Md5([u8; 16]),
Sha1([u8; 20]),
Xxh64(u64),
}
pub trait Hasher {
const NAME: &'static str;
fn new() -> Self;
fn finalize(self) -> DigestResult;
fn update(&mut self, data: &[u8]);
}
impl Hasher for md5::Md5 {
const NAME: &'static str = "MD5";
fn new() -> Self { Digest::new() }
fn finalize(self) -> DigestResult { DigestResult::Md5(Digest::finalize(self).into()) }
#[allow(unused_braces)] // https://github.com/rust-lang/rust/issues/116347
#[instrument(name = "md5::Md5::update", skip_all)]
fn update(&mut self, data: &[u8]) { Digest::update(self, data) }
}
impl Hasher for sha1::Sha1 {
const NAME: &'static str = "SHA-1";
fn new() -> Self { Digest::new() }
fn finalize(self) -> DigestResult { DigestResult::Sha1(Digest::finalize(self).into()) }
#[allow(unused_braces)] // https://github.com/rust-lang/rust/issues/116347
#[instrument(name = "sha1::Sha1::update", skip_all)]
fn update(&mut self, data: &[u8]) { Digest::update(self, data) }
}
impl Hasher for crc32fast::Hasher {
const NAME: &'static str = "CRC32";
fn new() -> Self { crc32fast::Hasher::new() }
fn finalize(self) -> DigestResult { DigestResult::Crc32(crc32fast::Hasher::finalize(self)) }
#[allow(unused_braces)] // https://github.com/rust-lang/rust/issues/116347
#[instrument(name = "crc32fast::Hasher::update", skip_all)]
fn update(&mut self, data: &[u8]) { crc32fast::Hasher::update(self, data) }
}
impl Hasher for xxhash_rust::xxh64::Xxh64 {
const NAME: &'static str = "XXH64";
fn new() -> Self { xxhash_rust::xxh64::Xxh64::new(0) }
fn finalize(self) -> DigestResult {
DigestResult::Xxh64(xxhash_rust::xxh64::Xxh64::digest(&self))
}
#[allow(unused_braces)] // https://github.com/rust-lang/rust/issues/116347
#[instrument(name = "xxhash_rust::xxh64::Xxh64::update", skip_all)]
fn update(&mut self, data: &[u8]) { xxhash_rust::xxh64::Xxh64::update(self, data) }
}
#[cfg(feature = "openssl")]
mod openssl_util {
use tracing::instrument;
use super::{DigestResult, Hasher};
pub type HasherMD5 = HashWrapper<MessageDigestMD5>;
pub type HasherSHA1 = HashWrapper<MessageDigestSHA1>;
pub struct HashWrapper<T>
where T: MessageDigest
{
hasher: openssl::hash::Hasher,
_marker: std::marker::PhantomData<T>,
}
impl<T> HashWrapper<T>
where T: MessageDigest
{
fn new() -> Self {
Self {
hasher: openssl::hash::Hasher::new(T::new()).unwrap(),
_marker: Default::default(),
}
}
}
pub trait MessageDigest {
fn new() -> openssl::hash::MessageDigest;
}
pub struct MessageDigestMD5;
impl MessageDigest for MessageDigestMD5 {
fn new() -> openssl::hash::MessageDigest { openssl::hash::MessageDigest::md5() }
}
pub struct MessageDigestSHA1;
impl MessageDigest for MessageDigestSHA1 {
fn new() -> openssl::hash::MessageDigest { openssl::hash::MessageDigest::sha1() }
}
impl Hasher for HasherMD5 {
const NAME: &'static str = "MD5";
fn new() -> Self { Self::new() }
fn finalize(mut self) -> DigestResult {
DigestResult::Md5((*self.hasher.finish().unwrap()).try_into().unwrap())
}
#[allow(unused_braces)] // https://github.com/rust-lang/rust/issues/116347
#[instrument(name = "openssl_util::HasherMD5::update", skip_all)]
fn update(&mut self, data: &[u8]) { self.hasher.update(data).unwrap() }
}
impl Hasher for HasherSHA1 {
const NAME: &'static str = "SHA-1";
fn new() -> Self { Self::new() }
fn finalize(mut self) -> DigestResult {
DigestResult::Sha1((*self.hasher.finish().unwrap()).try_into().unwrap())
}
#[allow(unused_braces)] // https://github.com/rust-lang/rust/issues/116347
#[instrument(name = "openssl_util::HasherSHA1::update", skip_all)]
fn update(&mut self, data: &[u8]) { self.hasher.update(data).unwrap() }
}
}
pub struct DigestResults {
pub crc32: Option<u32>,
pub md5: Option<[u8; 16]>,
pub sha1: Option<[u8; 20]>,
pub xxh64: Option<u64>,
}
impl DiscFinalization {
pub(crate) fn apply_digests(&mut self, results: &DigestResults) {
self.crc32 = results.crc32;
self.md5 = results.md5;
self.sha1 = results.sha1;
self.xxh64 = results.xxh64;
}
}
impl NKitHeader {
pub(crate) fn apply_digests(&mut self, results: &DigestResults) {
self.crc32 = results.crc32;
self.md5 = results.md5;
self.sha1 = results.sha1;
self.xxh64 = results.xxh64;
}
}


@ -1,70 +1,114 @@
//! Lagged Fibonacci generator for GC / Wii partition junk data.
use std::{
io,
io::{Read, Write},
};
use bytes::Buf;
use tracing::instrument;
use zerocopy::{IntoBytes, transmute_ref};
use crate::{disc::SECTOR_SIZE, util::array_ref_mut};
/// Value of `k` for the LFG.
pub const LFG_K: usize = 521;
/// Value of `k` for the LFG in bytes.
pub const LFG_K_BYTES: usize = LFG_K * 4;
/// Value of `j` for the LFG.
pub const LFG_J: usize = 32;
/// Number of 32-bit words in the seed.
pub const SEED_SIZE: usize = 17;
/// Size of the seed in bytes.
pub const SEED_SIZE_BYTES: usize = SEED_SIZE * 4;
/// Lagged Fibonacci generator for GC / Wii partition junk data.
///
/// References (license CC0-1.0):
/// - [WiaAndRvz.md](https://github.com/dolphin-emu/dolphin/blob/a0f555648c27ec0c928f6b1e1fcad5e2d7c4d0c4/docs/WiaAndRvz.md)
/// - [LaggedFibonacciGenerator.cpp](https://github.com/dolphin-emu/dolphin/blob/a0f555648c27ec0c928f6b1e1fcad5e2d7c4d0c4/Source/Core/DiscIO/LaggedFibonacciGenerator.cpp)
pub struct LaggedFibonacci {
buffer: [u32; LFG_K],
position: usize,
}
impl Default for LaggedFibonacci {
#[inline]
fn default() -> Self { Self { buffer: [0u32; LFG_K], position: 0 } }
}
impl LaggedFibonacci {
fn init(&mut self) {
for i in SEED_SIZE..LFG_K {
self.buffer[i] = (self.buffer[i - SEED_SIZE] << 23)
^ (self.buffer[i - SEED_SIZE + 1] >> 9)
^ self.buffer[i - 1];
}
// Instead of doing the "shift by 18 instead of 16" oddity when actually outputting the data,
// we can do the shifting (and byteswapping) at this point to make the output code simpler.
for x in self.buffer.iter_mut() {
*x = ((*x & 0xFF00FFFF) | ((*x >> 2) & 0x00FF0000)).to_be();
}
for _ in 0..4 {
self.forward();
}
}
/// Generates the seed for GC / Wii partition junk data using the disc ID, disc number, and sector.
pub fn generate_seed(out: &mut [u32; SEED_SIZE], disc_id: [u8; 4], disc_num: u8, sector: u32) {
let seed = u32::from_be_bytes([
disc_id[2],
disc_id[1],
disc_id[3].wrapping_add(disc_id[2]),
disc_id[0].wrapping_add(disc_id[1]),
]) ^ disc_num as u32;
let mut n = seed.wrapping_mul(0x260BCD5) ^ sector.wrapping_mul(0x1EF29123);
for v in &mut *out {
*v = 0u32;
for _ in 0..LFG_J {
n = n.wrapping_mul(0x5D588B65).wrapping_add(1);
*v = (*v >> 1) | (n & 0x80000000);
}
}
out[16] ^= (out[0] >> 9) ^ (out[16] << 23);
}
/// Same as [`Self::generate_seed`], but ensures the resulting seed is big-endian.
pub fn generate_seed_be(
out: &mut [u32; SEED_SIZE],
disc_id: [u8; 4],
disc_num: u8,
sector: u32,
) {
Self::generate_seed(out, disc_id, disc_num, sector);
for x in out.iter_mut() {
*x = x.to_be();
}
}
/// Initializes the LFG with the standard seed for a given disc ID, disc number, and sector.
/// The partition offset is used to determine the sector and how many bytes to skip within the
/// sector.
#[instrument(name = "LaggedFibonacci::init_with_seed", skip_all)]
pub fn init_with_seed(&mut self, disc_id: [u8; 4], disc_num: u8, partition_offset: u64) {
let sector = (partition_offset / SECTOR_SIZE as u64) as u32;
let sector_offset = (partition_offset % SECTOR_SIZE as u64) as usize;
Self::generate_seed(array_ref_mut![self.buffer, 0, SEED_SIZE], disc_id, disc_num, sector);
self.position = 0;
self.init();
self.skip(sector_offset);
}
/// Initializes the LFG with the seed read from a reader. The seed is assumed to be big-endian.
/// This is used for rebuilding junk data in WIA/RVZ files.
#[instrument(name = "LaggedFibonacci::init_with_reader", skip_all)]
pub fn init_with_reader<R>(&mut self, reader: &mut R) -> io::Result<()>
where R: Read + ?Sized {
reader.read_exact(self.buffer[..SEED_SIZE].as_mut_bytes())?;
for x in self.buffer[..SEED_SIZE].iter_mut() {
*x = u32::from_be(*x);
}
@ -73,7 +117,28 @@ impl LaggedFibonacci {
Ok(())
}
/// Initializes the LFG with the seed read from a [`Buf`]. The seed is assumed to be big-endian.
/// This is used for rebuilding junk data in WIA/RVZ files.
#[instrument(name = "LaggedFibonacci::init_with_buf", skip_all)]
pub fn init_with_buf(&mut self, reader: &mut impl Buf) -> io::Result<()> {
let out = self.buffer[..SEED_SIZE].as_mut_bytes();
if reader.remaining() < out.len() {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "Filling LFG seed"));
}
reader.copy_to_slice(out);
for x in self.buffer[..SEED_SIZE].iter_mut() {
*x = u32::from_be(*x);
}
self.position = 0;
self.init();
Ok(())
}
/// Advances the LFG by one step.
// This gets vectorized and aggressively inlined, so it's better to
// keep it separate for code size and instruction cache pressure.
#[inline(never)]
fn forward(&mut self) {
for i in 0..LFG_J {
self.buffer[i] ^= self.buffer[i + LFG_K - LFG_J];
}
@ -82,27 +147,138 @@ impl LaggedFibonacci {
}
}
/// Skips `n` bytes of junk data.
pub fn skip(&mut self, n: usize) {
self.position += n;
while self.position >= LFG_K_BYTES {
self.forward();
self.position -= LFG_K_BYTES;
}
}
/// Fills the buffer with junk data.
#[instrument(name = "LaggedFibonacci::fill", skip_all)]
pub fn fill(&mut self, mut buf: &mut [u8]) {
while !buf.is_empty() {
while self.position >= LFG_K_BYTES {
self.forward();
self.position -= LFG_K_BYTES;
}
let bytes: &[u8; LFG_K_BYTES] = transmute_ref!(&self.buffer);
let len = buf.len().min(LFG_K_BYTES - self.position);
buf[..len].copy_from_slice(&bytes[self.position..self.position + len]);
self.position += len;
buf = &mut buf[len..];
}
}
/// Writes junk data to the output stream.
#[instrument(name = "LaggedFibonacci::write", skip_all)]
pub fn write<W>(&mut self, w: &mut W, mut len: u64) -> io::Result<()>
where W: Write + ?Sized {
while len > 0 {
while self.position >= LFG_K_BYTES {
self.forward();
self.position -= LFG_K_BYTES;
}
let bytes: &[u8; LFG_K_BYTES] = transmute_ref!(&self.buffer);
let write_len = len.min((LFG_K_BYTES - self.position) as u64) as usize;
w.write_all(&bytes[self.position..self.position + write_len])?;
self.position += write_len;
len -= write_len as u64;
}
Ok(())
}
/// The junk data on GC / Wii discs is reinitialized every 32KB. This function handles the
/// wrapping logic and reinitializes the LFG at sector boundaries.
#[instrument(name = "LaggedFibonacci::fill_sector_chunked", skip_all)]
pub fn fill_sector_chunked(
&mut self,
mut buf: &mut [u8],
disc_id: [u8; 4],
disc_num: u8,
mut partition_offset: u64,
) {
while !buf.is_empty() {
self.init_with_seed(disc_id, disc_num, partition_offset);
let len =
(SECTOR_SIZE - (partition_offset % SECTOR_SIZE as u64) as usize).min(buf.len());
self.fill(&mut buf[..len]);
buf = &mut buf[len..];
partition_offset += len as u64;
}
}
/// The junk data on GC / Wii discs is reinitialized every 32KB. This function handles the
/// wrapping logic and reinitializes the LFG at sector boundaries.
#[instrument(name = "LaggedFibonacci::write_sector_chunked", skip_all)]
pub fn write_sector_chunked<W>(
&mut self,
w: &mut W,
mut len: u64,
disc_id: [u8; 4],
disc_num: u8,
mut partition_offset: u64,
) -> io::Result<()>
where
W: Write + ?Sized,
{
while len > 0 {
self.init_with_seed(disc_id, disc_num, partition_offset);
let write_len = (SECTOR_SIZE as u64 - (partition_offset % SECTOR_SIZE as u64)).min(len);
self.write(w, write_len)?;
len -= write_len;
partition_offset += write_len;
}
Ok(())
}
/// Checks if the data matches the junk data generated by the LFG, up to the first sector
/// boundary.
#[instrument(name = "LaggedFibonacci::check", skip_all)]
pub fn check(
&mut self,
buf: &[u8],
disc_id: [u8; 4],
disc_num: u8,
partition_offset: u64,
) -> usize {
let mut lfg_buf = [0u8; SECTOR_SIZE];
self.init_with_seed(disc_id, disc_num, partition_offset);
let len = (SECTOR_SIZE - (partition_offset % SECTOR_SIZE as u64) as usize).min(buf.len());
self.fill(&mut lfg_buf[..len]);
buf[..len].iter().zip(&lfg_buf[..len]).take_while(|(a, b)| a == b).count()
}
/// Checks if the data matches the junk data generated by the LFG. This function handles the
/// wrapping logic and reinitializes the LFG at sector boundaries.
#[instrument(name = "LaggedFibonacci::check_sector_chunked", skip_all)]
pub fn check_sector_chunked(
&mut self,
mut buf: &[u8],
disc_id: [u8; 4],
disc_num: u8,
mut partition_offset: u64,
) -> usize {
let mut lfg_buf = [0u8; SECTOR_SIZE];
let mut total_num_matching = 0;
while !buf.is_empty() {
self.init_with_seed(disc_id, disc_num, partition_offset);
let len =
(SECTOR_SIZE - (partition_offset % SECTOR_SIZE as u64) as usize).min(buf.len());
self.fill(&mut lfg_buf[..len]);
let num_matching =
buf[..len].iter().zip(&lfg_buf[..len]).take_while(|(a, b)| a == b).count();
total_num_matching += num_matching;
if num_matching != len {
break;
}
buf = &buf[len..];
partition_offset += len as u64;
}
total_num_matching
}
}
#[cfg(test)]
@ -132,4 +308,53 @@ mod tests {
0xEA, 0xD0
]);
}
#[test]
fn test_init_with_seed_3() {
let mut lfg = LaggedFibonacci::default();
lfg.init_with_seed([0x47, 0x50, 0x49, 0x45], 0, 0x322904);
let mut buf = [0u8; 16];
lfg.fill(&mut buf);
assert_eq!(buf, [
0x97, 0xD8, 0x23, 0x0B, 0x12, 0xAA, 0x20, 0x45, 0xC2, 0xBD, 0x71, 0x8C, 0x30, 0x32,
0xC5, 0x2F
]);
}
#[test]
fn test_write() {
let mut lfg = LaggedFibonacci::default();
lfg.init_with_seed([0x47, 0x50, 0x49, 0x45], 0, 0x322904);
let mut buf = [0u8; 16];
lfg.write(&mut buf.as_mut_slice(), 16).unwrap();
assert_eq!(buf, [
0x97, 0xD8, 0x23, 0x0B, 0x12, 0xAA, 0x20, 0x45, 0xC2, 0xBD, 0x71, 0x8C, 0x30, 0x32,
0xC5, 0x2F
]);
}
#[test]
fn test_fill_sector_chunked() {
let mut lfg = LaggedFibonacci::default();
let mut buf = [0u8; 32];
lfg.fill_sector_chunked(&mut buf, [0x47, 0x4D, 0x38, 0x45], 0, 0x27FF0);
assert_eq!(buf, [
0xAD, 0x6F, 0x21, 0xBE, 0x05, 0x57, 0x10, 0xED, 0xEA, 0xB0, 0x8E, 0xFD, 0x91, 0x58,
0xA2, 0x0E, 0xDC, 0x0D, 0x59, 0xC0, 0x02, 0x98, 0xA5, 0x00, 0x39, 0x5B, 0x68, 0xA6,
0x5D, 0x53, 0x2D, 0xB6
]);
}
#[test]
fn test_write_sector_chunked() {
let mut lfg = LaggedFibonacci::default();
let mut buf = [0u8; 32];
lfg.write_sector_chunked(&mut buf.as_mut_slice(), 32, [0x47, 0x4D, 0x38, 0x45], 0, 0x27FF0)
.unwrap();
assert_eq!(buf, [
0xAD, 0x6F, 0x21, 0xBE, 0x05, 0x57, 0x10, 0xED, 0xEA, 0xB0, 0x8E, 0xFD, 0x91, 0x58,
0xA2, 0x0E, 0xDC, 0x0D, 0x59, 0xC0, 0x02, 0x98, 0xA5, 0x00, 0x39, 0x5B, 0x68, 0xA6,
0x5D, 0x53, 0x2D, 0xB6
]);
}
}


@ -1,46 +1,200 @@
//! Utility functions and types.
use std::{
io,
io::{BufRead, Read, Seek, SeekFrom, Write},
ops::{Div, Rem},
};
pub(crate) mod aes;
pub(crate) mod compress;
pub(crate) mod digest;
pub mod lfg;
pub(crate) mod read;
pub(crate) mod take_seek;
/// Copies from a [`BufRead`] to a [`Write`] without allocating a buffer.
pub fn buf_copy<R, W>(reader: &mut R, writer: &mut W) -> io::Result<u64>
where
R: BufRead + ?Sized,
W: Write + ?Sized,
{
let mut copied = 0;
loop {
let buf = reader.fill_buf()?;
let len = buf.len();
if len == 0 {
break;
}
writer.write_all(buf)?;
reader.consume(len);
copied += len as u64;
}
Ok(copied)
}
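// Usage sketch: buf_copy drains any BufRead (such as the WindowedReader defined
// below) straight into a writer, reusing the reader's internal buffer instead
// of allocating one. Offsets and the output path are placeholders.
//
// let mut window = WindowedReader::new(reader, 0x2440, 0x2000)?;
// let copied = buf_copy(&mut window, &mut std::fs::File::create("out.bin")?)?;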
/// A reader with a fixed window.
#[derive(Clone)]
pub struct WindowedReader<T>
where T: BufRead + Seek
{
base: T,
pos: u64,
begin: u64,
end: u64,
}
impl<T> WindowedReader<T>
where T: BufRead + Seek
{
/// Creates a new windowed stream with offset and size.
///
/// Seeks underlying stream immediately.
#[inline]
pub fn new(mut base: T, offset: u64, size: u64) -> io::Result<Self> {
base.seek(SeekFrom::Start(offset))?;
Ok(Self { base, pos: offset, begin: offset, end: offset + size })
}
/// Returns the length of the window.
#[inline]
#[allow(clippy::len_without_is_empty)]
pub fn len(&self) -> u64 { self.end - self.begin }
}
impl<T> Read for WindowedReader<T>
where T: BufRead + Seek
{
#[inline]
fn read(&mut self, out: &mut [u8]) -> io::Result<usize> {
let buf = self.fill_buf()?;
let len = buf.len().min(out.len());
out[..len].copy_from_slice(&buf[..len]);
self.consume(len);
Ok(len)
}
}
impl<T> BufRead for WindowedReader<T>
where T: BufRead + Seek
{
#[inline]
fn fill_buf(&mut self) -> io::Result<&[u8]> {
let limit = self.end.saturating_sub(self.pos);
if limit == 0 {
return Ok(&[]);
}
let buf = self.base.fill_buf()?;
let max = (buf.len() as u64).min(limit) as usize;
Ok(&buf[..max])
}
#[inline]
fn consume(&mut self, amt: usize) {
self.base.consume(amt);
self.pos += amt as u64;
}
}
impl<T> Seek for WindowedReader<T>
where T: BufRead + Seek
{
#[inline]
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
let mut pos = match pos {
SeekFrom::Start(p) => self.begin + p,
SeekFrom::End(p) => self.end.saturating_add_signed(p),
SeekFrom::Current(p) => self.pos.saturating_add_signed(p),
};
if pos < self.begin {
pos = self.begin;
} else if pos > self.end {
pos = self.end;
}
let result = self.base.seek(SeekFrom::Start(pos))?;
self.pos = result;
Ok(result - self.begin)
}
#[inline]
fn stream_position(&mut self) -> io::Result<u64> { Ok(self.pos) }
}
#[inline(always)]
pub(crate) fn div_rem<T>(x: T, y: T) -> (T, T)
where T: Div<Output = T> + Rem<Output = T> + Copy {
(x / y, x % y)
}
pub(crate) trait Align {
fn align_up(self, align: Self) -> Self;
fn align_down(self, align: Self) -> Self;
}
macro_rules! impl_align {
($ty:ident) => {
impl Align for $ty {
#[inline(always)]
fn align_up(self, align: Self) -> Self { (self + (align - 1)) & !(align - 1) }
#[inline(always)]
fn align_down(self, align: Self) -> Self { self & !(align - 1) }
}
};
}
impl_align!(u8);
impl_align!(u16);
impl_align!(u32);
impl_align!(u64);
impl_align!(usize);
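// Examples: align_up rounds to the next multiple of a power-of-two alignment,
// align_down truncates to the previous one.
//
// assert_eq!(0x7001u32.align_up(0x8000), 0x8000);
// assert_eq!(0x7001u32.align_down(0x8000), 0);
// assert_eq!(0x8000u32.align_up(0x8000), 0x8000);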
/// Creates a fixed-size array reference from a slice.
#[macro_export]
macro_rules! array_ref {
($slice:expr, $offset:expr, $size:expr) => {{
#[inline(always)]
fn to_array<T>(slice: &[T]) -> &[T; $size] {
unsafe { &*(slice as *const [T] as *const [T; $size]) }
}
to_array(&$slice[$offset..$offset + $size])
}};
}
pub(crate) use array_ref;
/// Creates a mutable fixed-size array reference from a slice.
#[macro_export]
macro_rules! array_ref_mut {
($slice:expr, $offset:expr, $size:expr) => {{
#[inline(always)]
fn to_array<T>(slice: &mut [T]) -> &mut [T; $size] {
unsafe { &mut *(slice.as_ptr() as *mut [_; $size]) }
unsafe { &mut *(slice as *mut [T] as *mut [T; $size]) }
}
to_array(&mut $slice[$offset..$offset + $size])
}};
}
pub(crate) use array_ref_mut;
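
A sketch of the array-reference macros (buffer contents are illustrative):

// Borrow fixed-size views into a buffer without copying.
let mut buf = [0u8; 16];
let magic: &[u8; 4] = array_ref!(buf, 0, 4);
assert_eq!(magic, &[0u8; 4]);
let body: &mut [u8; 8] = array_ref_mut!(buf, 4, 8);
body[0] = 0xFF;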
/// Compile-time assertion.
#[macro_export]
macro_rules! static_assert {
($condition:expr) => {
const _: () = core::assert!($condition);
};
}
pub(crate) use static_assert;
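
For example:

// Checked at compile time; a failing condition breaks the build.
static_assert!(size_of::<u32>() == 4);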
macro_rules! impl_read_for_bufread {
($ty:ident) => {
impl std::io::Read for $ty {
fn read(&mut self, out: &mut [u8]) -> std::io::Result<usize> {
use std::io::BufRead;
let buf = self.fill_buf()?;
let len = buf.len().min(out.len());
out[..len].copy_from_slice(&buf[..len]);
self.consume(len);
Ok(len)
}
}
};
}
pub(crate) use impl_read_for_bufread;


@ -1,51 +1,141 @@
use std::{io, io::Read};
use std::{io, io::Read, sync::Arc};
use zerocopy::{AsBytes, FromBytes, FromZeroes};
use zerocopy::{FromBytes, FromZeros, IntoBytes};
use crate::read::DiscStream;
#[inline(always)]
pub fn read_from<T, R>(reader: &mut R) -> io::Result<T>
where
T: FromBytes + FromZeroes + AsBytes,
T: FromBytes + IntoBytes,
R: Read + ?Sized,
{
let mut ret = <T>::new_zeroed();
reader.read_exact(ret.as_bytes_mut())?;
reader.read_exact(ret.as_mut_bytes())?;
Ok(ret)
}
#[inline(always)]
pub fn read_at<T, R>(reader: &mut R, offset: u64) -> io::Result<T>
where
T: FromBytes + IntoBytes,
R: DiscStream + ?Sized,
{
let mut ret = <T>::new_zeroed();
reader.read_exact_at(ret.as_mut_bytes(), offset)?;
Ok(ret)
}
#[inline(always)]
pub fn read_vec<T, R>(reader: &mut R, count: usize) -> io::Result<Vec<T>>
where
T: FromBytes + FromZeroes + AsBytes,
T: FromBytes + IntoBytes,
R: Read + ?Sized,
{
let mut ret = <T>::new_vec_zeroed(count);
reader.read_exact(ret.as_mut_slice().as_bytes_mut())?;
let mut ret =
<T>::new_vec_zeroed(count).map_err(|_| io::Error::from(io::ErrorKind::OutOfMemory))?;
reader.read_exact(ret.as_mut_slice().as_mut_bytes())?;
Ok(ret)
}
#[inline(always)]
pub fn read_vec_at<T, R>(reader: &mut R, count: usize, offset: u64) -> io::Result<Vec<T>>
where
T: FromBytes + IntoBytes,
R: DiscStream + ?Sized,
{
let mut ret =
<T>::new_vec_zeroed(count).map_err(|_| io::Error::from(io::ErrorKind::OutOfMemory))?;
reader.read_exact_at(ret.as_mut_slice().as_mut_bytes(), offset)?;
Ok(ret)
}
#[inline(always)]
pub fn read_box<T, R>(reader: &mut R) -> io::Result<Box<T>>
where
T: FromBytes + FromZeroes + AsBytes,
T: FromBytes + IntoBytes,
R: Read + ?Sized,
{
let mut ret = <T>::new_box_zeroed();
reader.read_exact(ret.as_mut().as_bytes_mut())?;
let mut ret = <T>::new_box_zeroed().map_err(|_| io::Error::from(io::ErrorKind::OutOfMemory))?;
reader.read_exact(ret.as_mut().as_mut_bytes())?;
Ok(ret)
}
#[inline(always)]
pub fn read_box_at<T, R>(reader: &mut R, offset: u64) -> io::Result<Box<T>>
where
T: FromBytes + IntoBytes,
R: DiscStream + ?Sized,
{
let mut ret = <T>::new_box_zeroed().map_err(|_| io::Error::from(io::ErrorKind::OutOfMemory))?;
reader.read_exact_at(ret.as_mut().as_mut_bytes(), offset)?;
Ok(ret)
}
#[inline(always)]
pub fn read_arc<T, R>(reader: &mut R) -> io::Result<Arc<T>>
where
T: FromBytes + IntoBytes,
R: Read + ?Sized,
{
// TODO use Arc::new_zeroed once it's stable
read_box(reader).map(Arc::from)
}
#[inline(always)]
pub fn read_arc_at<T, R>(reader: &mut R, offset: u64) -> io::Result<Arc<T>>
where
T: FromBytes + IntoBytes,
R: DiscStream + ?Sized,
{
// TODO use Arc::new_zeroed once it's stable
read_box_at(reader, offset).map(Arc::from)
}
#[inline(always)]
pub fn read_box_slice<T, R>(reader: &mut R, count: usize) -> io::Result<Box<[T]>>
where
T: FromBytes + FromZeroes + AsBytes,
T: FromBytes + IntoBytes,
R: Read + ?Sized,
{
let mut ret = <T>::new_box_slice_zeroed(count);
reader.read_exact(ret.as_mut().as_bytes_mut())?;
let mut ret = <[T]>::new_box_zeroed_with_elems(count)
.map_err(|_| io::Error::from(io::ErrorKind::OutOfMemory))?;
reader.read_exact(ret.as_mut().as_mut_bytes())?;
Ok(ret)
}
#[inline(always)]
pub fn read_box_slice_at<T, R>(reader: &mut R, count: usize, offset: u64) -> io::Result<Box<[T]>>
where
T: FromBytes + IntoBytes,
R: DiscStream + ?Sized,
{
let mut ret = <[T]>::new_box_zeroed_with_elems(count)
.map_err(|_| io::Error::from(io::ErrorKind::OutOfMemory))?;
reader.read_exact_at(ret.as_mut().as_mut_bytes(), offset)?;
Ok(ret)
}
#[inline(always)]
pub fn read_arc_slice<T, R>(reader: &mut R, count: usize) -> io::Result<Arc<[T]>>
where
T: FromBytes + IntoBytes,
R: Read + ?Sized,
{
// TODO use Arc::new_zeroed once it's stable
read_box_slice(reader, count).map(Arc::from)
}
#[inline(always)]
pub fn read_arc_slice_at<T, R>(reader: &mut R, count: usize, offset: u64) -> io::Result<Arc<[T]>>
where
T: FromBytes + IntoBytes,
R: DiscStream + ?Sized,
{
// TODO use Arc::new_zeroed once it's stable
read_box_slice_at(reader, count, offset).map(Arc::from)
}
#[inline(always)]
pub fn read_u16_be<R>(reader: &mut R) -> io::Result<u16>
where R: Read + ?Sized {
@ -69,3 +159,53 @@ where R: Read + ?Sized {
reader.read_exact(&mut buf)?;
Ok(u64::from_be_bytes(buf))
}
pub fn read_with_zero_fill<R>(r: &mut R, mut buf: &mut [u8]) -> io::Result<usize>
where R: Read + ?Sized {
let mut total = 0;
while !buf.is_empty() {
let read = r.read(buf)?;
if read == 0 {
// Fill remaining block with zeroes
buf.fill(0);
break;
}
buf = &mut buf[read..];
total += read;
}
Ok(total)
}
pub fn box_to_bytes<T>(b: Box<T>) -> Box<[u8]>
where T: IntoBytes {
let p = Box::into_raw(b);
let sp = unsafe { std::slice::from_raw_parts_mut(p as *mut u8, size_of::<T>()) };
unsafe { Box::from_raw(sp) }
}
pub fn read_into_box_slice<T, E>(
count: usize,
init: impl FnOnce(&mut [u8]) -> Result<(), E>,
) -> Result<Box<[T]>, E>
where
T: FromBytes + IntoBytes,
{
let mut out = <[T]>::new_box_zeroed_with_elems(count).unwrap();
init(out.as_mut_bytes())?;
Ok(out)
}
pub fn read_into_arc_slice<T, E>(
count: usize,
init: impl FnOnce(&mut [u8]) -> Result<(), E>,
) -> Result<Arc<[T]>, E>
where
T: FromBytes + IntoBytes,
{
let mut arc = Arc::<[T]>::new_uninit_slice(count);
let ptr = Arc::get_mut(&mut arc).unwrap().as_mut_ptr() as *mut u8;
let slice = unsafe { std::slice::from_raw_parts_mut(ptr, count * size_of::<T>()) };
slice.fill(0);
init(slice)?;
Ok(unsafe { arc.assume_init() })
}
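
A sketch of how these helpers pair with zerocopy-derived types (`RawHeader` is hypothetical):

use std::io::{self, Cursor};
use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout};

// Any fixed-layout FromBytes + IntoBytes type works here.
#[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
#[repr(C)]
struct RawHeader {
    magic: [u8; 4],
    count: u32, // native-endian in this sketch
}

fn parse(buf: &[u8]) -> io::Result<RawHeader> {
    let mut reader = Cursor::new(buf);
    read_from(&mut reader)
}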


@ -1,127 +0,0 @@
// From https://github.com/jam1garner/binrw/blob/e96a1320287ec83d1f471525ffa380800ec9e124/binrw/src/io/take_seek.rs
// MIT License
//
// Copyright (c) jam1garner and other contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#![allow(dead_code)]
//! Types for seekable reader adapters which limit the number of bytes read from
//! the underlying reader.
use std::io::{Read, Result, Seek, SeekFrom};
/// Read adapter which limits the bytes read from an underlying reader, with
/// seek support.
///
/// This struct is generally created by importing the [`TakeSeekExt`] extension
/// and calling [`take_seek`] on a reader.
///
/// [`take_seek`]: TakeSeekExt::take_seek
#[derive(Debug)]
pub struct TakeSeek<T> {
inner: T,
pos: u64,
end: u64,
}
impl<T> TakeSeek<T> {
/// Gets a reference to the underlying reader.
pub fn get_ref(&self) -> &T { &self.inner }
/// Gets a mutable reference to the underlying reader.
///
/// Care should be taken to avoid modifying the internal I/O state of the
/// underlying reader as doing so may corrupt the internal limit of this
/// `TakeSeek`.
pub fn get_mut(&mut self) -> &mut T { &mut self.inner }
/// Consumes this wrapper, returning the wrapped value.
pub fn into_inner(self) -> T { self.inner }
/// Returns the number of bytes that can be read before this instance will
/// return EOF.
///
/// # Note
///
/// This instance may reach EOF after reading fewer bytes than indicated by
/// this method if the underlying [`Read`] instance reaches EOF.
pub fn limit(&self) -> u64 { self.end.saturating_sub(self.pos) }
}
impl<T: Seek> TakeSeek<T> {
/// Sets the number of bytes that can be read before this instance will
/// return EOF. This is the same as constructing a new `TakeSeek` instance,
/// so the amount of bytes read and the previous limit value don't matter
/// when calling this method.
///
/// # Panics
///
/// Panics if the inner stream returns an error from `stream_position`.
pub fn set_limit(&mut self, limit: u64) {
let pos = self.inner.stream_position().expect("cannot get position for `set_limit`");
self.pos = pos;
self.end = pos + limit;
}
}
impl<T: Read> Read for TakeSeek<T> {
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
let limit = self.limit();
// Don't call into inner reader at all at EOF because it may still block
if limit == 0 {
return Ok(0);
}
// Lint: It is impossible for this cast to truncate because the value
// being cast is the minimum of two values, and one of the value types
// is already `usize`.
#[allow(clippy::cast_possible_truncation)]
let max = (buf.len() as u64).min(limit) as usize;
let n = self.inner.read(&mut buf[0..max])?;
self.pos += n as u64;
Ok(n)
}
}
impl<T: Seek> Seek for TakeSeek<T> {
fn seek(&mut self, pos: SeekFrom) -> Result<u64> {
self.pos = self.inner.seek(pos)?;
Ok(self.pos)
}
fn stream_position(&mut self) -> Result<u64> { Ok(self.pos) }
}
/// An extension trait that implements `take_seek()` for compatible streams.
pub trait TakeSeekExt {
/// Creates an adapter which will read at most `limit` bytes from the
/// wrapped stream.
fn take_seek(self, limit: u64) -> TakeSeek<Self>
where Self: Sized;
}
impl<T: Read + Seek> TakeSeekExt for T {
fn take_seek(mut self, limit: u64) -> TakeSeek<Self>
where Self: Sized {
let pos = self.stream_position().expect("cannot get position for `take_seek`");
TakeSeek { inner: self, pos, end: pos + limit }
}
}
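
For context, a sketch of how the removed adapter was used (in-memory cursor; `TakeSeekExt` in scope):

use std::io::{Cursor, Read};

// Limit reads to the first 4 bytes of an 8-byte buffer.
let mut limited = Cursor::new(vec![1u8, 2, 3, 4, 5, 6, 7, 8]).take_seek(4);
let mut out = Vec::new();
limited.read_to_end(&mut out).unwrap();
assert_eq!(out, vec![1, 2, 3, 4]);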

nod/src/write.rs (new file)

@ -0,0 +1,174 @@
//! [`DiscWriter`] and associated types.
use std::io;
use bytes::Bytes;
use crate::{
Error, Result,
common::{Compression, Format},
disc,
read::DiscReader,
};
/// Options for writing a disc image.
#[derive(Default, Debug, Clone)]
pub struct FormatOptions {
/// The disc format to write.
pub format: Format,
/// The compression algorithm to use for the output format, if supported.
///
/// If unsure, use [`Format::default_compression`] to get the default compression for the format.
pub compression: Compression,
/// Block size to use.
///
/// If unsure, use [`Format::default_block_size`] to get the default block size for the format.
pub block_size: u32,
}
impl FormatOptions {
/// Creates options for the specified format.
/// Uses the default compression and block size for the format.
#[inline]
pub fn new(format: Format) -> FormatOptions {
FormatOptions {
format,
compression: format.default_compression(),
block_size: format.default_block_size(),
}
}
}
/// Options for processing a disc image writer.
#[derive(Default, Debug, Clone)]
pub struct ProcessOptions {
/// If the output format supports multithreaded processing, this sets the number of threads to
/// use for processing data. This is particularly useful for formats that compress data or
/// perform other transformations. The default value of 0 disables multithreading.
pub processor_threads: usize,
/// Enables CRC32 checksum calculation for the disc data.
///
/// If the output format supports it, this will be stored in the disc data. (NKit 2 compatible)
/// Each digest calculation will run on a separate thread, unaffected by the processor thread
/// count.
pub digest_crc32: bool,
/// Enables MD5 checksum calculation for the disc data. (Slow!)
///
/// If the output format supports it, this will be stored in the disc data. (NKit 2 compatible)
/// Each digest calculation will run on a separate thread, unaffected by the processor thread
/// count.
pub digest_md5: bool,
/// Enables SHA-1 checksum calculation for the disc data.
///
/// If the output format supports it, this will be stored in the disc data. (NKit 2 compatible)
/// Each digest calculation will run on a separate thread, unaffected by the processor thread
/// count.
pub digest_sha1: bool,
/// Enables XXH64 checksum calculation for the disc data.
///
/// If the output format supports it, this will be stored in the disc data. (NKit 2 compatible)
/// Each digest calculation will run on a separate thread, unaffected by the processor thread
/// count.
pub digest_xxh64: bool,
}
/// A callback for writing disc data.
///
/// The callback should write all data to the output stream before returning, or return an error if
/// writing fails. The second and third arguments are the current bytes processed and the total
/// bytes to process, respectively. For most formats, this has no relation to the written disc size,
/// but can be used to display progress.
pub type DataCallback<'a> = dyn FnMut(Bytes, u64, u64) -> io::Result<()> + 'a;
/// A constructed disc writer.
///
/// This is the primary entry point for writing disc images.
#[derive(Clone)]
#[repr(transparent)]
pub struct DiscWriter(Box<dyn disc::writer::DiscWriter>);
impl DiscWriter {
/// Creates a new disc writer with the specified format options.
#[inline]
pub fn new(disc: DiscReader, options: &FormatOptions) -> Result<DiscWriter> {
let mut options = options.clone();
options.compression.validate_level()?;
let mut reader = disc.into_inner();
reader.reset();
let inner = match options.format {
Format::Iso => {
if options.compression != Compression::None {
return Err(Error::Other("ISO/GCM does not support compression".to_string()));
}
Box::new(reader)
}
Format::Ciso => crate::io::ciso::DiscWriterCISO::new(reader, &options)?,
#[cfg(feature = "compress-zlib")]
Format::Gcz => crate::io::gcz::DiscWriterGCZ::new(reader, &options)?,
Format::Tgc => crate::io::tgc::DiscWriterTGC::new(reader, &options)?,
Format::Wbfs => crate::io::wbfs::DiscWriterWBFS::new(reader, &options)?,
Format::Wia | Format::Rvz => crate::io::wia::DiscWriterWIA::new(reader, &options)?,
format => return Err(Error::Other(format!("Unsupported write format: {format}"))),
};
Ok(DiscWriter(inner))
}
/// Processes the disc writer to completion, calling the data callback, in order, for each block
/// of data to write to the output file. The callback should write all data before returning, or
/// return an error if writing fails.
///
/// See [`DataCallback`] for more information.
#[inline]
pub fn process(
&self,
mut data_callback: impl FnMut(Bytes, u64, u64) -> io::Result<()>,
options: &ProcessOptions,
) -> Result<DiscFinalization> {
self.0.process(&mut data_callback, options)
}
/// Returns the progress upper bound for the disc writer. For most formats, this has no
/// relation to the written disc size, but can be used to display progress.
#[inline]
pub fn progress_bound(&self) -> u64 { self.0.progress_bound() }
/// Returns the weight of the disc writer, which can help determine the number of threads to
/// dedicate for output processing. This may depend on the format's configuration, such as
/// whether compression is enabled.
#[inline]
pub fn weight(&self) -> DiscWriterWeight { self.0.weight() }
}
/// Data returned by the disc writer after processing.
///
/// If header data is provided, the consumer should seek to the beginning of the output stream and
/// write the header data, overwriting any existing data. Otherwise, the output disc will be
/// invalid.
#[derive(Default, Clone)]
pub struct DiscFinalization {
/// Header data to write to the beginning of the output stream, if any.
pub header: Bytes,
/// The calculated CRC32 checksum of the input disc data, if any.
pub crc32: Option<u32>,
/// The calculated MD5 hash of the input disc data, if any.
pub md5: Option<[u8; 16]>,
/// The calculated SHA-1 hash of the input disc data, if any.
pub sha1: Option<[u8; 20]>,
/// The calculated XXH64 hash of the input disc data, if any.
pub xxh64: Option<u64>,
}
/// The weight of a disc writer, which can help determine the number of threads to use for
/// processing.
pub enum DiscWriterWeight {
/// The writer performs little to no processing of the input data, and is mostly I/O bound.
/// This means that this writer does not benefit from parallelization, and will ignore the
/// number of threads specified.
Light,
/// The writer performs some processing of the input data, and is somewhat CPU bound. This means
/// that this writer benefits from parallelization, but not as much as a heavy writer.
Medium,
/// The writer performs significant processing of the input data, and is mostly CPU bound. This
/// means that this writer benefits from parallelization.
Heavy,
}
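
A minimal end-to-end sketch of this API, assuming `DiscReader::new` accepts path-like arguments and `nod::Error` implements `std::error::Error`:

use std::io::{Seek, SeekFrom, Write};
use nod::{
    common::Format,
    read::{DiscOptions, DiscReader},
    write::{DiscWriter, FormatOptions, ProcessOptions},
};

// Sketch only: convert a supported disc image to RVZ with default options.
fn convert_to_rvz(input: &str, output: &str) -> Result<(), Box<dyn std::error::Error>> {
    let disc = DiscReader::new(input, &DiscOptions::default())?;
    let writer = DiscWriter::new(disc, &FormatOptions::new(Format::Rvz))?;
    let mut out = std::fs::File::create(output)?;
    let finalization = writer.process(
        |data, _pos, _total| out.write_all(&data),
        &ProcessOptions { digest_crc32: true, ..Default::default() },
    )?;
    // Some formats produce header data that must overwrite the start of the file.
    if !finalization.header.is_empty() {
        out.seek(SeekFrom::Start(0))?;
        out.write_all(&finalization.header)?;
    }
    if let Some(crc32) = finalization.crc32 {
        println!("crc32: {crc32:08x}");
    }
    Ok(())
}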


@ -1,51 +1,58 @@
[package]
name = "nodtool"
version = "1.2.0"
edition = "2021"
rust-version = "1.73.0"
authors = ["Luke Street <luke@street.dev>"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/encounter/nod-rs"
documentation = "https://docs.rs/nod"
version.workspace = true
edition.workspace = true
rust-version.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true
documentation = "https://docs.rs/nodtool"
readme = "../README.md"
description = """
CLI tool for verifying and converting GameCube and Wii disc images.
CLI tool for extracting and converting GameCube and Wii disc images.
"""
keywords = ["gamecube", "wii", "iso", "wbfs", "rvz"]
keywords.workspace = true
categories = ["command-line-utilities", "parser-implementations"]
build = "build.rs"
[features]
asm = ["md-5/asm", "nod/asm", "sha1/asm"]
nightly = ["crc32fast/nightly"]
default = ["compress-bzip2", "compress-lzma", "compress-zlib", "compress-zstd"]
compress-bzip2 = ["nod/compress-bzip2"]
compress-lzma = ["nod/compress-lzma"]
compress-zlib = ["nod/compress-zlib"]
compress-zstd = ["nod/compress-zstd"]
openssl = ["nod/openssl"]
openssl-vendored = ["nod/openssl-vendored"]
tracy = ["dep:tracing-tracy"]
[dependencies]
argp = "0.3.0"
base16ct = "0.2.0"
crc32fast = "1.4.2"
digest = "0.10.7"
enable-ansi-support = "0.2.1"
hex = { version = "0.4.3", features = ["serde"] }
indicatif = "0.17.8"
itertools = "0.12.1"
log = "0.4.20"
md-5 = "0.10.6"
nod = { path = "../nod" }
quick-xml = { version = "0.31.0", features = ["serialize"] }
serde = { version = "1.0.197", features = ["derive"] }
sha1 = "0.10.6"
size = "0.4.1"
supports-color = "3.0.0"
tracing = "0.1.40"
tracing-attributes = "0.1.27"
tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
xxhash-rust = { version = "0.8.10", features = ["xxh64"] }
zerocopy = { version = "0.7.32", features = ["alloc", "derive"] }
zstd = "0.13.1"
argp = "0.4"
crc32fast = "1.5"
digest = { workspace = true }
enable-ansi-support = "0.2"
hex = { version = "0.4", features = ["serde"] }
indicatif = "0.18"
md-5 = { workspace = true }
nod = { version = "2.0.0-alpha", path = "../nod", default-features = false }
num_cpus = "1.17"
quick-xml = { version = "0.38", features = ["serialize"] }
serde = { version = "1.0", features = ["derive"] }
sha1 = { workspace = true }
size = "0.5"
supports-color = "3.0"
tracing = { workspace = true }
tracing-attributes = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
tracing-tracy = { version = "0.11", features = ["flush-on-exit"], optional = true }
zerocopy = { workspace = true }
zstd = "0.13"
[target.'cfg(target_env = "musl")'.dependencies]
mimalloc = "0.1"
[build-dependencies]
hex = { version = "0.4.3", features = ["serde"] }
quick-xml = { version = "0.31.0", features = ["serialize"] }
serde = { version = "1.0.197", features = ["derive"] }
zerocopy = { version = "0.7.32", features = ["alloc", "derive"] }
zstd = "0.13.1"
hex = { version = "0.4", features = ["serde"] }
quick-xml = { version = "0.38", features = ["serialize"] }
serde = { version = "1.0", features = ["derive"] }
zerocopy = { version = "0.8", features = ["alloc", "derive"] }
zstd = "0.13"


@ -5,7 +5,7 @@
<name>Non-Redump - Nintendo - Nintendo GameCube</name>
<description>Non-Redump - Nintendo - Nintendo GameCube</description>
<subset>Non-Redump</subset>
<version>20240602-041318</version>
<version>20240904-155318</version>
<author>bikerspade, Gefflon, Hiccup, NovaAurora, rarenight, relax, Seventy7, togemet2</author>
<homepage>No-Intro</homepage>
<url>https://www.no-intro.org</url>
@ -94,12 +94,6 @@
<description>Major League Baseball 2K6 (USA) (Beta) (2006-05-18)</description>
<rom name="Major League Baseball 2K6 (USA) (Beta) (2006-05-18).iso" size="1459978240" crc="2e97a802" md5="707168f95134d4847278472b84c7657b" sha1="d18ee85aeb558bb7d357bd0fc2a6c6a4bda2467c" serial="G62ET2"/>
</game>
<game name="Mega Man X - Command Mission (USA) (Beta)" id="0038">
<category>Games</category>
<category>Preproduction</category>
<description>Mega Man X - Command Mission (USA) (Beta)</description>
<rom name="Mega Man X - Command Mission (USA) (Beta).iso" size="1459978240" crc="7b88690e" md5="801aeb70186915ac0a53494006ff74f8" sha1="24d72340954cd62e142a6d11a6582fc5f40ba994" sha256="de74aab25978ee3523e7de522d9da9e7f65690eb161094163d342c423d55d25c" serial="GXRE08"/>
</game>
<game name="Metal Gear Solid - The Twin Snakes (USA) (Disc 2) (Beta)" id="0014">
<description>Metal Gear Solid - The Twin Snakes (USA) (Disc 2) (Beta)</description>
<rom name="Metal Gear Solid - The Twin Snakes (USA) (Disc 2) (Beta).iso" size="1459978240" crc="85409e81" md5="ac1181d6223c1b82596b41ddac9e40e7" sha1="523b0e82b84c91946da05a48841914df6729e747" serial="GGSEA4"/>


@ -3,9 +3,9 @@
<datafile>
<header>
<name>Nintendo - GameCube</name>
<description>Nintendo - GameCube - Discs (1992) (2024-06-02 00-38-06)</description>
<version>2024-06-02 00-38-06</version>
<date>2024-06-02 00-38-06</date>
<description>Nintendo - GameCube - Discs (2001) (2024-12-01 23-54-46)</description>
<version>2024-12-01 23-54-46</version>
<date>2024-12-01 23-54-46</date>
<author>redump.org</author>
<homepage>redump.org</homepage>
<url>http://redump.org/</url>
@ -2085,10 +2085,10 @@
<description>Nickelodeon Tak 2 - The Staff of Dreams (USA)</description>
<rom name="Nickelodeon Tak 2 - The Staff of Dreams (USA).iso" size="1459978240" crc="ca5ada0f" md5="0d9a7c74d1ff98dd5ad867bd67377bb1" sha1="bc36de899604771551b93638ce204da92e0b7582"/>
</game>
<game name="Nintendo GameCube Preview Disc - May 2003 (USA)">
<game name="Nintendo GameCube Preview Disc - May 2003 (USA, Canada)">
<category>Demos</category>
<description>Nintendo GameCube Preview Disc - May 2003 (USA)</description>
<rom name="Nintendo GameCube Preview Disc - May 2003 (USA).iso" size="1459978240" crc="d45c8c07" md5="73e092844193533bfd6d5c27034446a3" sha1="43047f599583e1b0357881d21cba2e3958c57822"/>
<description>Nintendo GameCube Preview Disc - May 2003 (USA, Canada)</description>
<rom name="Nintendo GameCube Preview Disc - May 2003 (USA, Canada).iso" size="1459978240" crc="d45c8c07" md5="73e092844193533bfd6d5c27034446a3" sha1="43047f599583e1b0357881d21cba2e3958c57822"/>
</game>
<game name="Interactive Multi-Game Demo Disc - July 2002 (USA)">
<category>Demos</category>
@ -2395,10 +2395,10 @@
<description>Need for Speed - Underground (USA)</description>
<rom name="Need for Speed - Underground (USA).iso" size="1459978240" crc="01c154ba" md5="c20f02454166bc8fa08f84307871ac7d" sha1="d6882a712509e8bba75ee8837bcac65f8352b1d1"/>
</game>
<game name="Nickelodeon SpongeBob SquarePants - The Movie (USA) (Rev 1)">
<game name="Nickelodeon The SpongeBob SquarePants Movie (USA) (Rev 1)">
<category>Games</category>
<description>Nickelodeon SpongeBob SquarePants - The Movie (USA) (Rev 1)</description>
<rom name="Nickelodeon SpongeBob SquarePants - The Movie (USA) (Rev 1).iso" size="1459978240" crc="77be3cc6" md5="9fbad7186b2168190082a54fa20d63b7" sha1="2aea159b1e98a220a6e4534f0f017b8722ce4caa"/>
<description>Nickelodeon The SpongeBob SquarePants Movie (USA) (Rev 1)</description>
<rom name="Nickelodeon The SpongeBob SquarePants Movie (USA) (Rev 1).iso" size="1459978240" crc="77be3cc6" md5="9fbad7186b2168190082a54fa20d63b7" sha1="2aea159b1e98a220a6e4534f0f017b8722ce4caa"/>
</game>
<game name="Star Wars - Rogue Squadron III - Rebel Strike (USA) (En,Fr,De,Es,It)">
<category>Games</category>
@ -2760,15 +2760,15 @@
<description>Sonic Adventure DX - Director's Cut (Europe) (En,Ja,Fr,De,Es) (Rev 1)</description>
<rom name="Sonic Adventure DX - Director's Cut (Europe) (En,Ja,Fr,De,Es) (Rev 1).iso" size="1459978240" crc="9ba7f3af" md5="2136ed4b6a27dc64066c8a689918dce0" sha1="99ce5f51388919a95d571a730c50362ba14277c3"/>
</game>
<game name="GoldenEye - Rogue Agent (France) (Disc 1)">
<game name="GoldenEye - Au Service du Mal (France) (Disc 1)">
<category>Games</category>
<description>GoldenEye - Rogue Agent (France) (Disc 1)</description>
<rom name="GoldenEye - Rogue Agent (France) (Disc 1).iso" size="1459978240" crc="4564bb01" md5="08110ba8ac059a8f735bf951bee12edd" sha1="107e792ae93495179f5bd657da166947751e37cc"/>
<description>GoldenEye - Au Service du Mal (France) (Disc 1)</description>
<rom name="GoldenEye - Au Service du Mal (France) (Disc 1).iso" size="1459978240" crc="4564bb01" md5="08110ba8ac059a8f735bf951bee12edd" sha1="107e792ae93495179f5bd657da166947751e37cc"/>
</game>
<game name="GoldenEye - Rogue Agent (France) (Disc 2)">
<game name="GoldenEye - Au Service du Mal (France) (Disc 2)">
<category>Games</category>
<description>GoldenEye - Rogue Agent (France) (Disc 2)</description>
<rom name="GoldenEye - Rogue Agent (France) (Disc 2).iso" size="1459978240" crc="dba8aa7f" md5="57f801c71548bd7a6e281b0b028a4ac6" sha1="7bb6875321b6ea90146bfb71bb730f6317c2a172"/>
<description>GoldenEye - Au Service du Mal (France) (Disc 2)</description>
<rom name="GoldenEye - Au Service du Mal (France) (Disc 2).iso" size="1459978240" crc="dba8aa7f" md5="57f801c71548bd7a6e281b0b028a4ac6" sha1="7bb6875321b6ea90146bfb71bb730f6317c2a172"/>
</game>
<game name="Lost Kingdoms (Europe) (En,Fr)">
<category>Games</category>
@ -3265,10 +3265,10 @@
<description>Mega Man X - Command Mission (USA)</description>
<rom name="Mega Man X - Command Mission (USA).iso" size="1459978240" crc="82115bd2" md5="ddaf03e8bb5b7ca43ae2f2d77087f917" sha1="98fafeb1fbe04dfec9a4ff1d0c159627bb288aad"/>
</game>
<game name="Skies of Arcadia Legends (USA)">
<game name="Skies of Arcadia - Legends (USA)">
<category>Games</category>
<description>Skies of Arcadia Legends (USA)</description>
<rom name="Skies of Arcadia Legends (USA).iso" size="1459978240" crc="23e347b6" md5="3e7fa5033c4a2704434fb6ba98195ecd" sha1="46105320553c858f25fafc5fd357566b505a4940"/>
<description>Skies of Arcadia - Legends (USA)</description>
<rom name="Skies of Arcadia - Legends (USA).iso" size="1459978240" crc="23e347b6" md5="3e7fa5033c4a2704434fb6ba98195ecd" sha1="46105320553c858f25fafc5fd357566b505a4940"/>
</game>
<game name="Harvest Moon - Another Wonderful Life (USA)">
<category>Games</category>
@ -4380,10 +4380,10 @@
<description>World Soccer Winning Eleven 6 - Final Evolution (Japan)</description>
<rom name="World Soccer Winning Eleven 6 - Final Evolution (Japan).iso" size="1459978240" crc="07f76bcc" md5="db339f4d36b698e0c18de99a91c0c165" sha1="4d0a7142474ad5a4d6e7245f252eab308f783fbe"/>
</game>
<game name="Skies of Arcadia Legends (Europe) (En,Fr,De,Es)">
<game name="Skies of Arcadia - Legends (Europe) (En,Fr,De,Es)">
<category>Games</category>
<description>Skies of Arcadia Legends (Europe) (En,Fr,De,Es)</description>
<rom name="Skies of Arcadia Legends (Europe) (En,Fr,De,Es).iso" size="1459978240" crc="a8e18c76" md5="bd814992f1e39d4147c775ff8b32d022" sha1="30ee1d7777fe51bcd7c4deeb867651d5b6e96e41"/>
<description>Skies of Arcadia - Legends (Europe) (En,Fr,De,Es)</description>
<rom name="Skies of Arcadia - Legends (Europe) (En,Fr,De,Es).iso" size="1459978240" crc="a8e18c76" md5="bd814992f1e39d4147c775ff8b32d022" sha1="30ee1d7777fe51bcd7c4deeb867651d5b6e96e41"/>
</game>
<game name="Serious Sam - Next Encounter (Europe) (En,Fr,De)">
<category>Games</category>
@ -6555,10 +6555,10 @@
<description>2002 FIFA World Cup (Europe) (Fr,Nl)</description>
<rom name="2002 FIFA World Cup (Europe) (Fr,Nl).iso" size="1459978240" crc="5980022f" md5="54411489d800faf171f5ff203b1a0c46" sha1="b7e6aa5d84e35fe1de1879c9dcd12c0c1987e275"/>
</game>
<game name="Nickelodeon SpongeBob SquarePants - The Movie (Europe) (Fr,Nl)">
<game name="Nickelodeon The SpongeBob SquarePants Movie (Europe) (Fr,Nl)">
<category>Games</category>
<description>Nickelodeon SpongeBob SquarePants - The Movie (Europe) (Fr,Nl)</description>
<rom name="Nickelodeon SpongeBob SquarePants - The Movie (Europe) (Fr,Nl).iso" size="1459978240" crc="084ea086" md5="5c8a6eeeb849e20c59bc245a56d4f174" sha1="09606bce7bc063d257f006973f1b5c70e8e9cd42"/>
<description>Nickelodeon The SpongeBob SquarePants Movie (Europe) (Fr,Nl)</description>
<rom name="Nickelodeon The SpongeBob SquarePants Movie (Europe) (Fr,Nl).iso" size="1459978240" crc="084ea086" md5="5c8a6eeeb849e20c59bc245a56d4f174" sha1="09606bce7bc063d257f006973f1b5c70e8e9cd42"/>
</game>
<game name="FIFA 06 (Netherlands)">
<category>Games</category>
@ -7070,10 +7070,10 @@
<description>Nickelodeon SpongeBob SquarePants - Creature from the Krusty Krab (Europe)</description>
<rom name="Nickelodeon SpongeBob SquarePants - Creature from the Krusty Krab (Europe).iso" size="1459978240" crc="5dcf1734" md5="10b725bed9a3be326a776db800682b47" sha1="53238fd27c00b2f51b19e2fcf4f3bc8ae34aa612"/>
</game>
<game name="Nickelodeon SpongeBob SquarePants - The Movie (Europe)">
<game name="Nickelodeon The SpongeBob SquarePants Movie (Europe)">
<category>Games</category>
<description>Nickelodeon SpongeBob SquarePants - The Movie (Europe)</description>
<rom name="Nickelodeon SpongeBob SquarePants - The Movie (Europe).iso" size="1459978240" crc="334aac70" md5="62bc84bfcad66e8d24f83873b247b628" sha1="f13f66669639b95d2c63f51cc3deea1b334998e5"/>
<description>Nickelodeon The SpongeBob SquarePants Movie (Europe)</description>
<rom name="Nickelodeon The SpongeBob SquarePants Movie (Europe).iso" size="1459978240" crc="334aac70" md5="62bc84bfcad66e8d24f83873b247b628" sha1="f13f66669639b95d2c63f51cc3deea1b334998e5"/>
</game>
<game name="MC Groovz Dance Craze (Europe) (En,Fr,De,Es,It)">
<category>Games</category>
@ -7205,10 +7205,10 @@
<description>Gekkan Nintendo Tentou Demo 2002.7.1 (Japan)</description>
<rom name="Gekkan Nintendo Tentou Demo 2002.7.1 (Japan).iso" size="1459978240" crc="86dacab6" md5="84936411bf7768bcdac3be8f401ad8eb" sha1="4d7d77f67288902ba17bf6f42d19679e45cbcb54"/>
</game>
<game name="Gekkan Nintendo Tentou Demo 2002.7.10 (Japan)">
<game name="Gekkan Nintendo Tentou Demo 2002.7.10 Zoukan-gou (Japan)">
<category>Demos</category>
<description>Gekkan Nintendo Tentou Demo 2002.7.10 (Japan)</description>
<rom name="Gekkan Nintendo Tentou Demo 2002.7.10 (Japan).iso" size="1459978240" crc="878c6dea" md5="f185e3c31a81519ee0c0b2539ebcd442" sha1="8b863d521ce96c85c9de60a9d95ccf10e6d8edb6"/>
<description>Gekkan Nintendo Tentou Demo 2002.7.10 Zoukan-gou (Japan)</description>
<rom name="Gekkan Nintendo Tentou Demo 2002.7.10 Zoukan-gou (Japan).iso" size="1459978240" crc="878c6dea" md5="f185e3c31a81519ee0c0b2539ebcd442" sha1="8b863d521ce96c85c9de60a9d95ccf10e6d8edb6"/>
</game>
<game name="Rune II - Koruten no Kagi no Himitsu (Japan) (Taikenban)">
<category>Demos</category>
@ -7725,10 +7725,10 @@
<description>Family Stadium 2003 (Japan)</description>
<rom name="Family Stadium 2003 (Japan).iso" size="1459978240" crc="69b9e609" md5="d6fe5f8e1b6c915c6812b073ac23aada" sha1="d49244a31f95a77c296fba70b8b6550b7e017efc"/>
</game>
<game name="Gakuen Toshi Vara Noir Roses (Japan)">
<game name="Gakuen Toshi Varanoir - Roses (Japan)">
<category>Games</category>
<description>Gakuen Toshi Vara Noir Roses (Japan)</description>
<rom name="Gakuen Toshi Vara Noir Roses (Japan).iso" size="1459978240" crc="cf6d0aa5" md5="364f6513ce08ebbfc312e7a05a6b4225" sha1="d403770cba0c71520df8fd853275508130ac04f4"/>
<description>Gakuen Toshi Varanoir - Roses (Japan)</description>
<rom name="Gakuen Toshi Varanoir - Roses (Japan).iso" size="1459978240" crc="cf6d0aa5" md5="364f6513ce08ebbfc312e7a05a6b4225" sha1="d403770cba0c71520df8fd853275508130ac04f4"/>
</game>
<game name="Gekitou Pro Yakyuu - Mizushima Shinji All Stars vs. Pro Yakyuu (Japan)">
<category>Games</category>
@ -8660,10 +8660,10 @@
<description>SSX on Tour (Europe) (En,Fr,De)</description>
<rom name="SSX on Tour (Europe) (En,Fr,De).iso" size="1459978240" crc="886defd6" md5="e86b6fc0db336d2e55c08b23d76ab554" sha1="2a6055689d79602d92a5bbbcf9d5a9613301d802"/>
</game>
<game name="2 Games in 1 - Bob L'eponge - Le Film + Tak 2 - Le Sceptre des Reves (France) (Disc 1) (Bob L'eponge - Le Film)">
<game name="2 Games in 1 - Bob L'eponge - Le Film + Tak 2 - Le Sceptre des Reves (France) (Fr,Nl) (Disc 1) (Bob L'eponge - Le Film)">
<category>Games</category>
<description>2 Games in 1 - Bob L'eponge - Le Film + Tak 2 - Le Sceptre des Reves (France) (Disc 1) (Bob L'eponge - Le Film)</description>
<rom name="2 Games in 1 - Bob L'eponge - Le Film + Tak 2 - Le Sceptre des Reves (France) (Disc 1) (Bob L'eponge - Le Film).iso" size="1459978240" crc="3cb489d7" md5="592585ee660990d4953e8d6b9f961d3c" sha1="e52e6b4d76461a9041dfbcc95ecef8b1d1ecc051"/>
<description>2 Games in 1 - Bob L'eponge - Le Film + Tak 2 - Le Sceptre des Reves (France) (Fr,Nl) (Disc 1) (Bob L'eponge - Le Film)</description>
<rom name="2 Games in 1 - Bob L'eponge - Le Film + Tak 2 - Le Sceptre des Reves (France) (Fr,Nl) (Disc 1) (Bob L'eponge - Le Film).iso" size="1459978240" crc="3cb489d7" md5="592585ee660990d4953e8d6b9f961d3c" sha1="e52e6b4d76461a9041dfbcc95ecef8b1d1ecc051"/>
</game>
<game name="2 Games in 1 - Bob L'eponge - Le Film + Tak 2 - Le Sceptre des Reves (France) (Disc 2) (Tak 2 - Le Sceptre des Reves)">
<category>Games</category>
@ -9325,10 +9325,10 @@
<description>Disney-Pixar Nemo-reul Chajaseo (Korea)</description>
<rom name="Disney-Pixar Nemo-reul Chajaseo (Korea).iso" size="1459978240" crc="312420a2" md5="7ee187f776e1cbed1f8836e645560d46" sha1="340ef094dfe69c0253e1539911f7f508d91108fa"/>
</game>
<game name="Action Replay for GameCube (USA) (En,Fr,De,Es,It,Pt) (Unl) (v1.08)">
<game name="Action Replay for GameCube (USA, Europe) (En,Fr,De,Es,It,Pt) (Unl) (v1.08)">
<category>Applications</category>
<description>Action Replay for GameCube (USA) (En,Fr,De,Es,It,Pt) (Unl) (v1.08)</description>
<rom name="Action Replay for GameCube (USA) (En,Fr,De,Es,It,Pt) (Unl) (v1.08).iso" size="1459978240" crc="9cb75b81" md5="ba5aadafb4b364679e6950d339aaedfd" sha1="8d480dc45b47e11c1bb5f7f500e2c78b08a1153a"/>
<description>Action Replay for GameCube (USA, Europe) (En,Fr,De,Es,It,Pt) (Unl) (v1.08)</description>
<rom name="Action Replay for GameCube (USA, Europe) (En,Fr,De,Es,It,Pt) (Unl) (v1.08).iso" size="1459978240" crc="9cb75b81" md5="ba5aadafb4b364679e6950d339aaedfd" sha1="8d480dc45b47e11c1bb5f7f500e2c78b08a1153a"/>
</game>
<game name="Mario Kart - Double Dash!! (Korea)">
<category>Games</category>
@ -9480,10 +9480,10 @@
<description>Cube CD 14 (33) (UK) (Unl)</description>
<rom name="Cube CD 14 (33) (UK) (Unl).iso" size="1459978240" crc="b21fe659" md5="82109cff554d0ecadb0fffb7030dc5a8" sha1="884052c0eb3f0d515ca0c7b173e0a5dfd8071a35"/>
</game>
<game name="Action Replay Max (Europe) (En,Fr,De,Es,It,Pt) (Unl)">
<game name="Action Replay Max (USA, Europe) (En,Fr,De,Es,It,Pt) (Unl)">
<category>Applications</category>
<description>Action Replay Max (Europe) (En,Fr,De,Es,It,Pt) (Unl)</description>
<rom name="Action Replay Max (Europe) (En,Fr,De,Es,It,Pt) (Unl).iso" size="1459978240" crc="989e3a76" md5="335e62cdeb07ef3e15d1a3bfceaa28d2" sha1="b339b3f828abe509bfbb4a745322eebea54e105d"/>
<description>Action Replay Max (USA, Europe) (En,Fr,De,Es,It,Pt) (Unl)</description>
<rom name="Action Replay Max (USA, Europe) (En,Fr,De,Es,It,Pt) (Unl).iso" size="1459978240" crc="989e3a76" md5="335e62cdeb07ef3e15d1a3bfceaa28d2" sha1="b339b3f828abe509bfbb4a745322eebea54e105d"/>
</game>
<game name="Capcom vs. SNK 2 EO (Japan) (Tentou Taikenban)">
<category>Demos</category>
@ -9880,10 +9880,10 @@
<description>FIFA Soccer 07 (Latin America)</description>
<rom name="FIFA Soccer 07 (Latin America).iso" size="1459978240" crc="eb1be753" md5="f0f6029d90191ac10a92938597735f37" sha1="64ba3862474dc9708d7263e0136299f684f38ee6"/>
</game>
<game name="Pickles (USA) (Proto)">
<game name="Pickles (USA) (Proto 1)">
<category>Preproduction</category>
<description>Pickles (USA) (Proto)</description>
<rom name="Pickles (USA) (Proto).iso" size="1459978240" crc="5542f6e9" md5="3cdde8d4002b4268c573cf2c2a9009c1" sha1="7011e5c016f3e87b9786d542cb555b111c8293ee"/>
<description>Pickles (USA) (Proto 1)</description>
<rom name="Pickles (USA) (Proto 1).iso" size="1459978240" crc="5542f6e9" md5="3cdde8d4002b4268c573cf2c2a9009c1" sha1="7011e5c016f3e87b9786d542cb555b111c8293ee"/>
</game>
<game name="18 Wheeler - American Pro Trucker (USA) (Beta) (2001-11-12)">
<category>Preproduction</category>
@ -9970,4 +9970,49 @@
<description>Evolution Worlds (USA) (Beta)</description>
<rom name="Evolution Worlds (USA) (Beta).iso" size="1459978240" crc="74e3fbee" md5="f5a5d5d02b8ce369b53e5efcaf2c5d71" sha1="061ba70fe611a7c36a3a99dc52839fc275724f19"/>
</game>
<game name="Pickles (USA) (Proto 2)">
<category>Preproduction</category>
<description>Pickles (USA) (Proto 2)</description>
<rom name="Pickles (USA) (Proto 2).iso" size="1459978240" crc="7fa9a839" md5="313057403f079b96926335202a6ca40c" sha1="9038eb0c7c013421a9be5a94fcd3d27c7f2f8767"/>
</game>
<game name="Advance Connector GC-you (Japan) (Unl)">
<category>Applications</category>
<description>Advance Connector GC-you (Japan) (Unl)</description>
<rom name="Advance Connector GC-you (Japan) (Unl).iso" size="1459978240" crc="93418d2a" md5="9ad3900dbde0c1b588212dce88730b7a" sha1="cf3f68552c099aa2830c2dd74fc3e90d3518f82f"/>
</game>
<game name="Xeno Crisis (Japan) (En,Ja,Fr,De,Es,It,Nl,Pt) (Unl)">
<category>Games</category>
<description>Xeno Crisis (Japan) (En,Ja,Fr,De,Es,It,Nl,Pt) (Unl)</description>
<rom name="Xeno Crisis (Japan) (En,Ja,Fr,De,Es,It,Nl,Pt) (Unl).iso" size="115898368" crc="0c6d4075" md5="4244e7887f60c1336d711fc4a1fd13e0" sha1="46acc5a470af00a3adf916ba879e382d1540b1c5"/>
</game>
<game name="Gekkan Nintendo Tentou Demo 2002.8.1 (Japan)">
<category>Demos</category>
<description>Gekkan Nintendo Tentou Demo 2002.8.1 (Japan)</description>
<rom name="Gekkan Nintendo Tentou Demo 2002.8.1 (Japan).iso" size="1459978240" crc="06588854" md5="405c464ef39eb55f63fd6cd0a947de86" sha1="cd7d1418c2aba5ee614fdd22a46b1f48cbbc54cf"/>
</game>
<game name="NBA Live 2003 (France)">
<category>Games</category>
<description>NBA Live 2003 (France)</description>
<rom name="NBA Live 2003 (France).iso" size="1459978240" crc="ca606a11" md5="b300251ad4be896443143f7dac64e9dc" sha1="f8117becd57e358afed78b35e79800df094bbd3e"/>
</game>
<game name="Advance Game Port (USA) (Unl) (Rev 2)">
<category>Applications</category>
<description>Advance Game Port (USA) (Unl) (Rev 2)</description>
<rom name="Advance Game Port (USA) (Unl) (Rev 2).iso" size="1459978240" crc="0e8bb8d0" md5="097275ac8c60735001d3c0623529fcfc" sha1="d4ac3fed9cc08a19cdad5e46cc5870e909723691"/>
</game>
<game name="SD Media Launcher for GameCube &amp; Wii (Japan) (Unl)">
<category>Applications</category>
<description>SD Media Launcher for GameCube &amp; Wii (Japan) (Unl)</description>
<rom name="SD Media Launcher for GameCube &amp; Wii (Japan) (Unl).iso" size="1459978240" crc="7b642b57" md5="4e260af6d573045186b8794587a466a5" sha1="cf5c5e8b9e073a3bbf993cce3910aa0814d9eb96"/>
</game>
<game name="Mega Man X - Command Mission (USA) (Beta) (2004-07-23)">
<category>Preproduction</category>
<description>Mega Man X - Command Mission (USA) (Beta) (2004-07-23)</description>
<rom name="Mega Man X - Command Mission (USA) (Beta) (2004-07-23).iso" size="1459978240" crc="7b88690e" md5="801aeb70186915ac0a53494006ff74f8" sha1="24d72340954cd62e142a6d11a6582fc5f40ba994"/>
</game>
<game name="Mario Party 4 Event-you Disc (Japan)">
<category>Demos</category>
<description>Mario Party 4 Event-you Disc (Japan)</description>
<rom name="Mario Party 4 Event-you Disc (Japan).iso" size="1459978240" crc="4d3a81a8" md5="fe10c951d2345f94b0633d4861c65598" sha1="de8c7b6f1ca3e7b4bf29a3fa3d12c4bfc42dfad4"/>
</game>
</datafile>


@ -3,9 +3,9 @@
<datafile>
<header>
<name>Nintendo - Wii</name>
<description>Nintendo - Wii - Discs (3770) (2024-02-13 21-52-18)</description>
<version>2024-02-13 21-52-18</version>
<date>2024-02-13 21-52-18</date>
<description>Nintendo - Wii - Discs (3775) (2024-11-29 20-05-00)</description>
<version>2024-11-29 20-05-00</version>
<date>2024-11-29 20-05-00</date>
<author>redump.org</author>
<homepage>redump.org</homepage>
<url>http://redump.org/</url>
@ -400,10 +400,10 @@
<description>Wii Music (USA) (En,Fr,Es)</description>
<rom name="Wii Music (USA) (En,Fr,Es).iso" size="4699979776" crc="59d46fe4" md5="ce0d062df2a18fcbb145e52634679ca6" sha1="50c784119b4e5cfed244a744183c9d5214227840"/>
</game>
<game name="Trauma Center - New Blood (USA)">
<game name="Trauma Center - New Blood (USA) (En,Ja)">
<category>Games</category>
<description>Trauma Center - New Blood (USA)</description>
<rom name="Trauma Center - New Blood (USA).iso" size="4699979776" crc="1b9fc0a4" md5="4ece4485e318a2fa2aab5cbf5aab1b18" sha1="3778e0c4d0b74be925c894813c9f6e2c6078dda5"/>
<description>Trauma Center - New Blood (USA) (En,Ja)</description>
<rom name="Trauma Center - New Blood (USA) (En,Ja).iso" size="4699979776" crc="1b9fc0a4" md5="4ece4485e318a2fa2aab5cbf5aab1b18" sha1="3778e0c4d0b74be925c894813c9f6e2c6078dda5"/>
</game>
<game name="Kororinpa - Marble Mania (USA)">
<category>Games</category>
@ -13390,10 +13390,10 @@
<description>Nickelodeon Dora the Explorer - Dora Saves the Snow Princess (Europe) (En,Fr,Nl)</description>
<rom name="Nickelodeon Dora the Explorer - Dora Saves the Snow Princess (Europe) (En,Fr,Nl).iso" size="4699979776" crc="bc05608d" md5="b08b2d0a6a7262cd43069ca94fc2cb57" sha1="cfb57ba6736728b181e84826f11d20428bddcb13"/>
</game>
<game name="Food Network - Cook or Be Cooked (USA)">
<game name="Food Network - Cook or Be Cooked! (USA)">
<category>Games</category>
<description>Food Network - Cook or Be Cooked (USA)</description>
<rom name="Food Network - Cook or Be Cooked (USA).iso" size="4699979776" crc="30a9d897" md5="ce5ec10957e7f90bdf1419237ed612f7" sha1="8dfc46d40fde135de2fb24d1e056bcf53001c714"/>
<description>Food Network - Cook or Be Cooked! (USA)</description>
<rom name="Food Network - Cook or Be Cooked! (USA).iso" size="4699979776" crc="30a9d897" md5="ce5ec10957e7f90bdf1419237ed612f7" sha1="8dfc46d40fde135de2fb24d1e056bcf53001c714"/>
</game>
<game name="Get Fit with Mel B (USA) (En,Fr,Es)">
<category>Games</category>
@ -17300,10 +17300,10 @@
<description>Horrid Henry - Missions of Mischief (Europe) (En,Fr,De,Es,It)</description>
<rom name="Horrid Henry - Missions of Mischief (Europe) (En,Fr,De,Es,It).iso" size="4699979776" crc="58596d71" md5="c5225b3248eebb54f0256cbf3c518e34" sha1="cd1bbc1540bf9773c56572617dc4e7bc0874d8b7"/>
</game>
<game name="I'm a Celebrity...Get Me Out of Here! (Europe)">
<game name="I'm a Celebrity...Get Me Out of Here! (UK)">
<category>Games</category>
<description>I'm a Celebrity...Get Me Out of Here! (Europe)</description>
<rom name="I'm a Celebrity...Get Me Out of Here! (Europe).iso" size="4699979776" crc="96f013c7" md5="c272b7ad6ea6f7e170f9603092f9cc5b" sha1="34c56074bce0e4ba2bc36696954d21f8e38b0602"/>
<description>I'm a Celebrity...Get Me Out of Here! (UK)</description>
<rom name="I'm a Celebrity...Get Me Out of Here! (UK).iso" size="4699979776" crc="96f013c7" md5="c272b7ad6ea6f7e170f9603092f9cc5b" sha1="34c56074bce0e4ba2bc36696954d21f8e38b0602"/>
</game>
<game name="Musiic Party - Rock the House (UK) (En,Fr,De,Es,It)">
<category>Games</category>
@ -17945,10 +17945,10 @@
<description>Transformers - The Game (Korea)</description>
<rom name="Transformers - The Game (Korea).iso" size="4699979776" crc="65b28e90" md5="caa4ba68d9590cb14f268d6b859de799" sha1="f0f1c9e40010a5ea40c6119fc2d1d06ebd0e35e9"/>
</game>
<game name="FreeLoader for Nintendo Wii (USA) (Unl)">
<game name="FreeLoader for Nintendo Wii (USA) (Unl) (Rev 1)">
<category>Applications</category>
<description>FreeLoader for Nintendo Wii (USA) (Unl)</description>
<rom name="FreeLoader for Nintendo Wii (USA) (Unl).iso" size="1459978240" crc="1cc40417" md5="7ef5176eee10d71f6094bae0821d0b44" sha1="3cbab4236fe31abc15e253adadf963ba8fa7261d"/>
<description>FreeLoader for Nintendo Wii (USA) (Unl) (Rev 1)</description>
<rom name="FreeLoader for Nintendo Wii (USA) (Unl) (Rev 1).iso" size="1459978240" crc="1cc40417" md5="7ef5176eee10d71f6094bae0821d0b44" sha1="3cbab4236fe31abc15e253adadf963ba8fa7261d"/>
</game>
<game name="FreeLoader for Nintendo Wii (Japan) (Unl)">
<category>Applications</category>
@ -18450,10 +18450,10 @@
<description>Fishing Master World Tour (USA) (Beta) (2008-11-14)</description>
<rom name="Fishing Master World Tour (USA) (Beta) (2008-11-14).iso" size="4707319808" crc="e030529b" md5="f4530ea0a779d4f47f1e9682d6cb8d45" sha1="87d23879d8320f6d8d4d84df796587bbd3ef6ed1"/>
</game>
<game name="Food Network - Cook or Be Cooked (USA) (Beta) (2009-07-20)">
<game name="Food Network - Cook or Be Cooked! (USA) (Beta) (2009-07-20)">
<category>Preproduction</category>
<description>Food Network - Cook or Be Cooked (USA) (Beta) (2009-07-20)</description>
<rom name="Food Network - Cook or Be Cooked (USA) (Beta) (2009-07-20).iso" size="4707319808" crc="d5aae4a9" md5="3d4e8632d621dfdc142c364e2f645822" sha1="6d10aa3438a655a4e348b82a4e78ff6120690bc5"/>
<description>Food Network - Cook or Be Cooked! (USA) (Beta) (2009-07-20)</description>
<rom name="Food Network - Cook or Be Cooked! (USA) (Beta) (2009-07-20).iso" size="4707319808" crc="d5aae4a9" md5="3d4e8632d621dfdc142c364e2f645822" sha1="6d10aa3438a655a4e348b82a4e78ff6120690bc5"/>
</game>
<game name="Fragile Dreams - Farewell Ruins of the Moon (USA) (Beta) (2009-12-10)">
<category>Preproduction</category>
@ -18810,10 +18810,10 @@
<description>Lara Croft Tomb Raider - Anniversary (USA) (Beta) (2007-09-20)</description>
<rom name="Lara Croft Tomb Raider - Anniversary (USA) (Beta) (2007-09-20).iso" size="4707319808" crc="61ef67d0" md5="0bf1f1a9941519c76c5f1e957c6118cd" sha1="3c897f945fa9ee1a681f07654a9484c23bdf63fc"/>
</game>
<game name="Trauma Center - New Blood (USA) (Beta) (2007-10-02)">
<game name="Trauma Center - New Blood (USA) (En,Ja) (Beta) (2007-10-02)">
<category>Preproduction</category>
<description>Trauma Center - New Blood (USA) (Beta) (2007-10-02)</description>
<rom name="Trauma Center - New Blood (USA) (Beta) (2007-10-02).iso" size="4707319808" crc="6f01fefc" md5="7c38e763310b81ac68df7eacd4f58a11" sha1="5fed4b32ba9d06d1fbc21d9771b111f2279d1a16"/>
<description>Trauma Center - New Blood (USA) (En,Ja) (Beta) (2007-10-02)</description>
<rom name="Trauma Center - New Blood (USA) (En,Ja) (Beta) (2007-10-02).iso" size="4707319808" crc="6f01fefc" md5="7c38e763310b81ac68df7eacd4f58a11" sha1="5fed4b32ba9d06d1fbc21d9771b111f2279d1a16"/>
</game>
<game name="Wacky World of Sports (USA) (Beta) (2009-03-25)">
<category>Preproduction</category>
@ -18860,4 +18860,29 @@
<description>Yu-Gi-Oh! 5D's - Duel Transer (USA) (Beta) (2010-07-15)</description>
<rom name="Yu-Gi-Oh! 5D's - Duel Transer (USA) (Beta) (2010-07-15).iso" size="4707319808" crc="fc013893" md5="572d467f9186b5f7203bfb2723ab2423" sha1="df639ddde0fdae538931443e18db80dface559fd"/>
</game>
<game name="Deca Sporta - Wiiro Jeulgineun Sports 10 Jongmok! (Korea)">
<category>Games</category>
<description>Deca Sporta - Wiiro Jeulgineun Sports 10 Jongmok! (Korea)</description>
<rom name="Deca Sporta - Wiiro Jeulgineun Sports 10 Jongmok! (Korea).iso" size="4699979776" crc="a5643872" md5="89b7a6b6952c0d1c2a4d3a45e91b2c0f" sha1="58454d77b159066845d3774853eeb87145c5e742"/>
</game>
<game name="Wii Fit (Japan) (Taikenban)">
<category>Demos</category>
<description>Wii Fit (Japan) (Taikenban)</description>
<rom name="Wii Fit (Japan) (Taikenban).iso" size="4699979776" crc="840639c5" md5="6428ece705a4f8e8528d72d3228ce02f" sha1="7edfe8a76c90fd8abc5d60c2643ef7791d080223"/>
</game>
<game name="FreeLoader for Nintendo Wii (USA) (Unl)">
<category>Applications</category>
<description>FreeLoader for Nintendo Wii (USA) (Unl)</description>
<rom name="FreeLoader for Nintendo Wii (USA) (Unl).iso" size="1459978240" crc="20261177" md5="5d7f821663974428804dae7df56f789d" sha1="70c05e6713e6f7acbc65929628b4ab35bb94eb7d"/>
</game>
<game name="Food Network - Cook or Be Cooked! (USA) (Demo)">
<category>Demos</category>
<description>Food Network - Cook or Be Cooked! (USA) (Demo)</description>
<rom name="Food Network - Cook or Be Cooked! (USA) (Demo).iso" size="4699979776" crc="16705e31" md5="6b8659f8ec82a16e9442df2c589cf601" sha1="3c623f447cb6cbf2c7abd9ce18f232cc2f00ff95"/>
</game>
<game name="Disney Sing It (Russia) (En,Ru)">
<category>Games</category>
<description>Disney Sing It (Russia) (En,Ru)</description>
<rom name="Disney Sing It (Russia) (En,Ru).iso" size="4699979776" crc="23644f21" md5="1d86f24d8f618c34e466a82f1f4a27e7" sha1="201c9ad4442830879b538e54c1fcc8e137fa7a92"/>
</game>
</datafile>


@ -8,10 +8,10 @@ use std::{
use hex::deserialize as deserialize_hex;
use serde::Deserialize;
use zerocopy::AsBytes;
use zerocopy::{Immutable, IntoBytes, KnownLayout};
// Keep in sync with build.rs
#[derive(Clone, Debug, AsBytes)]
#[derive(Clone, Debug, IntoBytes, Immutable, KnownLayout)]
#[repr(C, align(4))]
struct Header {
entry_count: u32,
@ -19,7 +19,7 @@ struct Header {
}
// Keep in sync with redump.rs
#[derive(Clone, Debug, AsBytes)]
#[derive(Clone, Debug, IntoBytes, Immutable, KnownLayout)]
#[repr(C, align(4))]
struct GameEntry {
crc32: u32,
@ -91,7 +91,7 @@ fn main() {
for (entry, name) in &mut entries {
entry.string_table_offset = string_table_offset;
out.write_all(entry.as_bytes()).unwrap();
string_table_offset += name.as_bytes().len() as u32 + 4;
string_table_offset += name.len() as u32 + 4;
}
// Write string table


@ -4,7 +4,7 @@
//! For now, this only adds a --version/-V option which causes early-exit.
use std::ffi::OsStr;
use argp::{parser::ParseGlobalOptions, EarlyExit, FromArgs, TopLevelCommand};
use argp::{EarlyExit, FromArgs, TopLevelCommand, parser::ParseGlobalOptions};
struct ArgsOrVersion<T>(T)
where T: FromArgs;


@ -1,8 +1,13 @@
use std::path::PathBuf;
use std::{ffi::OsStr, path::PathBuf};
use argp::FromArgs;
use nod::{
common::Format,
read::{DiscOptions, PartitionEncryption},
write::FormatOptions,
};
use crate::util::{redump, shared::convert_and_verify};
use crate::util::{path_display, redump, shared::convert_and_verify};
#[derive(FromArgs, Debug)]
/// Converts a disc image to ISO.
@ -20,6 +25,15 @@ pub struct Args {
#[argp(option, short = 'd')]
/// path to DAT file(s) for verification (optional)
dat: Vec<PathBuf>,
#[argp(switch)]
/// decrypt Wii partition data
decrypt: bool,
#[argp(switch)]
/// encrypt Wii partition data
encrypt: bool,
#[argp(option, short = 'c')]
/// compression format and level (e.g. "zstd:19")
compress: Option<String>,
}
pub fn run(args: Args) -> nod::Result<()> {
@ -27,5 +41,48 @@ pub fn run(args: Args) -> nod::Result<()> {
println!("Loading dat files...");
redump::load_dats(args.dat.iter().map(PathBuf::as_ref))?;
}
convert_and_verify(&args.file, Some(&args.out), args.md5)
let options = DiscOptions {
partition_encryption: match (args.decrypt, args.encrypt) {
(true, false) => PartitionEncryption::ForceDecrypted,
(false, true) => PartitionEncryption::ForceEncrypted,
(false, false) => PartitionEncryption::Original,
(true, true) => {
return Err(nod::Error::Other(
"Both --decrypt and --encrypt specified".to_string(),
));
}
},
preloader_threads: 4,
};
let format = match args.out.extension() {
Some(ext)
if ext.eq_ignore_ascii_case(OsStr::new("iso"))
|| ext.eq_ignore_ascii_case(OsStr::new("gcm")) =>
{
Format::Iso
}
Some(ext) if ext.eq_ignore_ascii_case(OsStr::new("ciso")) => Format::Ciso,
Some(ext) if ext.eq_ignore_ascii_case(OsStr::new("gcz")) => Format::Gcz,
Some(ext) if ext.eq_ignore_ascii_case(OsStr::new("nfs")) => Format::Nfs,
Some(ext) if ext.eq_ignore_ascii_case(OsStr::new("rvz")) => Format::Rvz,
Some(ext) if ext.eq_ignore_ascii_case(OsStr::new("wbfs")) => Format::Wbfs,
Some(ext) if ext.eq_ignore_ascii_case(OsStr::new("wia")) => Format::Wia,
Some(ext) if ext.eq_ignore_ascii_case(OsStr::new("tgc")) => Format::Tgc,
Some(_) => {
return Err(nod::Error::Other(format!(
"Unknown file extension: {}",
path_display(&args.out)
)));
}
None => Format::Iso,
};
let mut compression = if let Some(compress) = args.compress {
compress.parse()?
} else {
format.default_compression()
};
compression.validate_level()?;
let format_options =
FormatOptions { format, compression, block_size: format.default_block_size() };
convert_and_verify(&args.file, Some(&args.out), args.md5, &options, &format_options)
}


@ -1,24 +1,19 @@
use std::{
cmp::min,
collections::BTreeMap,
fmt,
io::Read,
path::{Path, PathBuf},
sync::{mpsc::sync_channel, Arc},
thread,
};
use argp::FromArgs;
use indicatif::{ProgressBar, ProgressState, ProgressStyle};
use nod::{Disc, OpenOptions, Result, ResultContext};
use zerocopy::FromZeroes;
use crate::util::{
digest::{digest_thread, DigestResult},
redump,
redump::GameResult,
use nod::{
Result, ResultContext,
read::{DiscOptions, DiscReader, PartitionEncryption},
write::{DiscWriter, FormatOptions, ProcessOptions},
};
use crate::util::{redump, redump::GameResult};
#[derive(FromArgs, Debug)]
/// Commands related to DAT files.
#[argp(subcommand, name = "dat")]
@ -165,11 +160,9 @@ struct DiscHashes {
}
fn load_disc(path: &Path, name: &str, full_verify: bool) -> Result<DiscHashes> {
let mut disc = Disc::new_with_options(path, &OpenOptions {
rebuild_encryption: true,
validate_hashes: false,
})?;
let disc_size = disc.disc_size();
let options =
DiscOptions { partition_encryption: PartitionEncryption::Original, preloader_threads: 4 };
let disc = DiscReader::new(path, &options)?;
if !full_verify {
let meta = disc.meta();
if let (Some(crc32), Some(sha1)) = (meta.crc32, meta.sha1) {
@ -177,7 +170,8 @@ fn load_disc(path: &Path, name: &str, full_verify: bool) -> Result<DiscHashes> {
}
}
let pb = ProgressBar::new(disc_size).with_message(format!("{}:", name));
let disc_writer = DiscWriter::new(disc, &FormatOptions::default())?;
let pb = ProgressBar::new(disc_writer.progress_bound()).with_message(format!("{}:", name));
pb.set_style(ProgressStyle::with_template("{msg} {spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {bytes}/{total_bytes} ({bytes_per_sec}, {eta})")
.unwrap()
.with_key("eta", |state: &ProgressState, w: &mut dyn fmt::Write| {
@ -185,47 +179,22 @@ fn load_disc(path: &Path, name: &str, full_verify: bool) -> Result<DiscHashes> {
})
.progress_chars("#>-"));
const BUFFER_SIZE: usize = 1015808; // LCM(0x8000, 0x7C00)
let digest_threads = [digest_thread::<crc32fast::Hasher>(), digest_thread::<sha1::Sha1>()];
let (w_tx, w_rx) = sync_channel::<Arc<[u8]>>(1);
let w_thread = thread::spawn(move || {
let mut total_written = 0u64;
while let Ok(data) = w_rx.recv() {
let mut total_written = 0u64;
let finalization = disc_writer.process(
|data, pos, _| {
total_written += data.len() as u64;
pb.set_position(total_written);
}
pb.finish_and_clear();
});
pb.set_position(pos);
Ok(())
},
&ProcessOptions {
processor_threads: 12, // TODO
digest_crc32: true,
digest_md5: false,
digest_sha1: true,
digest_xxh64: false,
},
)?;
pb.finish();
let mut total_read = 0u64;
let mut buf = <u8>::new_box_slice_zeroed(BUFFER_SIZE);
while total_read < disc_size {
let read = min(BUFFER_SIZE as u64, disc_size - total_read) as usize;
disc.read_exact(&mut buf[..read]).with_context(|| {
format!("Reading {} bytes at disc offset {}", BUFFER_SIZE, total_read)
})?;
let arc = Arc::<[u8]>::from(&buf[..read]);
for (tx, _) in &digest_threads {
tx.send(arc.clone()).map_err(|_| "Sending data to hash thread")?;
}
w_tx.send(arc).map_err(|_| "Sending data to write thread")?;
total_read += read as u64;
}
drop(w_tx); // Close channel
w_thread.join().unwrap();
let mut crc32 = None;
let mut sha1 = None;
for (tx, handle) in digest_threads {
drop(tx); // Close channel
match handle.join().unwrap() {
DigestResult::Crc32(v) => crc32 = Some(v),
DigestResult::Sha1(v) => sha1 = Some(v),
_ => {}
}
}
Ok(DiscHashes { crc32: crc32.unwrap(), sha1: sha1.unwrap() })
Ok(DiscHashes { crc32: finalization.crc32.unwrap(), sha1: finalization.sha1.unwrap() })
}
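
The reworked load_disc above replaces the old reader, hasher, and writer threads with a single DiscWriter::process pass. A minimal sketch of that flow, using only the API surface visible in this diff (DiscReader::new, DiscWriter::new, process, ProcessOptions, and the finalization digests, plus hex::encode as the dat code already uses); error handling and thread counts are simplified:

use std::path::Path;

use nod::{
    Result,
    read::{DiscOptions, DiscReader, PartitionEncryption},
    write::{DiscWriter, FormatOptions, ProcessOptions},
};

fn hash_disc(path: &Path) -> Result<()> {
    let options = DiscOptions {
        partition_encryption: PartitionEncryption::Original,
        preloader_threads: 4,
    };
    let disc = DiscReader::new(path, &options)?;
    // Default FormatOptions keep the data as-is; process() then streams the
    // disc once, updating only the digests requested below.
    let writer = DiscWriter::new(disc, &FormatOptions::default())?;
    let finalization = writer.process(
        |_data, _pos, _total| Ok(()), // discard the data, we only want hashes
        &ProcessOptions {
            processor_threads: 4,
            digest_crc32: true,
            digest_md5: false,
            digest_sha1: true,
            digest_xxh64: false,
        },
    )?;
    if let (Some(crc32), Some(sha1)) = (finalization.crc32, finalization.sha1) {
        println!("crc32: {:08x}, sha1: {}", crc32, hex::encode(sha1));
    }
    Ok(())
}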

View File

@ -1,22 +1,21 @@
use std::{
borrow::Cow,
fs,
fs::File,
io,
io::{BufWriter, Write},
io::{BufRead, Write},
path::{Path, PathBuf},
};
use argp::FromArgs;
use itertools::Itertools;
use nod::{
Disc, DiscHeader, Fst, Node, OpenOptions, PartitionBase, PartitionKind, PartitionMeta,
ResultContext,
common::PartitionKind,
disc::fst::{Fst, Node},
read::{DiscOptions, DiscReader, PartitionMeta, PartitionOptions, PartitionReader},
};
use size::{Base, Size};
use zerocopy::AsBytes;
use zerocopy::IntoBytes;
use crate::util::{display, has_extension};
use crate::util::{has_extension, path_display};
#[derive(FromArgs, Debug)]
/// Extracts a disc image.
@ -54,74 +53,57 @@ pub fn run(args: Args) -> nod::Result<()> {
} else {
output_dir = args.file.with_extension("");
}
let disc = Disc::new_with_options(&args.file, &OpenOptions {
rebuild_encryption: false,
validate_hashes: args.validate,
})?;
let disc =
DiscReader::new(&args.file, &DiscOptions { preloader_threads: 4, ..Default::default() })?;
let header = disc.header();
let is_wii = header.is_wii();
let options = PartitionOptions { validate_hashes: args.validate };
if let Some(partition) = args.partition {
if partition.eq_ignore_ascii_case("all") {
for info in disc.partitions() {
let mut out_dir = output_dir.clone();
out_dir.push(info.kind.dir_name().as_ref());
let mut partition = disc.open_partition(info.index)?;
extract_partition(header, partition.as_mut(), &out_dir, is_wii, args.quiet)?;
let mut partition = disc.open_partition(info.index, &options)?;
extract_partition(&disc, partition.as_mut(), &out_dir, is_wii, args.quiet)?;
}
} else if partition.eq_ignore_ascii_case("data") {
let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
extract_partition(header, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
let mut partition = disc.open_partition_kind(PartitionKind::Data, &options)?;
extract_partition(&disc, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
} else if partition.eq_ignore_ascii_case("update") {
let mut partition = disc.open_partition_kind(PartitionKind::Update)?;
extract_partition(header, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
let mut partition = disc.open_partition_kind(PartitionKind::Update, &options)?;
extract_partition(&disc, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
} else if partition.eq_ignore_ascii_case("channel") {
let mut partition = disc.open_partition_kind(PartitionKind::Channel)?;
extract_partition(header, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
let mut partition = disc.open_partition_kind(PartitionKind::Channel, &options)?;
extract_partition(&disc, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
} else {
let idx = partition.parse::<usize>().map_err(|_| "Invalid partition index")?;
let mut partition = disc.open_partition(idx)?;
extract_partition(header, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
let mut partition = disc.open_partition(idx, &options)?;
extract_partition(&disc, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
}
} else {
let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
extract_partition(header, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
let mut partition = disc.open_partition_kind(PartitionKind::Data, &options)?;
extract_partition(&disc, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
}
Ok(())
}
fn extract_partition(
header: &DiscHeader,
partition: &mut dyn PartitionBase,
disc: &DiscReader,
partition: &mut dyn PartitionReader,
out_dir: &Path,
is_wii: bool,
quiet: bool,
) -> nod::Result<()> {
let meta = partition.meta()?;
extract_sys_files(header, meta.as_ref(), out_dir, quiet)?;
extract_sys_files(disc, &meta, out_dir, quiet)?;
// Extract FST
let files_dir = out_dir.join("files");
fs::create_dir_all(&files_dir)
.with_context(|| format!("Creating directory {}", display(&files_dir)))?;
.with_context(|| format!("Creating directory {}", path_display(&files_dir)))?;
let fst = Fst::new(&meta.raw_fst)?;
let mut path_segments = Vec::<(Cow<str>, usize)>::new();
for (idx, node, name) in fst.iter() {
// Remove ended path segments
let mut new_size = 0;
for (_, end) in path_segments.iter() {
if *end == idx {
break;
}
new_size += 1;
}
path_segments.truncate(new_size);
// Add the new path segment
let end = if node.is_dir() { node.length() as usize } else { idx + 1 };
path_segments.push((name?, end));
let path = path_segments.iter().map(|(name, _)| name.as_ref()).join("/");
for (_, node, path) in fst.iter() {
if node.is_dir() {
fs::create_dir_all(files_dir.join(&path))
.with_context(|| format!("Creating directory {}", path))?;
@ -133,14 +115,14 @@ fn extract_partition(
}
fn extract_sys_files(
header: &DiscHeader,
disc: &DiscReader,
data: &PartitionMeta,
out_dir: &Path,
quiet: bool,
) -> nod::Result<()> {
let sys_dir = out_dir.join("sys");
fs::create_dir_all(&sys_dir)
.with_context(|| format!("Creating directory {}", display(&sys_dir)))?;
.with_context(|| format!("Creating directory {}", path_display(&sys_dir)))?;
extract_file(data.raw_boot.as_ref(), &sys_dir.join("boot.bin"), quiet)?;
extract_file(data.raw_bi2.as_ref(), &sys_dir.join("bi2.bin"), quiet)?;
extract_file(data.raw_apploader.as_ref(), &sys_dir.join("apploader.img"), quiet)?;
@ -148,23 +130,27 @@ fn extract_sys_files(
extract_file(data.raw_dol.as_ref(), &sys_dir.join("main.dol"), quiet)?;
// Wii files
if header.is_wii() {
let disc_header = disc.header();
if disc_header.is_wii() {
let disc_dir = out_dir.join("disc");
fs::create_dir_all(&disc_dir)
.with_context(|| format!("Creating directory {}", display(&disc_dir)))?;
extract_file(&header.as_bytes()[..0x100], &disc_dir.join("header.bin"), quiet)?;
}
if let Some(ticket) = data.raw_ticket.as_deref() {
extract_file(ticket, &out_dir.join("ticket.bin"), quiet)?;
}
if let Some(tmd) = data.raw_tmd.as_deref() {
extract_file(tmd, &out_dir.join("tmd.bin"), quiet)?;
}
if let Some(cert_chain) = data.raw_cert_chain.as_deref() {
extract_file(cert_chain, &out_dir.join("cert.bin"), quiet)?;
}
if let Some(h3_table) = data.raw_h3_table.as_deref() {
extract_file(h3_table, &out_dir.join("h3.bin"), quiet)?;
.with_context(|| format!("Creating directory {}", path_display(&disc_dir)))?;
extract_file(&disc_header.as_bytes()[..0x100], &disc_dir.join("header.bin"), quiet)?;
if let Some(region) = disc.region() {
extract_file(region, &disc_dir.join("region.bin"), quiet)?;
}
if let Some(ticket) = data.raw_ticket.as_deref() {
extract_file(ticket, &out_dir.join("ticket.bin"), quiet)?;
}
if let Some(tmd) = data.raw_tmd.as_deref() {
extract_file(tmd, &out_dir.join("tmd.bin"), quiet)?;
}
if let Some(cert_chain) = data.raw_cert_chain.as_deref() {
extract_file(cert_chain, &out_dir.join("cert.bin"), quiet)?;
}
if let Some(h3_table) = data.raw_h3_table.as_deref() {
extract_file(h3_table, &out_dir.join("h3.bin"), quiet)?;
}
}
Ok(())
}
@ -173,17 +159,18 @@ fn extract_file(bytes: &[u8], out_path: &Path, quiet: bool) -> nod::Result<()> {
if !quiet {
println!(
"Extracting {} (size: {})",
display(out_path),
path_display(out_path),
Size::from_bytes(bytes.len()).format().with_base(Base::Base10)
);
}
fs::write(out_path, bytes).with_context(|| format!("Writing file {}", display(out_path)))?;
fs::write(out_path, bytes)
.with_context(|| format!("Writing file {}", path_display(out_path)))?;
Ok(())
}
fn extract_node(
node: &Node,
partition: &mut dyn PartitionBase,
node: Node,
partition: &mut dyn PartitionReader,
base_path: &Path,
name: &str,
is_wii: bool,
@ -193,13 +180,12 @@ fn extract_node(
if !quiet {
println!(
"Extracting {} (size: {})",
display(&file_path),
path_display(&file_path),
Size::from_bytes(node.length()).format().with_base(Base::Base10)
);
}
let file = File::create(&file_path)
.with_context(|| format!("Creating file {}", display(&file_path)))?;
let mut w = BufWriter::with_capacity(partition.ideal_buffer_size(), file);
let mut file = File::create(&file_path)
.with_context(|| format!("Creating file {}", path_display(&file_path)))?;
let mut r = partition.open_file(node).with_context(|| {
format!(
"Opening file {} on disc for reading (offset {}, size {})",
@ -208,7 +194,18 @@ fn extract_node(
node.length()
)
})?;
io::copy(&mut r, &mut w).with_context(|| format!("Extracting file {}", display(&file_path)))?;
w.flush().with_context(|| format!("Flushing file {}", display(&file_path)))?;
loop {
let buf = r
.fill_buf()
.with_context(|| format!("Extracting file {}", path_display(&file_path)))?;
let len = buf.len();
if len == 0 {
break;
}
file.write_all(buf)
.with_context(|| format!("Writing file {}", path_display(&file_path)))?;
r.consume(len);
}
file.flush().with_context(|| format!("Flushing file {}", path_display(&file_path)))?;
Ok(())
}
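
The loop above is the BufRead pattern (fill_buf and consume) that replaces io::copy plus a BufWriter: the partition reader already buffers internally, so the extract path borrows that buffer and writes it straight to the file. The same pattern in isolation, as a generic sketch over the std traits:

use std::io::{self, BufRead, Write};

// Drain any BufRead into any Write by borrowing the reader's internal buffer.
fn copy_buffered(r: &mut impl BufRead, w: &mut impl Write) -> io::Result<u64> {
    let mut total = 0u64;
    loop {
        let buf = r.fill_buf()?; // borrow whatever the reader has buffered
        if buf.is_empty() {
            break; // EOF
        }
        w.write_all(buf)?;
        let len = buf.len();
        r.consume(len); // mark those bytes as consumed
        total += len as u64;
    }
    w.flush()?;
    Ok(total)
}

nod ships a similar helper itself (buf_copy in nod::util, imported by the new gen code below).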

nodtool/src/cmd/gen.rs Normal file
View File

@ -0,0 +1,747 @@
use std::{
fs,
fs::File,
io,
io::{Read, Seek, SeekFrom, Write},
path::{Path, PathBuf},
str::from_utf8,
time::Instant,
};
use argp::FromArgs;
use nod::{
ResultContext,
build::gc::{FileCallback, FileInfo, GCPartitionBuilder, PartitionOverrides},
common::PartitionKind,
disc::{
BB2_OFFSET, BI2_SIZE, BOOT_SIZE, BootHeader, DiscHeader, MINI_DVD_SIZE, SECTOR_SIZE,
fst::Fst,
},
read::{
DiscOptions, DiscReader, PartitionEncryption, PartitionMeta, PartitionOptions,
PartitionReader,
},
util::{buf_copy, lfg::LaggedFibonacci},
write::{DiscWriter, FormatOptions, ProcessOptions},
};
use tracing::{debug, error, info, warn};
use zerocopy::{FromBytes, FromZeros};
use crate::util::{array_ref, redump, shared::convert_and_verify};
#[derive(FromArgs, Debug)]
/// Generates a disc image.
#[argp(subcommand, name = "gen")]
pub struct Args {
#[argp(positional)]
/// Path to extracted disc image
dir: PathBuf,
#[argp(positional)]
/// Output ISO file
out: PathBuf,
}
#[derive(FromArgs, Debug)]
/// Test disc image generation.
#[argp(subcommand, name = "gentest")]
pub struct TestArgs {
#[argp(positional)]
/// Path to original disc images
inputs: Vec<PathBuf>,
#[argp(option, short = 'o')]
/// Output ISO file
output: Option<PathBuf>,
#[argp(option, short = 't')]
/// Output original ISO for comparison
test_output: Option<PathBuf>,
}
fn read_fixed<const N: usize>(path: &Path) -> nod::Result<Box<[u8; N]>> {
let mut buf = <[u8; N]>::new_box_zeroed()?;
File::open(path)
.with_context(|| format!("Failed to open {}", path.display()))?
.read_exact(buf.as_mut())
.with_context(|| format!("Failed to read {}", path.display()))?;
Ok(buf)
}
fn read_all(path: &Path) -> nod::Result<Box<[u8]>> {
let mut buf = Vec::new();
File::open(path)
.with_context(|| format!("Failed to open {}", path.display()))?
.read_to_end(&mut buf)
.with_context(|| format!("Failed to read {}", path.display()))?;
Ok(buf.into_boxed_slice())
}
struct FileWriteInfo {
name: String,
offset: u64,
length: u64,
}
fn file_size(path: &Path) -> nod::Result<u64> {
Ok(fs::metadata(path)
.with_context(|| format!("Failed to get metadata for {}", path.display()))?
.len())
}
fn check_file_size(path: &Path, expected: u64) -> nod::Result<()> {
let actual = file_size(path)?;
if actual != expected {
return Err(nod::Error::DiscFormat(format!(
"File {} has size {}, expected {}",
path.display(),
actual,
expected
)));
}
Ok(())
}
pub fn run(args: Args) -> nod::Result<()> {
let start = Instant::now();
// Validate file sizes
let boot_path = args.dir.join("sys/boot.bin");
check_file_size(&boot_path, BOOT_SIZE as u64)?;
let bi2_path = args.dir.join("sys/bi2.bin");
check_file_size(&bi2_path, BI2_SIZE as u64)?;
let apploader_path = args.dir.join("sys/apploader.img");
let apploader_size = file_size(&apploader_path)?;
let dol_path = args.dir.join("sys/main.dol");
let dol_size = file_size(&dol_path)?;
// Build metadata
let mut file_infos = Vec::new();
let boot_data: Box<[u8; BOOT_SIZE]> = read_fixed(&boot_path)?;
let header = DiscHeader::ref_from_bytes(array_ref![boot_data, 0, size_of::<DiscHeader>()])
.expect("Failed to read disc header");
let junk_id = get_junk_id(header);
let boot_header =
BootHeader::ref_from_bytes(array_ref![boot_data, BB2_OFFSET, size_of::<BootHeader>()])
.expect("Failed to read boot header");
let fst_path = args.dir.join("sys/fst.bin");
let fst_data = read_all(&fst_path)?;
let fst = Fst::new(&fst_data).expect("Failed to parse FST");
file_infos.push(FileWriteInfo {
name: "sys/boot.bin".to_string(),
offset: 0,
length: BOOT_SIZE as u64,
});
file_infos.push(FileWriteInfo {
name: "sys/bi2.bin".to_string(),
offset: BOOT_SIZE as u64,
length: BI2_SIZE as u64,
});
file_infos.push(FileWriteInfo {
name: "sys/apploader.img".to_string(),
offset: BOOT_SIZE as u64 + BI2_SIZE as u64,
length: apploader_size,
});
let fst_offset = boot_header.fst_offset(false);
let dol_offset = boot_header.dol_offset(false);
if dol_offset < fst_offset {
file_infos.push(FileWriteInfo {
name: "sys/main.dol".to_string(),
offset: dol_offset,
length: dol_size,
});
} else {
let mut found = false;
for (_, node, path) in fst.iter() {
if !node.is_file() {
continue;
}
let offset = node.offset(false);
if offset == dol_offset {
info!("Using DOL from FST: {}", path);
found = true;
}
}
if !found {
return Err(nod::Error::DiscFormat("DOL not found in FST".to_string()));
}
}
let fst_size = boot_header.fst_size(false);
file_infos.push(FileWriteInfo {
name: "sys/fst.bin".to_string(),
offset: fst_offset,
length: fst_size,
});
// Collect files
for (_, node, path) in fst.iter() {
let length = node.length() as u64;
if node.is_dir() {
continue;
}
let mut file_path = args.dir.join("files");
file_path.extend(path.split('/'));
let metadata = match fs::metadata(&file_path) {
Ok(meta) => meta,
Err(e) if e.kind() == io::ErrorKind::NotFound => {
warn!("File not found: {}", file_path.display());
continue;
}
Err(e) => {
return Err(e)
.context(format!("Failed to get metadata for {}", file_path.display()));
}
};
if metadata.is_dir() {
return Err(nod::Error::Other(format!("Path {} is a directory", file_path.display())));
}
if metadata.len() != length {
return Err(nod::Error::Other(format!(
"File {} has size {}, expected {}",
file_path.display(),
metadata.len(),
length
)));
}
let offset = node.offset(false);
file_infos.push(FileWriteInfo {
name: file_path.into_os_string().into_string().unwrap(),
offset,
length,
});
}
sort_files(&mut file_infos)?;
// Write files
let mut out = File::create(&args.out)
.with_context(|| format!("Failed to create {}", args.out.display()))?;
info!("Writing disc image to {} ({} files)", args.out.display(), file_infos.len());
let crc =
write_files(&mut out, &file_infos, header, boot_header, junk_id, |out, name| match name {
"sys/boot.bin" => out.write_all(boot_data.as_ref()),
"sys/fst.bin" => out.write_all(fst_data.as_ref()),
path => {
let mut in_file = File::open(args.dir.join(path))?;
io::copy(&mut in_file, out).map(|_| ())
}
})?;
out.flush().context("Failed to flush output file")?;
info!("Generated disc image in {:?} (CRC32: {:08X})", start.elapsed(), crc);
let redump_entry = redump::find_by_crc32(crc);
if let Some(entry) = &redump_entry {
println!("Redump: {}", entry.name);
} else {
println!("Redump: Not found ❌");
}
Ok(())
}
#[inline]
fn align_up<const N: u64>(n: u64) -> u64 { (n + N - 1) & !(N - 1) }
#[inline]
fn gcm_align(n: u64) -> u64 { (n + 31) & !3 }
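A quick sanity sketch of the two helpers above (align_up relies on the mask trick, so N must be a power of two; gcm_align is the GameCube junk alignment of 4 bytes plus a 28-byte pad):

#[cfg(test)]
mod align_tests {
    use super::{align_up, gcm_align};

    #[test]
    fn worked_examples() {
        assert_eq!(align_up::<4>(0x1235), 0x1238); // next multiple of 4
        assert_eq!(align_up::<0x8000>(0x8001), 0x10000); // next 32 KiB sector
        assert_eq!(gcm_align(0x1234), 0x1250); // (0x1234 + 31) & !3
        assert_eq!(gcm_align(0x1235), 0x1254); // (0x1235 + 31) & !3
    }
}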
/// Files can be located on the inner rim of the disc (closer to the center) or the outer rim
/// (closer to the edge). The inner rim is slower to read, so developers often configured certain
/// files to be located on the outer rim. This function attempts to find a gap in the file offsets
/// between the inner and outer rim, which we need in order to recreate the junk data properly.
fn find_file_gap(file_infos: &[FileWriteInfo], fst_end: u64) -> Option<u64> {
let mut last_offset = 0;
for info in file_infos {
if last_offset > fst_end && info.offset > last_offset + SECTOR_SIZE as u64 {
debug!("Found file gap at {:X} -> {:X}", last_offset, info.offset);
return Some(last_offset);
}
last_offset = info.offset + info.length;
}
None
}
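An illustration of the gap heuristic with made-up file names and offsets: the FST ends at 0x5000, the first file ends at 0x6000, and the next file starts more than one 32 KiB sector later, so 0x6000 is reported as the end of the inner-rim data:

#[cfg(test)]
mod gap_tests {
    use super::{FileWriteInfo, find_file_gap};

    #[test]
    fn reports_inner_rim_end() {
        let files = [
            FileWriteInfo { name: "inner.bin".into(), offset: 0x5000, length: 0x1000 },
            // Starts well past 0x6000 + SECTOR_SIZE, so this is outer-rim data.
            FileWriteInfo { name: "outer.bin".into(), offset: 0x20_0000, length: 0x800 },
        ];
        assert_eq!(find_file_gap(&files, 0x5000), Some(0x6000));
    }
}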
fn write_files<W>(
w: &mut W,
file_infos: &[FileWriteInfo],
header: &DiscHeader,
boot_header: &BootHeader,
junk_id: Option<[u8; 4]>,
mut callback: impl FnMut(&mut HashStream<&mut W>, &str) -> io::Result<()>,
) -> nod::Result<u32>
where
W: Write + ?Sized,
{
let fst_end = boot_header.fst_offset(false) + boot_header.fst_size(false);
let file_gap = find_file_gap(file_infos, fst_end);
let mut lfg = LaggedFibonacci::default();
let mut out = HashStream::new(w);
let mut last_end = 0;
for info in file_infos {
if let Some(junk_id) = junk_id {
let aligned_end = gcm_align(last_end);
if info.offset > aligned_end && last_end >= fst_end {
// Junk data is aligned to 4 bytes with a 28-byte padding (aka `(n + 31) & !3`),
// but a few cases don't have the 28-byte padding. Namely, the junk data after the
// FST, and the junk data in between the inner and outer rim files. This attempts to
// determine the correct alignment, but is not 100% accurate.
let junk_start =
if file_gap == Some(last_end) { align_up::<4>(last_end) } else { aligned_end };
debug!("Writing junk data at {:X} -> {:X}", junk_start, info.offset);
write_junk_data(
&mut lfg,
&mut out,
junk_id,
header.disc_num,
junk_start,
info.offset,
)?;
}
}
debug!(
"Writing file {} at {:X} -> {:X}",
info.name,
info.offset,
info.offset + info.length
);
out.seek(SeekFrom::Start(info.offset))
.with_context(|| format!("Seeking to offset {}", info.offset))?;
if info.length > 0 {
callback(&mut out, &info.name)
.with_context(|| format!("Failed to write file {}", info.name))?;
let cur = out.stream_position().context("Getting current position")?;
if cur != info.offset + info.length {
return Err(nod::Error::Other(format!(
"Wrote {} bytes, expected {}",
cur - info.offset,
info.length
)));
}
}
last_end = info.offset + info.length;
}
if let Some(junk_id) = junk_id {
let aligned_end = gcm_align(last_end);
if aligned_end < MINI_DVD_SIZE && aligned_end >= fst_end {
debug!("Writing junk data at {:X} -> {:X}", aligned_end, MINI_DVD_SIZE);
write_junk_data(
&mut lfg,
&mut out,
junk_id,
header.disc_num,
aligned_end,
MINI_DVD_SIZE,
)?;
last_end = MINI_DVD_SIZE;
}
}
out.write_zeroes(MINI_DVD_SIZE - last_end).context("Writing end of file")?;
out.flush().context("Flushing output")?;
Ok(out.finish())
}
fn write_junk_data<W>(
lfg: &mut LaggedFibonacci,
out: &mut W,
junk_id: [u8; 4],
disc_num: u8,
pos: u64,
end: u64,
) -> nod::Result<()>
where
W: Write + Seek + ?Sized,
{
out.seek(SeekFrom::Start(pos)).with_context(|| format!("Seeking to offset {}", pos))?;
lfg.write_sector_chunked(out, end - pos, junk_id, disc_num, pos)
.with_context(|| format!("Failed to write junk data at offset {}", pos))?;
Ok(())
}
pub fn run_test(args: TestArgs) -> nod::Result<()> {
let mut failed = vec![];
for input in args.inputs {
match in_memory_test(&input, args.output.as_deref(), args.test_output.as_deref()) {
Ok(()) => {}
Err(e) => {
error!("Failed to generate disc image: {:?}", e);
failed.push((input, e));
}
}
}
if !failed.is_empty() {
error!("Failed to generate disc images:");
for (input, e) in failed {
error!(" {}: {:?}", input.display(), e);
}
std::process::exit(1);
}
Ok(())
}
/// Some games (mainly beta and sample discs) have junk data that doesn't match the game ID. This
/// function returns the correct game ID to use, if an override is needed.
fn get_override_junk_id(header: &DiscHeader) -> Option<[u8; 4]> {
match &header.game_id {
// Dairantou Smash Brothers DX (Japan) (Taikenban)
b"DALJ01" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"DPIJ"),
// 2002 FIFA World Cup (Japan) (Jitsuen-you Sample)
b"DFIJ13" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"GFIJ"),
// Disney's Magical Park (Japan) (Jitsuen-you Sample)
b"DMTJ18" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"GMTJ"),
// Star Wars - Rogue Squadron II (Japan) (Jitsuen-you Sample)
b"DSWJ13" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"GSWJ"),
// Homeland (Japan) (Rev 1) [T-En by DOL-Translations v20230606] [i]
b"GHEE91" if header.disc_num == 0 && header.disc_version == 1 => Some(*b"GHEJ"),
// Kururin Squash! (Japan) [T-En by DOL-Translations v2.0.0]
b"GKQE01" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"GKQJ"),
// Lupin III - Lost Treasure Under the Sea (Japan) (Disc 1) [T-En by DOL-Translations v0.5.0] [i] [n]
b"GL3EE8" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"GL3J"),
// Lupin III - Lost Treasure Under the Sea (Japan) (Disc 2) [T-En by DOL-Translations v0.5.0] [i] [n]
b"GL3EE8" if header.disc_num == 1 && header.disc_version == 0 => Some(*b"GL3J"),
// Taxi 3 - The Game (France) [T-En by DOL-Translations v20230801] [n]
b"GXQP41" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"GXQF"),
// Donkey Konga 3 - Tabehoudai! Haru Mogitate 50-kyoku (Japan) [T-En by DOL-Translations v0.1.1] [i]
b"GY3E01" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"GY3J"),
// Need for Speed - Underground (Europe) (Alt)
b"PZHP69" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"GNDP"),
_ => None,
}
}
fn get_junk_id(header: &DiscHeader) -> Option<[u8; 4]> {
Some(match get_override_junk_id(header) {
Some(id) => {
info!("Using override junk ID: {:X?}", from_utf8(&id).unwrap());
id
}
None => *array_ref!(header.game_id, 0, 4),
})
}
fn sort_files(files: &mut [FileWriteInfo]) -> nod::Result<()> {
files.sort_unstable_by_key(|info| (info.offset, info.length));
for i in 1..files.len() {
let prev = &files[i - 1];
let cur = &files[i];
if cur.offset < prev.offset + prev.length {
return Err(nod::Error::Other(format!(
"File {} ({:#X}-{:#X}) overlaps with {} ({:#X}-{:#X})",
cur.name,
cur.offset,
cur.offset + cur.length,
prev.name,
prev.offset,
prev.offset + prev.length
)));
}
}
Ok(())
}
fn in_memory_test(
path: &Path,
output: Option<&Path>,
test_output: Option<&Path>,
) -> nod::Result<()> {
let start = Instant::now();
info!("Opening disc image '{}'", path.display());
let disc = DiscReader::new(path, &DiscOptions::default())?;
info!(
"Opened disc image '{}' (Disc {}, Revision {})",
disc.header().game_title_str(),
disc.header().disc_num + 1,
disc.header().disc_version
);
let Some(orig_crc32) = disc.meta().crc32 else {
return Err(nod::Error::Other("CRC32 not found in disc metadata".to_string()));
};
let mut partition =
disc.open_partition_kind(PartitionKind::Data, &PartitionOptions::default())?;
let meta = partition.meta()?;
// Build metadata
let mut file_infos = Vec::new();
let header = meta.disc_header();
let junk_id = get_junk_id(header);
let boot_header = meta.boot_header();
let fst = meta.fst()?;
file_infos.push(FileWriteInfo {
name: "sys/boot.bin".to_string(),
offset: 0,
length: BOOT_SIZE as u64,
});
file_infos.push(FileWriteInfo {
name: "sys/bi2.bin".to_string(),
offset: BOOT_SIZE as u64,
length: BI2_SIZE as u64,
});
file_infos.push(FileWriteInfo {
name: "sys/apploader.img".to_string(),
offset: BOOT_SIZE as u64 + BI2_SIZE as u64,
length: meta.raw_apploader.len() as u64,
});
let fst_offset = boot_header.fst_offset(false);
let dol_offset = boot_header.dol_offset(false);
if dol_offset < fst_offset {
file_infos.push(FileWriteInfo {
name: "sys/main.dol".to_string(),
offset: dol_offset,
length: meta.raw_dol.len() as u64,
});
} else {
let mut found = false;
for (_, node, name) in fst.iter() {
if !node.is_file() {
continue;
}
let offset = node.offset(false);
if offset == dol_offset {
info!("Using DOL from FST: {}", name);
found = true;
}
}
if !found {
return Err(nod::Error::Other("DOL not found in FST".to_string()));
}
}
let fst_size = boot_header.fst_size(false);
file_infos.push(FileWriteInfo {
name: "sys/fst.bin".to_string(),
offset: fst_offset,
length: fst_size,
});
// Collect files
let mut builder = GCPartitionBuilder::new(false, PartitionOverrides::default());
for (idx, node, path) in fst.iter() {
let offset = node.offset(false);
let length = node.length() as u64;
if node.is_dir() {
if length as usize == idx + 1 {
println!("Empty directory: {}", path);
}
continue;
}
if let Some(junk_id) = junk_id {
// Some games have junk data in place of files that were removed from the disc layout.
// This is a naive check to skip these files in our disc layout so that the junk data
// alignment is correct. This misses some cases where the junk data starts in the middle
// of a file, but handling those cases would require a more complex solution.
if length > 4
&& check_junk_data(partition.as_mut(), offset, length, junk_id, header.disc_num)?
{
warn!("Skipping junk data file: {} (size {})", path, length);
builder.add_junk_file(path);
continue;
}
}
builder.add_file(FileInfo {
name: path,
size: length,
offset: Some(offset),
alignment: None,
})?;
}
// Write files
info!("Writing disc image with {} files", file_infos.len());
for file in &file_infos {
builder.add_file(FileInfo {
name: file.name.clone(),
size: file.length,
offset: Some(file.offset),
alignment: None,
})?;
}
let writer = builder.build(|out: &mut dyn Write, name: &str| match name {
"sys/boot.bin" => out.write_all(meta.raw_boot.as_ref()),
"sys/bi2.bin" => out.write_all(meta.raw_bi2.as_ref()),
"sys/fst.bin" => out.write_all(meta.raw_fst.as_ref()),
"sys/apploader.img" => out.write_all(meta.raw_apploader.as_ref()),
"sys/main.dol" => out.write_all(meta.raw_dol.as_ref()),
path => {
let Some((_, node)) = fst.find(path) else {
return Err(io::Error::new(
io::ErrorKind::NotFound,
format!("File not found: {}", path),
));
};
let mut file = partition.open_file(node)?;
buf_copy(&mut file, out)?;
Ok(())
}
})?;
let disc_stream = writer.into_cloneable_stream(PartitionFileReader { partition, meta })?;
let disc_reader = DiscReader::new_stream(disc_stream, &DiscOptions::default())?;
let disc_writer = DiscWriter::new(disc_reader, &FormatOptions::default())?;
let process_options = ProcessOptions { digest_crc32: true, ..Default::default() };
let finalization = if let Some(output) = output {
let mut out = File::create(output)
.with_context(|| format!("Failed to create {}", output.display()))?;
let finalization =
disc_writer.process(|data, _, _| out.write_all(data.as_ref()), &process_options)?;
out.flush().context("Failed to flush output file")?;
finalization
} else {
disc_writer.process(|_, _, _| Ok(()), &process_options)?
};
let crc = finalization.crc32.unwrap();
info!("Generated disc image in {:?} (CRC32: {:08X})", start.elapsed(), crc);
if crc != orig_crc32 {
if let Some(test_output) = test_output {
let open_options = DiscOptions {
partition_encryption: PartitionEncryption::Original,
preloader_threads: 4,
};
convert_and_verify(
path,
Some(test_output),
false,
&open_options,
&FormatOptions::default(),
)?;
}
return Err(nod::Error::Other(format!(
"CRC32 mismatch: {:08X} != {:08X}",
crc, orig_crc32
)));
}
Ok(())
}
#[derive(Clone)]
struct PartitionFileReader {
partition: Box<dyn PartitionReader>,
meta: PartitionMeta,
}
impl FileCallback for PartitionFileReader {
fn read_file(&mut self, out: &mut [u8], name: &str, offset: u64) -> io::Result<()> {
let data: &[u8] = match name {
"sys/boot.bin" => self.meta.raw_boot.as_ref(),
"sys/bi2.bin" => self.meta.raw_bi2.as_ref(),
"sys/fst.bin" => self.meta.raw_fst.as_ref(),
"sys/apploader.img" => self.meta.raw_apploader.as_ref(),
"sys/main.dol" => self.meta.raw_dol.as_ref(),
path => {
let fst = self.meta.fst().map_err(io::Error::other)?;
let Some((_, node)) = fst.find(path) else {
return Err(io::Error::new(
io::ErrorKind::NotFound,
format!("File not found: {}", path),
));
};
let mut file = self.partition.open_file(node)?;
file.seek(SeekFrom::Start(offset))?;
file.read_exact(out)?;
return Ok(());
}
};
let offset = offset as usize;
let len = out.len().min(data.len() - offset);
out[..len].copy_from_slice(&data[offset..offset + len]);
Ok(())
}
}
/// Some disc files still exist in the FST, but were removed from the disc layout. These files had
/// junk data written in their place, since the disc creator did not know about them. To match the
/// original disc, we need to check for these files and remove them from our disc layout as well.
/// This ensures that the junk data alignment is correct.
fn check_junk_data(
partition: &mut dyn PartitionReader,
offset: u64,
len: u64,
junk_id: [u8; 4],
disc_num: u8,
) -> nod::Result<bool> {
if len == 0 {
return Ok(false);
}
partition
.seek(SeekFrom::Start(offset))
.with_context(|| format!("Seeking to offset {}", offset))?;
let mut lfg = LaggedFibonacci::default();
let mut pos = offset;
let mut remaining = len;
while remaining > 0 {
let file_buf = partition
.fill_buf()
.with_context(|| format!("Failed to read disc file at offset {}", offset))?;
let read_len = (file_buf.len() as u64).min(remaining) as usize;
if lfg.check_sector_chunked(&file_buf[..read_len], junk_id, disc_num, pos) != read_len {
return Ok(false);
}
pos += read_len as u64;
remaining -= read_len as u64;
partition.consume(read_len);
}
Ok(true)
}
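check_junk_data pairs with write_sector_chunked in write_junk_data above. A round-trip sketch under the signatures implied by those call sites (a Cursor stands in for the seekable output; the game ID, disc number, and offsets are made up):

use std::io::Cursor;

use nod::util::lfg::LaggedFibonacci;

fn junk_roundtrip() {
    let junk_id = *b"GALE"; // hypothetical game ID prefix
    let (disc_num, pos, len) = (0u8, 0x8000u64, 0x1000u64);
    let mut lfg = LaggedFibonacci::default();
    let mut cur = Cursor::new(Vec::new());
    // Generate `len` bytes of junk as it would appear at disc offset `pos`;
    // the generator is seeded from junk_id, disc_num, and the sector position,
    // which is why the absolute offset matters.
    lfg.write_sector_chunked(&mut cur, len, junk_id, disc_num, pos).expect("writing junk");
    let buf = cur.into_inner();
    // check_sector_chunked reports how many leading bytes match the junk stream.
    assert_eq!(lfg.check_sector_chunked(&buf, junk_id, disc_num, pos), buf.len());
}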
pub struct HashStream<W> {
inner: W,
hasher: crc32fast::Hasher,
position: u64,
}
impl<W> HashStream<W> {
pub fn new(inner: W) -> Self { Self { inner, hasher: Default::default(), position: 0 } }
pub fn finish(self) -> u32 { self.hasher.finalize() }
}
impl<W> HashStream<W>
where W: Write
{
pub fn write_zeroes(&mut self, mut len: u64) -> io::Result<()> {
while len > 0 {
let write_len = len.min(SECTOR_SIZE as u64) as usize;
self.write_all(&ZERO_SECTOR[..write_len])?;
len -= write_len as u64;
}
Ok(())
}
}
impl<W> Write for HashStream<W>
where W: Write
{
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.hasher.update(buf);
self.position += buf.len() as u64;
self.inner.write(buf)
}
fn flush(&mut self) -> io::Result<()> { self.inner.flush() }
}
static ZERO_SECTOR: [u8; SECTOR_SIZE] = [0; SECTOR_SIZE];
impl<W> Seek for HashStream<W>
where W: Write
{
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
let new_position = match pos {
SeekFrom::Start(v) => v,
SeekFrom::Current(v) => self.position.saturating_add_signed(v),
SeekFrom::End(_) => {
return Err(io::Error::new(
io::ErrorKind::Unsupported,
"HashStream: SeekFrom::End is not supported".to_string(),
));
}
};
if new_position < self.position {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"HashStream: Cannot seek backwards".to_string(),
));
}
self.write_zeroes(new_position - self.position)?;
Ok(new_position)
}
fn stream_position(&mut self) -> io::Result<u64> { Ok(self.position) }
}
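HashStream ties the pieces together for write_files: everything written through it feeds the CRC32 hasher, and forward seeks become zero-fill, so the gaps between files are hashed exactly as they land in the output image. A minimal usage sketch:

use std::io::{Seek, SeekFrom, Write};

fn hashed_write() -> std::io::Result<u32> {
    // Hash into a sink; write_files passes the real output writer instead.
    let mut out = HashStream::new(std::io::sink());
    out.write_all(b"boot.bin bytes...")?;
    // Seeking forward writes zeroes through the hasher; seeking backwards errors.
    out.seek(SeekFrom::Start(0x2440))?;
    out.write_all(b"apploader bytes...")?;
    out.flush()?;
    Ok(out.finish())
}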

View File

@ -1,10 +1,14 @@
use std::path::{Path, PathBuf};
use argp::FromArgs;
use nod::{Disc, OpenOptions, SECTOR_SIZE};
use nod::{
disc::SECTOR_SIZE,
read::{DiscOptions, DiscReader, PartitionOptions},
};
use size::Size;
use tracing::info;
use crate::util::{display, shared::print_header};
use crate::util::{path_display, shared::print_header};
#[derive(FromArgs, Debug)]
/// Displays information about disc images.
@ -23,17 +27,17 @@ pub fn run(args: Args) -> nod::Result<()> {
}
fn info_file(path: &Path) -> nod::Result<()> {
log::info!("Loading {}", display(path));
let disc = Disc::new_with_options(path, &OpenOptions {
rebuild_encryption: false,
validate_hashes: false,
})?;
info!("Loading {}", path_display(path));
let disc = DiscReader::new(path, &DiscOptions::default())?;
let header = disc.header();
let meta = disc.meta();
print_header(header, &meta);
if header.is_wii() {
for (idx, info) in disc.partitions().iter().enumerate() {
let mut partition = disc.open_partition(idx, &PartitionOptions::default())?;
let meta = partition.meta()?;
println!();
println!("Partition {}", idx);
println!("\tType: {}", info.kind);
@ -41,62 +45,66 @@ fn info_file(path: &Path) -> nod::Result<()> {
println!("\tStart sector: {} (offset {:#X})", info.start_sector, offset);
let data_size =
(info.data_end_sector - info.data_start_sector) as u64 * SECTOR_SIZE as u64;
if info.has_encryption {
println!(
"\tEncrypted data offset / size: {:#X} / {:#X} ({})",
info.data_start_sector as u64 * SECTOR_SIZE as u64,
data_size,
Size::from_bytes(data_size)
);
} else {
println!(
"\tDecrypted data offset / size: {:#X} / {:#X} ({})",
offset,
data_size,
Size::from_bytes(data_size)
);
}
println!(
"\tData offset / size: {:#X} / {:#X} ({})",
info.data_start_sector as u64 * SECTOR_SIZE as u64,
data_size,
Size::from_bytes(data_size)
);
println!(
"\tTMD offset / size: {:#X} / {:#X}",
"\tTMD offset / size: {:#X} / {:#X}",
offset + info.header.tmd_off(),
info.header.tmd_size()
);
if let Some(content_metadata) = meta.content_metadata() {
for content in content_metadata {
println!(
"\t-> Content {:08X} size: {:#X} ({})",
content.content_index.get(),
content.size.get(),
Size::from_bytes(content.size.get()),
);
}
}
println!(
"\tCert offset / size: {:#X} / {:#X}",
"\tCert chain offset / size: {:#X} / {:#X}",
offset + info.header.cert_chain_off(),
info.header.cert_chain_size()
);
println!(
"\tH3 offset / size: {:#X} / {:#X}",
"\tH3 table offset / size: {:#X} / {:#X}",
offset + info.header.h3_table_off(),
info.header.h3_table_size()
);
let mut partition = disc.open_partition(idx)?;
let meta = partition.meta()?;
let tmd = meta.tmd_header();
let title_id_str = if let Some(tmd) = tmd {
format!(
"{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}",
tmd.title_id[0],
tmd.title_id[1],
tmd.title_id[2],
tmd.title_id[3],
tmd.title_id[4],
tmd.title_id[5],
tmd.title_id[6],
tmd.title_id[7]
)
hex::encode_upper(tmd.title_id)
} else {
"N/A".to_string()
};
println!("\tTitle: {}", info.disc_header.game_title_str());
println!("\tGame ID: {} ({})", info.disc_header.game_id_str(), title_id_str);
let part_disc_header = info.disc_header();
println!("\tTitle: {}", part_disc_header.game_title_str());
println!("\tGame ID: {} ({})", part_disc_header.game_id_str(), title_id_str);
println!(
"\tDisc {}, Revision {}",
info.disc_header.disc_num + 1,
info.disc_header.disc_version
part_disc_header.disc_num + 1,
part_disc_header.disc_version
);
}
} else if header.is_gamecube() {
// TODO
} else {
println!(
"Invalid GC/Wii magic: {:#010X}/{:#010X}",
header.gcn_magic.get(),
header.wii_magic.get()
);
println!("Invalid GC/Wii magic: {:#x?}/{:#x?}", header.gcn_magic, header.wii_magic);
}
println!();
Ok(())

View File

@ -1,5 +1,7 @@
pub mod convert;
pub mod dat;
pub mod extract;
// [WIP] Disc image building is incomplete and not yet exposed.
// pub mod r#gen;
pub mod info;
pub mod verify;

View File

@ -1,6 +1,10 @@
use std::path::PathBuf;
use argp::FromArgs;
use nod::{
read::{DiscOptions, PartitionEncryption},
write::FormatOptions,
};
use crate::util::{redump, shared::convert_and_verify};
@ -17,6 +21,12 @@ pub struct Args {
#[argp(option, short = 'd')]
/// path to DAT file(s) for verification (optional)
dat: Vec<PathBuf>,
#[argp(switch)]
/// decrypt Wii partition data
decrypt: bool,
#[argp(switch)]
/// encrypt Wii partition data
encrypt: bool,
}
pub fn run(args: Args) -> nod::Result<()> {
@ -24,8 +34,23 @@ pub fn run(args: Args) -> nod::Result<()> {
println!("Loading dat files...");
redump::load_dats(args.dat.iter().map(PathBuf::as_ref))?;
}
let cpus = num_cpus::get();
let options = DiscOptions {
partition_encryption: match (args.decrypt, args.encrypt) {
(true, false) => PartitionEncryption::ForceDecrypted,
(false, true) => PartitionEncryption::ForceEncrypted,
(false, false) => PartitionEncryption::Original,
(true, true) => {
return Err(nod::Error::Other(
"Both --decrypt and --encrypt specified".to_string(),
));
}
},
preloader_threads: 4.min(cpus),
};
let format_options = FormatOptions::default();
for file in &args.file {
convert_and_verify(file, None, args.md5)?;
convert_and_verify(file, None, args.md5, &options, &format_options)?;
println!();
}
Ok(())

View File

@ -3,22 +3,31 @@ use argp::FromArgs;
pub mod cmd;
pub(crate) mod util;
// Re-export nod
pub use nod;
#[derive(FromArgs, Debug)]
#[argp(subcommand)]
pub enum SubCommand {
Dat(cmd::dat::Args),
Info(cmd::info::Args),
Extract(cmd::extract::Args),
Convert(cmd::convert::Args),
Dat(cmd::dat::Args),
Extract(cmd::extract::Args),
// [WIP] Disc image building is incomplete and not yet exposed.
// Gen(cmd::gen::Args),
// GenTest(cmd::r#gen::TestArgs),
Info(cmd::info::Args),
Verify(cmd::verify::Args),
}
pub fn run(command: SubCommand) -> nod::Result<()> {
match command {
SubCommand::Dat(c_args) => cmd::dat::run(c_args),
SubCommand::Info(c_args) => cmd::info::run(c_args),
SubCommand::Convert(c_args) => cmd::convert::run(c_args),
SubCommand::Dat(c_args) => cmd::dat::run(c_args),
SubCommand::Extract(c_args) => cmd::extract::run(c_args),
// [WIP] Disc image building is incomplete and not yet exposed.
// SubCommand::Gen(c_args) => cmd::gen::run(c_args),
// SubCommand::GenTest(c_args) => cmd::r#gen::run_test(c_args),
SubCommand::Info(c_args) => cmd::info::run(c_args),
SubCommand::Verify(c_args) => cmd::verify::run(c_args),
}
}

View File

@ -1,13 +1,17 @@
mod argp_version;
// musl's allocator is very slow, so use mimalloc when targeting musl.
// Otherwise, use the system allocator to avoid extra code size.
#[cfg(target_env = "musl")]
#[global_allocator]
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
use std::{env, error::Error, ffi::OsStr, fmt, path::PathBuf, str::FromStr};
use argp::{FromArgValue, FromArgs};
use enable_ansi_support::enable_ansi_support;
use nodtool::{run, SubCommand};
use nodtool::{SubCommand, run};
use supports_color::Stream;
use tracing::level_filters::LevelFilter;
use tracing_subscriber::EnvFilter;
#[derive(FromArgs, Debug)]
/// Tool for reading GameCube and Wii disc images.
@ -89,31 +93,47 @@ fn main() {
// Try to enable ANSI support on Windows.
let _ = enable_ansi_support();
// Disable isatty check for supports-color. (e.g. when used with ninja)
env::set_var("IGNORE_IS_TERMINAL", "1");
unsafe { env::set_var("IGNORE_IS_TERMINAL", "1") };
supports_color::on(Stream::Stdout).is_some_and(|c| c.has_basic)
};
let format =
tracing_subscriber::fmt::format().with_ansi(use_colors).with_target(false).without_time();
let builder = tracing_subscriber::fmt().event_format(format);
if let Some(level) = args.log_level {
builder
.with_max_level(match level {
LogLevel::Error => LevelFilter::ERROR,
LogLevel::Warn => LevelFilter::WARN,
LogLevel::Info => LevelFilter::INFO,
LogLevel::Debug => LevelFilter::DEBUG,
LogLevel::Trace => LevelFilter::TRACE,
})
.init();
} else {
builder
.with_env_filter(
EnvFilter::builder()
.with_default_directive(LevelFilter::INFO.into())
.from_env_lossy(),
)
.init();
#[cfg(feature = "tracy")]
{
use tracing_subscriber::layer::SubscriberExt;
tracing::subscriber::set_global_default(
tracing_subscriber::registry().with(tracing_tracy::TracyLayer::default()),
)
.expect("setup tracy layer");
}
#[cfg(not(feature = "tracy"))]
{
use tracing::level_filters::LevelFilter;
use tracing_subscriber::EnvFilter;
let format = tracing_subscriber::fmt::format()
.with_ansi(use_colors)
.with_target(false)
.without_time();
let builder = tracing_subscriber::fmt().event_format(format);
if let Some(level) = args.log_level {
builder
.with_max_level(match level {
LogLevel::Error => LevelFilter::ERROR,
LogLevel::Warn => LevelFilter::WARN,
LogLevel::Info => LevelFilter::INFO,
LogLevel::Debug => LevelFilter::DEBUG,
LogLevel::Trace => LevelFilter::TRACE,
})
.init();
} else {
builder
.with_env_filter(
EnvFilter::builder()
.with_default_directive(LevelFilter::INFO.into())
.from_env_lossy(),
)
.init();
}
}
let mut result = Ok(());
@ -125,8 +145,10 @@ fn main() {
result = result.and_then(|_| run(args.command));
if let Err(e) = result {
eprintln!("Failed: {}", e);
if let Some(source) = e.source() {
eprintln!("Caused by: {}", source);
let mut source = e.source();
while let Some(e) = source {
eprintln!("Caused by: {}", e);
source = e.source();
}
std::process::exit(1);
}

View File

@ -1,29 +1,4 @@
use std::{
fmt,
sync::{
mpsc::{sync_channel, SyncSender},
Arc,
},
thread,
thread::JoinHandle,
};
use digest::{Digest, Output};
pub type DigestThread = (SyncSender<Arc<[u8]>>, JoinHandle<DigestResult>);
pub fn digest_thread<H>() -> DigestThread
where H: Hasher + Send + 'static {
let (tx, rx) = sync_channel::<Arc<[u8]>>(1);
let handle = thread::spawn(move || {
let mut hasher = H::new();
while let Ok(data) = rx.recv() {
hasher.update(data.as_ref());
}
hasher.finalize()
});
(tx, handle)
}
use std::fmt;
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DigestResult {
@ -48,49 +23,9 @@ impl fmt::Display for DigestResult {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
DigestResult::Crc32(crc) => write!(f, "{:08x}", crc),
DigestResult::Md5(md5) => write!(f, "{:032x}", <Output<md5::Md5>>::from(*md5)),
DigestResult::Sha1(sha1) => write!(f, "{:040x}", <Output<sha1::Sha1>>::from(*sha1)),
DigestResult::Md5(md5) => write!(f, "{}", hex::encode(md5)),
DigestResult::Sha1(sha1) => write!(f, "{}", hex::encode(sha1)),
DigestResult::Xxh64(xxh64) => write!(f, "{:016x}", xxh64),
}
}
}
pub trait Hasher {
fn new() -> Self;
fn finalize(self) -> DigestResult;
fn update(&mut self, data: &[u8]);
}
impl Hasher for md5::Md5 {
fn new() -> Self { Digest::new() }
fn finalize(self) -> DigestResult { DigestResult::Md5(Digest::finalize(self).into()) }
fn update(&mut self, data: &[u8]) { Digest::update(self, data) }
}
impl Hasher for sha1::Sha1 {
fn new() -> Self { Digest::new() }
fn finalize(self) -> DigestResult { DigestResult::Sha1(Digest::finalize(self).into()) }
fn update(&mut self, data: &[u8]) { Digest::update(self, data) }
}
impl Hasher for crc32fast::Hasher {
fn new() -> Self { crc32fast::Hasher::new() }
fn finalize(self) -> DigestResult { DigestResult::Crc32(crc32fast::Hasher::finalize(self)) }
fn update(&mut self, data: &[u8]) { crc32fast::Hasher::update(self, data) }
}
impl Hasher for xxhash_rust::xxh64::Xxh64 {
fn new() -> Self { xxhash_rust::xxh64::Xxh64::new(0) }
fn finalize(self) -> DigestResult {
DigestResult::Xxh64(xxhash_rust::xxh64::Xxh64::digest(&self))
}
fn update(&mut self, data: &[u8]) { xxhash_rust::xxh64::Xxh64::update(self, data) }
}

View File

@ -5,21 +5,21 @@ pub mod shared;
use std::{
fmt,
fmt::Write,
path::{Path, MAIN_SEPARATOR},
path::{MAIN_SEPARATOR, Path},
};
pub fn display(path: &Path) -> PathDisplay { PathDisplay { path } }
pub fn path_display(path: &Path) -> PathDisplay<'_> { PathDisplay { path } }
pub struct PathDisplay<'a> {
path: &'a Path,
}
impl<'a> fmt::Display for PathDisplay<'a> {
impl fmt::Display for PathDisplay<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut first = true;
for segment in self.path.iter() {
let segment_str = segment.to_string_lossy();
if segment_str == "." {
if segment_str == "/" || segment_str == "." {
continue;
}
if first {
@ -39,3 +39,15 @@ pub fn has_extension(filename: &Path, extension: &str) -> bool {
None => false,
}
}
/// Creates a fixed-size array reference from a slice.
macro_rules! array_ref {
($slice:expr, $offset:expr, $size:expr) => {{
#[inline(always)]
fn to_array<T>(slice: &[T]) -> &[T; $size] {
unsafe { &*(slice.as_ptr() as *const [_; $size]) }
}
to_array(&$slice[$offset..$offset + $size])
}};
}
pub(crate) use array_ref;
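For illustration, array_ref! borrows a fixed-size window of a slice without copying; the slice indexing performs the bounds check before the pointer cast, which keeps the unsafe block sound. A hypothetical use:

// Borrow the 4-byte game ID prefix from a 6-byte game ID, as get_junk_id does.
fn game_id_prefix(game_id: &[u8; 6]) -> &[u8; 4] {
    array_ref!(game_id, 0, 4)
}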

View File

@ -8,9 +8,11 @@ use std::{
};
use hex::deserialize as deserialize_hex;
use nod::{array_ref, Result};
use nod::Result;
use serde::Deserialize;
use zerocopy::{AsBytes, FromBytes, FromZeroes};
use zerocopy::{FromBytes, FromZeros, Immutable, IntoBytes, KnownLayout};
use crate::util::array_ref;
#[derive(Clone, Debug)]
pub struct GameResult<'a> {
@ -33,18 +35,15 @@ impl<'a> Iterator for EntryIter<'a> {
type Item = GameResult<'a>;
fn next(&mut self) -> Option<Self::Item> {
let header: &Header = Header::ref_from_prefix(self.data).unwrap();
let (header, remaining) = Header::ref_from_prefix(self.data).ok()?;
assert_eq!(header.entry_size as usize, size_of::<GameEntry>());
if self.index >= header.entry_count as usize {
return None;
}
let entries_size = header.entry_count as usize * size_of::<GameEntry>();
let entries: &[GameEntry] = GameEntry::slice_from(
&self.data[size_of::<Header>()..size_of::<Header>() + entries_size],
)
.unwrap();
let string_table: &[u8] = &self.data[size_of::<Header>() + entries_size..];
let entries = <[GameEntry]>::ref_from_bytes(&remaining[..entries_size]).ok()?;
let string_table = &self.data[size_of::<Header>() + entries_size..];
let entry = &entries[self.index];
let offset = entry.string_table_offset as usize;
@ -57,14 +56,12 @@ impl<'a> Iterator for EntryIter<'a> {
pub fn find_by_crc32(crc32: u32) -> Option<GameResult<'static>> {
let data = loaded_data();
let header: &Header = Header::ref_from_prefix(data).unwrap();
let (header, remaining) = Header::ref_from_prefix(data).ok()?;
assert_eq!(header.entry_size as usize, size_of::<GameEntry>());
let entries_size = header.entry_count as usize * size_of::<GameEntry>();
let entries: &[GameEntry] =
GameEntry::slice_from(&data[size_of::<Header>()..size_of::<Header>() + entries_size])
.unwrap();
let string_table: &[u8] = &data[size_of::<Header>() + entries_size..];
let (entries_buf, string_table) = remaining.split_at(entries_size);
let entries = <[GameEntry]>::ref_from_bytes(entries_buf).ok()?;
// Binary search by CRC32
let index = entries.binary_search_by_key(&crc32, |entry| entry.crc32).ok()?;
@ -84,7 +81,7 @@ fn loaded_data() -> &'static [u8] {
LOADED
.get_or_init(|| {
let size = zstd::zstd_safe::get_frame_content_size(BUILTIN).unwrap().unwrap() as usize;
let mut out = <u8>::new_box_slice_zeroed(size);
let mut out = <[u8]>::new_box_zeroed_with_elems(size).unwrap();
let out_size = zstd::bulk::Decompressor::new()
.unwrap()
.decompress_to_buffer(BUILTIN, out.as_mut())
@ -126,7 +123,7 @@ pub fn load_dats<'a>(paths: impl Iterator<Item = &'a Path>) -> Result<()> {
let entries_size = entries.len() * size_of::<GameEntry>();
let string_table_size = entries.iter().map(|(_, name)| name.len() + 4).sum::<usize>();
let total_size = size_of::<Header>() + entries_size + string_table_size;
let mut result = <u8>::new_box_slice_zeroed(total_size);
let mut result = <[u8]>::new_box_zeroed_with_elems(total_size)?;
let mut out = Cursor::new(result.as_mut());
// Write game entries
@ -137,7 +134,7 @@ pub fn load_dats<'a>(paths: impl Iterator<Item = &'a Path>) -> Result<()> {
for (entry, name) in &mut entries {
entry.string_table_offset = string_table_offset;
out.write_all(entry.as_bytes()).unwrap();
string_table_offset += name.as_bytes().len() as u32 + 4;
string_table_offset += name.len() as u32 + 4;
}
// Write string table
@ -152,7 +149,7 @@ pub fn load_dats<'a>(paths: impl Iterator<Item = &'a Path>) -> Result<()> {
}
// Keep in sync with build.rs
#[derive(Clone, Debug, AsBytes, FromBytes, FromZeroes)]
#[derive(Clone, Debug, IntoBytes, FromBytes, Immutable, KnownLayout)]
#[repr(C, align(4))]
struct Header {
entry_count: u32,
@ -160,7 +157,7 @@ struct Header {
}
// Keep in sync with build.rs
#[derive(Clone, Debug, AsBytes, FromBytes, FromZeroes)]
#[derive(Clone, Debug, IntoBytes, FromBytes, Immutable, KnownLayout)]
#[repr(C, align(4))]
struct GameEntry {
crc32: u32,

View File

@ -1,22 +1,21 @@
use std::{
cmp::min,
fmt,
fs::File,
io::{Read, Write},
io::{Seek, Write},
path::Path,
sync::{mpsc::sync_channel, Arc},
thread,
};
use indicatif::{ProgressBar, ProgressState, ProgressStyle};
use nod::{Compression, Disc, DiscHeader, DiscMeta, OpenOptions, Result, ResultContext};
use size::Size;
use zerocopy::FromZeroes;
use crate::util::{
digest::{digest_thread, DigestResult},
display, redump,
use nod::{
Result, ResultContext,
common::Compression,
disc::DiscHeader,
read::{DiscMeta, DiscOptions, DiscReader, PartitionEncryption},
write::{DiscWriter, DiscWriterWeight, FormatOptions, ProcessOptions},
};
use size::Size;
use crate::util::{digest::DigestResult, path_display, redump};
pub fn print_header(header: &DiscHeader, meta: &DiscMeta) {
println!("Format: {}", meta.format);
@ -29,50 +28,71 @@ pub fn print_header(header: &DiscHeader, meta: &DiscMeta) {
println!("Lossless: {}", meta.lossless);
println!(
"Verification data: {}",
meta.crc32.is_some()
|| meta.md5.is_some()
|| meta.sha1.is_some()
|| meta.xxhash64.is_some()
meta.crc32.is_some() || meta.md5.is_some() || meta.sha1.is_some() || meta.xxh64.is_some()
);
println!();
println!("Title: {}", header.game_title_str());
println!("Game ID: {}", header.game_id_str());
println!("Disc {}, Revision {}", header.disc_num + 1, header.disc_version);
if header.no_partition_hashes != 0 {
println!("[!] Disc has no hashes");
}
if header.no_partition_encryption != 0 {
if !header.has_partition_encryption() {
println!("[!] Disc is not encrypted");
}
if !header.has_partition_hashes() {
println!("[!] Disc has no hashes");
}
}
pub fn convert_and_verify(in_file: &Path, out_file: Option<&Path>, md5: bool) -> Result<()> {
println!("Loading {}", display(in_file));
let mut disc = Disc::new_with_options(in_file, &OpenOptions {
rebuild_encryption: true,
validate_hashes: false,
})?;
pub fn convert_and_verify(
in_file: &Path,
out_file: Option<&Path>,
md5: bool,
options: &DiscOptions,
format_options: &FormatOptions,
) -> Result<()> {
println!("Loading {}", path_display(in_file));
let disc = DiscReader::new(in_file, options)?;
let header = disc.header();
let meta = disc.meta();
print_header(header, &meta);
let disc_size = disc.disc_size();
let mut file = if let Some(out_file) = out_file {
Some(
File::create(out_file)
.with_context(|| format!("Creating file {}", display(out_file)))?,
.with_context(|| format!("Creating file {}", path_display(out_file)))?,
)
} else {
None
};
if out_file.is_some() {
println!("\nConverting...");
match options.partition_encryption {
PartitionEncryption::ForceEncrypted => {
println!("\nConverting to {} (encrypted)...", format_options.format)
}
PartitionEncryption::ForceDecrypted => {
println!("\nConverting to {} (decrypted)...", format_options.format)
}
_ => println!("\nConverting to {}...", format_options.format),
}
if format_options.compression != Compression::None {
println!("Compression: {}", format_options.compression);
}
if format_options.block_size > 0 {
println!("Block size: {}", Size::from_bytes(format_options.block_size));
}
} else {
println!("\nVerifying...");
match options.partition_encryption {
PartitionEncryption::ForceEncrypted => {
println!("\nVerifying (encrypted)...")
}
PartitionEncryption::ForceDecrypted => {
println!("\nVerifying (decrypted)...")
}
_ => println!("\nVerifying..."),
}
}
let pb = ProgressBar::new(disc_size);
let disc_writer = DiscWriter::new(disc, format_options)?;
let pb = ProgressBar::new(disc_writer.progress_bound());
pb.set_style(ProgressStyle::with_template("{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {bytes}/{total_bytes} ({bytes_per_sec}, {eta})")
.unwrap()
.with_key("eta", |state: &ProgressState, w: &mut dyn fmt::Write| {
@ -80,85 +100,71 @@ pub fn convert_and_verify(in_file: &Path, out_file: Option<&Path>, md5: bool) ->
})
.progress_chars("#>-"));
const BUFFER_SIZE: usize = 1015808; // LCM(0x8000, 0x7C00)
let digest_threads = if md5 {
vec![
digest_thread::<crc32fast::Hasher>(),
digest_thread::<md5::Md5>(),
digest_thread::<sha1::Sha1>(),
digest_thread::<xxhash_rust::xxh64::Xxh64>(),
]
} else {
vec![
digest_thread::<crc32fast::Hasher>(),
digest_thread::<sha1::Sha1>(),
digest_thread::<xxhash_rust::xxh64::Xxh64>(),
]
let cpus = num_cpus::get();
let processor_threads = match disc_writer.weight() {
DiscWriterWeight::Light => 0,
DiscWriterWeight::Medium => cpus / 2,
DiscWriterWeight::Heavy => cpus,
};
let (w_tx, w_rx) = sync_channel::<Arc<[u8]>>(1);
let w_thread = thread::spawn(move || {
let mut total_written = 0u64;
while let Ok(data) = w_rx.recv() {
let mut total_written = 0u64;
let finalization = disc_writer.process(
|data, pos, _| {
if let Some(file) = &mut file {
file.write_all(data.as_ref())
.with_context(|| {
format!("Writing {} bytes at offset {}", data.len(), total_written)
})
.unwrap();
file.write_all(data.as_ref())?;
}
total_written += data.len() as u64;
pb.set_position(total_written);
}
if let Some(mut file) = file {
file.flush().context("Flushing output file").unwrap();
}
pb.finish();
});
pb.set_position(pos);
Ok(())
},
&ProcessOptions {
processor_threads,
digest_crc32: true,
digest_md5: md5,
digest_sha1: true,
digest_xxh64: true,
},
)?;
pb.finish();
let mut total_read = 0u64;
let mut buf = <u8>::new_box_slice_zeroed(BUFFER_SIZE);
while total_read < disc_size {
let read = min(BUFFER_SIZE as u64, disc_size - total_read) as usize;
disc.read_exact(&mut buf[..read]).with_context(|| {
format!("Reading {} bytes at disc offset {}", BUFFER_SIZE, total_read)
})?;
let arc = Arc::<[u8]>::from(&buf[..read]);
for (tx, _) in &digest_threads {
tx.send(arc.clone()).map_err(|_| "Sending data to hash thread")?;
// Finalize disc writer
if !finalization.header.is_empty() {
if let Some(file) = &mut file {
file.rewind().context("Seeking to start of output file")?;
file.write_all(finalization.header.as_ref()).context("Writing header")?;
} else {
return Err(nod::Error::Other("No output file, but requires finalization".to_string()));
}
w_tx.send(arc).map_err(|_| "Sending data to write thread")?;
total_read += read as u64;
}
drop(w_tx); // Close channel
w_thread.join().unwrap();
if let Some(mut file) = file {
file.flush().context("Flushing output file")?;
}
println!();
if let Some(path) = out_file {
println!("Wrote {} to {}", Size::from_bytes(total_read), display(path));
println!("Wrote {} to {}", Size::from_bytes(total_written), path_display(path));
}
println!();
let mut crc32 = None;
let mut md5 = None;
let mut sha1 = None;
let mut xxh64 = None;
for (tx, handle) in digest_threads {
drop(tx); // Close channel
match handle.join().unwrap() {
DigestResult::Crc32(v) => crc32 = Some(v),
DigestResult::Md5(v) => md5 = Some(v),
DigestResult::Sha1(v) => sha1 = Some(v),
DigestResult::Xxh64(v) => xxh64 = Some(v),
}
}
let redump_entry = crc32.and_then(redump::find_by_crc32);
let expected_crc32 = meta.crc32.or(redump_entry.as_ref().map(|e| e.crc32));
let expected_md5 = meta.md5.or(redump_entry.as_ref().map(|e| e.md5));
let expected_sha1 = meta.sha1.or(redump_entry.as_ref().map(|e| e.sha1));
let expected_xxh64 = meta.xxhash64;
let mut redump_entry = None;
let mut expected_crc32 = None;
let mut expected_md5 = None;
let mut expected_sha1 = None;
let mut expected_xxh64 = None;
if options.partition_encryption == PartitionEncryption::Original {
// Use verification data in disc and check redump
redump_entry = finalization.crc32.and_then(redump::find_by_crc32);
expected_crc32 = meta.crc32.or(redump_entry.as_ref().map(|e| e.crc32));
expected_md5 = meta.md5.or(redump_entry.as_ref().map(|e| e.md5));
expected_sha1 = meta.sha1.or(redump_entry.as_ref().map(|e| e.sha1));
expected_xxh64 = meta.xxh64;
} else if options.partition_encryption == PartitionEncryption::ForceEncrypted {
// Ignore verification data in disc, but still check redump
redump_entry = finalization.crc32.and_then(redump::find_by_crc32);
expected_crc32 = redump_entry.as_ref().map(|e| e.crc32);
expected_md5 = redump_entry.as_ref().map(|e| e.md5);
expected_sha1 = redump_entry.as_ref().map(|e| e.sha1);
}
fn print_digest(value: DigestResult, expected: Option<DigestResult>) {
print!("{:<6}: ", value.name());
@ -174,36 +180,36 @@ pub fn convert_and_verify(in_file: &Path, out_file: Option<&Path>, md5: bool) ->
println!();
}
if let Some(entry) = &redump_entry {
let mut full_match = true;
if let Some(md5) = md5 {
if entry.md5 != md5 {
full_match = false;
if let Some(crc32) = finalization.crc32 {
if let Some(entry) = &redump_entry {
let mut full_match = true;
if let Some(md5) = finalization.md5 {
if entry.md5 != md5 {
full_match = false;
}
}
}
if let Some(sha1) = sha1 {
if entry.sha1 != sha1 {
full_match = false;
if let Some(sha1) = finalization.sha1 {
if entry.sha1 != sha1 {
full_match = false;
}
}
if full_match {
println!("Redump: {}", entry.name);
} else {
println!("Redump: {} ❓ (partial match)", entry.name);
}
}
if full_match {
println!("Redump: {}", entry.name);
} else {
println!("Redump: {} ❓ (partial match)", entry.name);
println!("Redump: Not found ❌");
}
} else {
println!("Redump: Not found ❌");
}
if let Some(crc32) = crc32 {
print_digest(DigestResult::Crc32(crc32), expected_crc32.map(DigestResult::Crc32));
}
if let Some(md5) = md5 {
if let Some(md5) = finalization.md5 {
print_digest(DigestResult::Md5(md5), expected_md5.map(DigestResult::Md5));
}
if let Some(sha1) = sha1 {
if let Some(sha1) = finalization.sha1 {
print_digest(DigestResult::Sha1(sha1), expected_sha1.map(DigestResult::Sha1));
}
if let Some(xxh64) = xxh64 {
if let Some(xxh64) = finalization.xxh64 {
print_digest(DigestResult::Xxh64(xxh64), expected_xxh64.map(DigestResult::Xxh64));
}
Ok(())

View File

@ -6,3 +6,4 @@ reorder_impl_items = true
use_field_init_shorthand = true
use_small_heuristics = "Max"
where_single_line = true
format_code_in_doc_comments = true