mirror of https://github.com/encounter/nod-rs.git

Finish WIA/RVZ, add WBFS, CISO & more

Generally a complete overhaul.

parent fff7b350b1
commit ce9fbbf822

Cargo.toml (33 lines changed)
|
@@ -11,8 +11,9 @@ readme = "README.md"
|
||||||
description = """
|
description = """
|
||||||
Rust library and CLI tool for reading GameCube and Wii disc images.
|
Rust library and CLI tool for reading GameCube and Wii disc images.
|
||||||
"""
|
"""
|
||||||
keywords = ["gamecube", "wii", "iso", "nfs", "gcm"]
|
keywords = ["gamecube", "wii", "iso", "nfs", "rvz"]
|
||||||
categories = ["command-line-utilities", "parser-implementations"]
|
categories = ["command-line-utilities", "parser-implementations"]
|
||||||
|
build = "build.rs"
|
||||||
|
|
||||||
[[bin]]
|
[[bin]]
|
||||||
name = "nodtool"
|
name = "nodtool"
|
||||||
|
@@ -24,27 +25,37 @@ lto = "thin"
|
||||||
strip = "debuginfo"
|
strip = "debuginfo"
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
default = ["compress-bzip2", "compress-zstd"] #, "compress-lzma"
|
default = ["compress-bzip2", "compress-lzma", "compress-zstd"]
|
||||||
asm = ["md-5/asm", "sha1/asm"]
|
asm = ["md-5/asm", "sha1/asm"]
|
||||||
compress-bzip2 = ["bzip2"]
|
compress-bzip2 = ["bzip2"]
|
||||||
|
compress-lzma = ["liblzma"]
|
||||||
compress-zstd = ["zstd"]
|
compress-zstd = ["zstd"]
|
||||||
#compress-lzma = ["xz2"]
|
|
||||||
nightly = ["crc32fast/nightly"]
|
nightly = ["crc32fast/nightly"]
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
aes = "0.8.3"
|
aes = "0.8.4"
|
||||||
argh = "0.1.12"
|
|
||||||
argh_derive = "0.1.12"
|
argh_derive = "0.1.12"
|
||||||
|
argp = "0.3.0"
|
||||||
base16ct = "0.2.0"
|
base16ct = "0.2.0"
|
||||||
binrw = "0.13.3"
|
bzip2 = { version = "0.4.4", features = ["static"], optional = true }
|
||||||
bytemuck = "1.14.1"
|
|
||||||
bzip2 = { version = "0.4.4", optional = true }
|
|
||||||
cbc = "0.1.2"
|
cbc = "0.1.2"
|
||||||
crc32fast = "1.3.2"
|
crc32fast = "1.4.0"
|
||||||
|
digest = "0.10.7"
|
||||||
|
enable-ansi-support = "0.2.1"
|
||||||
encoding_rs = "0.8.33"
|
encoding_rs = "0.8.33"
|
||||||
file-size = "1.0.3"
|
file-size = "1.0.3"
|
||||||
|
indicatif = "0.17.8"
|
||||||
|
itertools = "0.12.1"
|
||||||
|
liblzma = { git = "https://github.com/encounter/liblzma-rs.git", rev = "ce29b22", features = ["static"], optional = true }
|
||||||
|
log = "0.4.20"
|
||||||
md-5 = "0.10.6"
|
md-5 = "0.10.6"
|
||||||
|
rayon = "1.8.1"
|
||||||
sha1 = "0.10.6"
|
sha1 = "0.10.6"
|
||||||
thiserror = "1.0.56"
|
supports-color = "3.0.0"
|
||||||
xz2 = { version = "0.1.7", optional = true }
|
thiserror = "1.0.57"
|
||||||
|
tracing = "0.1.40"
|
||||||
|
tracing-attributes = "0.1.27"
|
||||||
|
tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
|
||||||
|
xxhash-rust = { version = "0.8.8", features = ["xxh64"] }
|
||||||
|
zerocopy = { version = "0.7.32", features = ["alloc", "derive"] }
|
||||||
zstd = { version = "0.13.0", optional = true }
|
zstd = { version = "0.13.0", optional = true }
|
||||||
|
|
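The `compress-*` features above each enable one optional dependency (`bzip2`, `liblzma`, `zstd`). As an illustration only, not code from this crate, optional decompression support is typically gated like this:

```rust
// Illustrative sketch of feature-gated compression support; the function name
// and shape are hypothetical, not nod's actual API.
#[cfg(feature = "compress-zstd")]
fn decompress_zstd(data: &[u8]) -> std::io::Result<Vec<u8>> {
    // zstd::stream::decode_all accepts any `Read`, including a byte slice.
    zstd::stream::decode_all(data)
}

#[cfg(not(feature = "compress-zstd"))]
fn decompress_zstd(_data: &[u8]) -> std::io::Result<Vec<u8>> {
    // Built without zstd support: report the missing feature instead of linking it.
    Err(std::io::Error::new(
        std::io::ErrorKind::Unsupported,
        "built without the compress-zstd feature",
    ))
}
```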
README.md (54 lines changed)
|
@@ -10,52 +10,68 @@
|
||||||
|
|
||||||
Library for traversing & reading GameCube and Wii disc images.
|
Library for traversing & reading GameCube and Wii disc images.
|
||||||
|
|
||||||
Based on the C++ library [nod](https://github.com/AxioDL/nod),
|
Originally based on the C++ library [nod](https://github.com/AxioDL/nod),
|
||||||
but does not currently support authoring.
|
but does not currently support authoring.
|
||||||
|
|
||||||
Currently supported file formats:
|
Currently supported file formats:
|
||||||
- ISO (GCM)
|
- ISO (GCM)
|
||||||
- WIA / RVZ
|
- WIA / RVZ
|
||||||
- WBFS
|
- WBFS
|
||||||
- NFS (Wii U VC files, e.g. `hif_000000.nfs`)
|
- CISO
|
||||||
|
- NFS (Wii U VC)
|
||||||
|
|
||||||
### CLI tool
|
## CLI tool
|
||||||
|
|
||||||
This crate includes a CLI tool `nodtool`, which can be used to extract disc images to a specified directory:
|
This crate includes a command-line tool called `nodtool`.
|
||||||
|
|
||||||
|
### info
|
||||||
|
|
||||||
|
Displays information about a disc image.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
nodtool info /path/to/game.iso
|
||||||
|
```
|
||||||
|
|
||||||
|
### extract
|
||||||
|
|
||||||
|
Extracts the contents of a disc image to a directory.
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
nodtool extract /path/to/game.iso [outdir]
|
nodtool extract /path/to/game.iso [outdir]
|
||||||
```
|
```
|
||||||
|
|
||||||
For Wii U VC titles, use `content/hif_*.nfs`:
|
For Wii U VC titles, use `content/hif_000000.nfs`:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
nodtool extract /path/to/game/content/hif_000000.nfs [outdir]
|
nodtool extract /path/to/game/content/hif_000000.nfs [outdir]
|
||||||
```
|
```
|
||||||
|
|
||||||
### Library example
|
### convert
|
||||||
|
|
||||||
|
Converts any supported format to raw ISO.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
nodtool convert /path/to/game.wia /path/to/game.iso
|
||||||
|
```
|
||||||
|
|
||||||
|
## Library example
|
||||||
|
|
||||||
Opening a disc image and reading a file:
|
Opening a disc image and reading a file:
|
||||||
|
|
||||||
```rust
|
```rust
|
||||||
use std::io::Read;
|
use std::io::Read;
|
||||||
|
|
||||||
use nod::{
|
use nod::{Disc, PartitionKind};
|
||||||
disc::{new_disc_base, PartHeader},
|
|
||||||
fst::NodeType,
|
|
||||||
io::{new_disc_io, DiscIOOptions},
|
|
||||||
};
|
|
||||||
|
|
||||||
fn main() -> nod::Result<()> {
|
fn main() -> nod::Result<()> {
|
||||||
let options = DiscIOOptions::default();
|
let disc = Disc::new("path/to/file.iso")?;
|
||||||
let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?;
|
let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
|
||||||
let disc_base = new_disc_base(disc_io.as_mut())?;
|
let meta = partition.meta()?;
|
||||||
let mut partition = disc_base.get_data_partition(disc_io.as_mut(), false)?;
|
let fst = meta.fst()?;
|
||||||
let header = partition.read_header()?;
|
if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
|
||||||
if let Some(NodeType::File(node)) = header.find_node("/MP3/Worlds.txt") {
|
|
||||||
let mut s = String::new();
|
let mut s = String::new();
|
||||||
partition
|
partition
|
||||||
.begin_file_stream(node)
|
.open_file(node)
|
||||||
.expect("Failed to open file stream")
|
.expect("Failed to open file stream")
|
||||||
.read_to_string(&mut s)
|
.read_to_string(&mut s)
|
||||||
.expect("Failed to read file");
|
.expect("Failed to read file");
|
||||||
|
@@ -65,7 +81,7 @@ fn main() -> nod::Result<()> {
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
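To stream a whole disc as raw ISO data instead of reading individual files, the same API also exposes `Disc::open` and `Disc::disc_size` (this is what `nodtool convert`/`verify` use in `src/bin.rs` below). A minimal sketch; `dump_iso` is a hypothetical helper name:

```rust
use std::{fs::File, io, io::{BufWriter, Read}};

use nod::{Disc, ResultContext};

// Hypothetical helper: write the decoded disc back out as a plain ISO.
fn dump_iso(path: &str, out: &str) -> nod::Result<()> {
    let disc = Disc::new(path)?;
    // Limit the stream to the disc size reported by the format.
    let mut stream = disc.open()?.take(disc.disc_size());
    let mut writer = BufWriter::new(File::create(out).context("Creating output file")?);
    io::copy(&mut stream, &mut writer).context("Writing ISO")?;
    Ok(())
}
```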
|
|
||||||
### License
|
## License
|
||||||
|
|
||||||
Licensed under either of
|
Licensed under either of
|
||||||
|
|
||||||
|
|
|
build.rs (new file)
@@ -0,0 +1,9 @@
|
||||||
|
fn main() {
|
||||||
|
let output = std::process::Command::new("git")
|
||||||
|
.args(["rev-parse", "HEAD"])
|
||||||
|
.output()
|
||||||
|
.expect("Failed to execute git");
|
||||||
|
let rev = String::from_utf8(output.stdout).expect("Failed to parse git output");
|
||||||
|
println!("cargo:rustc-env=GIT_COMMIT_SHA={rev}");
|
||||||
|
println!("cargo:rustc-rerun-if-changed=.git/HEAD");
|
||||||
|
}
|
|
src/argp_version.rs (new file)
@@ -0,0 +1,64 @@
|
||||||
|
// Originally from https://gist.github.com/suluke/e0c672492126be0a4f3b4f0e1115d77c
|
||||||
|
//! Extend `argp` to be better integrated with the `cargo` ecosystem
|
||||||
|
//!
|
||||||
|
//! For now, this only adds a --version/-V option which causes early-exit.
|
||||||
|
use std::ffi::OsStr;
|
||||||
|
|
||||||
|
use argp::{parser::ParseGlobalOptions, EarlyExit, FromArgs, TopLevelCommand};
|
||||||
|
|
||||||
|
struct ArgsOrVersion<T>(T)
|
||||||
|
where T: FromArgs;
|
||||||
|
|
||||||
|
impl<T> TopLevelCommand for ArgsOrVersion<T> where T: FromArgs {}
|
||||||
|
|
||||||
|
impl<T> FromArgs for ArgsOrVersion<T>
|
||||||
|
where T: FromArgs
|
||||||
|
{
|
||||||
|
fn _from_args(
|
||||||
|
command_name: &[&str],
|
||||||
|
args: &[&OsStr],
|
||||||
|
parent: Option<&mut dyn ParseGlobalOptions>,
|
||||||
|
) -> Result<Self, EarlyExit> {
|
||||||
|
/// Also use argp for catching `--version`-only invocations
|
||||||
|
#[derive(FromArgs)]
|
||||||
|
struct Version {
|
||||||
|
/// Print version information and exit.
|
||||||
|
#[argp(switch, short = 'V')]
|
||||||
|
pub version: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
match Version::from_args(command_name, args) {
|
||||||
|
Ok(v) => {
|
||||||
|
if v.version {
|
||||||
|
println!(
|
||||||
|
"{} {} {}",
|
||||||
|
command_name.first().unwrap_or(&""),
|
||||||
|
env!("CARGO_PKG_VERSION"),
|
||||||
|
env!("GIT_COMMIT_SHA"),
|
||||||
|
);
|
||||||
|
std::process::exit(0);
|
||||||
|
} else {
|
||||||
|
// Pass through empty arguments
|
||||||
|
T::_from_args(command_name, args, parent).map(Self)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(exit) => match exit {
|
||||||
|
EarlyExit::Help(_help) => {
|
||||||
|
// TODO: Chain help info from Version
|
||||||
|
// For now, we just put the switch on T as well
|
||||||
|
T::from_args(command_name, &["--help"]).map(Self)
|
||||||
|
}
|
||||||
|
EarlyExit::Err(_) => T::_from_args(command_name, args, parent).map(Self),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a `FromArgs` type from the current process’s `env::args`.
|
||||||
|
///
|
||||||
|
/// This function will exit early from the current process if argument parsing was unsuccessful or if information like `--help` was requested.
|
||||||
|
/// Error messages will be printed to stderr, and `--help` output to stdout.
|
||||||
|
pub fn from_env<T>() -> T
|
||||||
|
where T: TopLevelCommand {
|
||||||
|
argp::parse_args_or_exit::<ArgsOrVersion<T>>(argp::DEFAULT).0
|
||||||
|
}
|
src/bin.rs (606 lines changed)
|
@@ -1,82 +1,215 @@
|
||||||
|
mod argp_version;
|
||||||
|
|
||||||
use std::{
|
use std::{
|
||||||
|
borrow::Cow,
|
||||||
|
env,
|
||||||
error::Error,
|
error::Error,
|
||||||
|
ffi::OsStr,
|
||||||
fs,
|
fs,
|
||||||
fs::File,
|
fs::File,
|
||||||
io,
|
io,
|
||||||
io::{BufWriter, Write},
|
io::{BufWriter, Read, Write},
|
||||||
path::{Path, PathBuf},
|
path::{Path, PathBuf},
|
||||||
|
str::FromStr,
|
||||||
|
sync::{
|
||||||
|
mpsc::{sync_channel, SyncSender},
|
||||||
|
Arc,
|
||||||
|
},
|
||||||
|
thread,
|
||||||
|
thread::JoinHandle,
|
||||||
};
|
};
|
||||||
|
|
||||||
use argh_derive::FromArgs;
|
use argp::{FromArgValue, FromArgs};
|
||||||
|
use digest::{Digest, Output};
|
||||||
|
use enable_ansi_support::enable_ansi_support;
|
||||||
|
use indicatif::{ProgressBar, ProgressState, ProgressStyle};
|
||||||
|
use itertools::Itertools;
|
||||||
use nod::{
|
use nod::{
|
||||||
disc::{new_disc_base, PartHeader, PartReadStream, PartitionType},
|
Disc, DiscHeader, Fst, Node, OpenOptions, PartitionBase, PartitionKind, PartitionMeta, Result,
|
||||||
fst::NodeType,
|
ResultContext,
|
||||||
io::{has_extension, new_disc_io, DiscIOOptions},
|
|
||||||
Result, ResultContext,
|
|
||||||
};
|
};
|
||||||
use sha1::Digest;
|
use supports_color::Stream;
|
||||||
|
use tracing::level_filters::LevelFilter;
|
||||||
|
use tracing_subscriber::EnvFilter;
|
||||||
|
use zerocopy::FromZeroes;
|
||||||
|
|
||||||
#[derive(FromArgs, Debug)]
|
#[derive(FromArgs, Debug)]
|
||||||
/// Tool for reading GameCube and Wii disc images.
|
/// Tool for reading GameCube and Wii disc images.
|
||||||
struct TopLevel {
|
struct TopLevel {
|
||||||
#[argh(subcommand)]
|
#[argp(subcommand)]
|
||||||
command: SubCommand,
|
command: SubCommand,
|
||||||
|
#[argp(option, short = 'C')]
|
||||||
|
/// Change working directory.
|
||||||
|
chdir: Option<PathBuf>,
|
||||||
|
#[argp(option, short = 'L')]
|
||||||
|
/// Minimum logging level. (Default: info)
|
||||||
|
/// Possible values: error, warn, info, debug, trace
|
||||||
|
log_level: Option<LogLevel>,
|
||||||
|
#[allow(unused)]
|
||||||
|
#[argp(switch, short = 'V')]
|
||||||
|
/// Print version information and exit.
|
||||||
|
version: bool,
|
||||||
|
#[argp(switch)]
|
||||||
|
/// Disable color output. (env: NO_COLOR)
|
||||||
|
no_color: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(FromArgs, Debug)]
|
#[derive(FromArgs, Debug)]
|
||||||
#[argh(subcommand)]
|
#[argp(subcommand)]
|
||||||
enum SubCommand {
|
enum SubCommand {
|
||||||
|
Info(InfoArgs),
|
||||||
Extract(ExtractArgs),
|
Extract(ExtractArgs),
|
||||||
Convert(ConvertArgs),
|
Convert(ConvertArgs),
|
||||||
Verify(VerifyArgs),
|
Verify(VerifyArgs),
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(FromArgs, Debug)]
|
#[derive(FromArgs, Debug)]
|
||||||
/// Extract a disc image.
|
/// Displays information about a disc image.
|
||||||
#[argh(subcommand, name = "extract")]
|
#[argp(subcommand, name = "info")]
|
||||||
struct ExtractArgs {
|
struct InfoArgs {
|
||||||
#[argh(positional)]
|
#[argp(positional)]
|
||||||
/// path to disc image (ISO or NFS)
|
/// path to disc image
|
||||||
file: PathBuf,
|
file: PathBuf,
|
||||||
#[argh(positional)]
|
}
|
||||||
|
|
||||||
|
#[derive(FromArgs, Debug)]
|
||||||
|
/// Extract a disc image.
|
||||||
|
#[argp(subcommand, name = "extract")]
|
||||||
|
struct ExtractArgs {
|
||||||
|
#[argp(positional)]
|
||||||
|
/// path to disc image
|
||||||
|
file: PathBuf,
|
||||||
|
#[argp(positional)]
|
||||||
/// output directory (optional)
|
/// output directory (optional)
|
||||||
out: Option<PathBuf>,
|
out: Option<PathBuf>,
|
||||||
#[argh(switch, short = 'q')]
|
#[argp(switch, short = 'q')]
|
||||||
/// quiet output
|
/// quiet output
|
||||||
quiet: bool,
|
quiet: bool,
|
||||||
#[argh(switch, short = 'h')]
|
#[argp(switch, short = 'h')]
|
||||||
/// validate disc hashes (Wii only)
|
/// validate disc hashes (Wii only)
|
||||||
validate: bool,
|
validate: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(FromArgs, Debug)]
|
#[derive(FromArgs, Debug)]
|
||||||
/// Extract a disc image.
|
/// Converts a disc image to ISO.
|
||||||
#[argh(subcommand, name = "convert")]
|
#[argp(subcommand, name = "convert")]
|
||||||
struct ConvertArgs {
|
struct ConvertArgs {
|
||||||
#[argh(positional)]
|
#[argp(positional)]
|
||||||
/// path to disc image
|
/// path to disc image
|
||||||
file: PathBuf,
|
file: PathBuf,
|
||||||
#[argh(positional)]
|
#[argp(positional)]
|
||||||
/// output ISO file
|
/// output ISO file
|
||||||
out: PathBuf,
|
out: PathBuf,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(FromArgs, Debug)]
|
#[derive(FromArgs, Debug)]
|
||||||
/// Verifies a disc image.
|
/// Verifies a disc image.
|
||||||
#[argh(subcommand, name = "verify")]
|
#[argp(subcommand, name = "verify")]
|
||||||
struct VerifyArgs {
|
struct VerifyArgs {
|
||||||
#[argh(positional)]
|
#[argp(positional)]
|
||||||
/// path to disc image
|
/// path to disc image
|
||||||
file: PathBuf,
|
file: PathBuf,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
|
||||||
|
enum LogLevel {
|
||||||
|
Error,
|
||||||
|
Warn,
|
||||||
|
Info,
|
||||||
|
Debug,
|
||||||
|
Trace,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FromStr for LogLevel {
|
||||||
|
type Err = ();
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
|
||||||
|
Ok(match s {
|
||||||
|
"error" => Self::Error,
|
||||||
|
"warn" => Self::Warn,
|
||||||
|
"info" => Self::Info,
|
||||||
|
"debug" => Self::Debug,
|
||||||
|
"trace" => Self::Trace,
|
||||||
|
_ => return Err(()),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ToString for LogLevel {
|
||||||
|
fn to_string(&self) -> String {
|
||||||
|
match self {
|
||||||
|
LogLevel::Error => "error",
|
||||||
|
LogLevel::Warn => "warn",
|
||||||
|
LogLevel::Info => "info",
|
||||||
|
LogLevel::Debug => "debug",
|
||||||
|
LogLevel::Trace => "trace",
|
||||||
|
}
|
||||||
|
.to_string()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FromArgValue for LogLevel {
|
||||||
|
fn from_arg_value(value: &OsStr) -> std::result::Result<Self, String> {
|
||||||
|
String::from_arg_value(value)
|
||||||
|
.and_then(|s| Self::from_str(&s).map_err(|_| "Invalid log level".to_string()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Duplicated from supports-color so we can check early.
|
||||||
|
fn env_no_color() -> bool {
|
||||||
|
match env::var("NO_COLOR").as_deref() {
|
||||||
|
Ok("") | Ok("0") | Err(_) => false,
|
||||||
|
Ok(_) => true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
fn main() {
|
fn main() {
|
||||||
let args: TopLevel = argh::from_env();
|
let args: TopLevel = argp_version::from_env();
|
||||||
let result = match args.command {
|
let use_colors = if args.no_color || env_no_color() {
|
||||||
|
false
|
||||||
|
} else {
|
||||||
|
// Try to enable ANSI support on Windows.
|
||||||
|
let _ = enable_ansi_support();
|
||||||
|
// Disable isatty check for supports-color. (e.g. when used with ninja)
|
||||||
|
env::set_var("IGNORE_IS_TERMINAL", "1");
|
||||||
|
supports_color::on(Stream::Stdout).is_some_and(|c| c.has_basic)
|
||||||
|
};
|
||||||
|
|
||||||
|
let format =
|
||||||
|
tracing_subscriber::fmt::format().with_ansi(use_colors).with_target(false).without_time();
|
||||||
|
let builder = tracing_subscriber::fmt().event_format(format);
|
||||||
|
if let Some(level) = args.log_level {
|
||||||
|
builder
|
||||||
|
.with_max_level(match level {
|
||||||
|
LogLevel::Error => LevelFilter::ERROR,
|
||||||
|
LogLevel::Warn => LevelFilter::WARN,
|
||||||
|
LogLevel::Info => LevelFilter::INFO,
|
||||||
|
LogLevel::Debug => LevelFilter::DEBUG,
|
||||||
|
LogLevel::Trace => LevelFilter::TRACE,
|
||||||
|
})
|
||||||
|
.init();
|
||||||
|
} else {
|
||||||
|
builder
|
||||||
|
.with_env_filter(
|
||||||
|
EnvFilter::builder()
|
||||||
|
.with_default_directive(LevelFilter::INFO.into())
|
||||||
|
.from_env_lossy(),
|
||||||
|
)
|
||||||
|
.init();
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut result = Ok(());
|
||||||
|
if let Some(dir) = &args.chdir {
|
||||||
|
result = env::set_current_dir(dir).map_err(|e| {
|
||||||
|
nod::Error::Io(format!("Failed to change working directory to '{}'", dir.display()), e)
|
||||||
|
});
|
||||||
|
}
|
||||||
|
result = result.and_then(|_| match args.command {
|
||||||
|
SubCommand::Info(c_args) => info(c_args),
|
||||||
SubCommand::Convert(c_args) => convert(c_args),
|
SubCommand::Convert(c_args) => convert(c_args),
|
||||||
SubCommand::Extract(c_args) => extract(c_args),
|
SubCommand::Extract(c_args) => extract(c_args),
|
||||||
SubCommand::Verify(c_args) => verify(c_args),
|
SubCommand::Verify(c_args) => verify(c_args),
|
||||||
};
|
});
|
||||||
if let Err(e) = result {
|
if let Err(e) = result {
|
||||||
eprintln!("Failed: {}", e);
|
eprintln!("Failed: {}", e);
|
||||||
if let Some(source) = e.source() {
|
if let Some(source) = e.source() {
|
||||||
|
@@ -86,32 +219,108 @@ fn main() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn print_header(header: &DiscHeader) {
|
||||||
|
println!("Name: {}", header.game_title_str());
|
||||||
|
println!("Game ID: {}", header.game_id_str());
|
||||||
|
println!("Disc {}, Revision {}", header.disc_num + 1, header.disc_version);
|
||||||
|
if header.no_partition_hashes != 0 {
|
||||||
|
println!("[!] Disc has no hashes");
|
||||||
|
}
|
||||||
|
if header.no_partition_encryption != 0 {
|
||||||
|
println!("[!] Disc is not encrypted");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn info(args: InfoArgs) -> Result<()> {
|
||||||
|
let disc = Disc::new_with_options(args.file, &OpenOptions {
|
||||||
|
rebuild_hashes: false,
|
||||||
|
validate_hashes: false,
|
||||||
|
rebuild_encryption: false,
|
||||||
|
})?;
|
||||||
|
let header = disc.header();
|
||||||
|
print_header(header);
|
||||||
|
|
||||||
|
if header.is_wii() {
|
||||||
|
for (idx, info) in disc.partitions().iter().enumerate() {
|
||||||
|
println!();
|
||||||
|
println!("Partition {}:{}", info.group_index, info.part_index);
|
||||||
|
println!("\tType: {}", info.kind);
|
||||||
|
println!("\tPartition offset: {:#X}", info.part_offset);
|
||||||
|
println!(
|
||||||
|
"\tData offset / size: {:#X} / {:#X} ({})",
|
||||||
|
info.part_offset + info.data_offset,
|
||||||
|
info.data_size,
|
||||||
|
file_size::fit_4(info.data_size)
|
||||||
|
);
|
||||||
|
if let Some(header) = &info.header {
|
||||||
|
println!(
|
||||||
|
"\tTMD offset / size: {:#X} / {:#X}",
|
||||||
|
info.part_offset + header.tmd_off(),
|
||||||
|
header.tmd_size()
|
||||||
|
);
|
||||||
|
println!(
|
||||||
|
"\tCert offset / size: {:#X} / {:#X}",
|
||||||
|
info.part_offset + header.cert_chain_off(),
|
||||||
|
header.cert_chain_size()
|
||||||
|
);
|
||||||
|
println!(
|
||||||
|
"\tH3 offset / size: {:#X} / {:#X}",
|
||||||
|
info.part_offset + header.h3_table_off(),
|
||||||
|
header.h3_table_size()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut partition = disc.open_partition(idx)?;
|
||||||
|
let meta = partition.meta()?;
|
||||||
|
let header = meta.header();
|
||||||
|
let tmd = meta.tmd_header();
|
||||||
|
let title_id_str = if let Some(tmd) = tmd {
|
||||||
|
format!(
|
||||||
|
"{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}",
|
||||||
|
tmd.title_id[0],
|
||||||
|
tmd.title_id[1],
|
||||||
|
tmd.title_id[2],
|
||||||
|
tmd.title_id[3],
|
||||||
|
tmd.title_id[4],
|
||||||
|
tmd.title_id[5],
|
||||||
|
tmd.title_id[6],
|
||||||
|
tmd.title_id[7]
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
"N/A".to_string()
|
||||||
|
};
|
||||||
|
println!("\tName: {}", header.game_title_str());
|
||||||
|
println!("\tGame ID: {} ({})", header.game_id_str(), title_id_str);
|
||||||
|
println!("\tDisc {}, Revision {}", header.disc_num + 1, header.disc_version);
|
||||||
|
}
|
||||||
|
} else if header.is_gamecube() {
|
||||||
|
// TODO
|
||||||
|
} else {
|
||||||
|
println!(
|
||||||
|
"Invalid GC/Wii magic: {:#010X}/{:#010X}",
|
||||||
|
header.gcn_magic.get(),
|
||||||
|
header.wii_magic.get()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
fn convert(args: ConvertArgs) -> Result<()> { convert_and_verify(&args.file, Some(&args.out)) }
|
fn convert(args: ConvertArgs) -> Result<()> { convert_and_verify(&args.file, Some(&args.out)) }
|
||||||
|
|
||||||
fn verify(args: VerifyArgs) -> Result<()> { convert_and_verify(&args.file, None) }
|
fn verify(args: VerifyArgs) -> Result<()> { convert_and_verify(&args.file, None) }
|
||||||
|
|
||||||
fn convert_and_verify(in_file: &Path, out_file: Option<&Path>) -> Result<()> {
|
fn convert_and_verify(in_file: &Path, out_file: Option<&Path>) -> Result<()> {
|
||||||
println!("Loading {}", in_file.display());
|
println!("Loading {}", in_file.display());
|
||||||
let mut disc_io = new_disc_io(in_file, &DiscIOOptions { rebuild_hashes: true })?;
|
let disc = Disc::new_with_options(in_file, &OpenOptions {
|
||||||
let disc_base = new_disc_base(disc_io.as_mut())?;
|
rebuild_hashes: true,
|
||||||
let header = disc_base.get_header();
|
validate_hashes: false,
|
||||||
println!(
|
rebuild_encryption: true,
|
||||||
"\nGame ID: {}{}{}{}{}{}",
|
})?;
|
||||||
header.game_id[0] as char,
|
let header = disc.header();
|
||||||
header.game_id[1] as char,
|
print_header(header);
|
||||||
header.game_id[2] as char,
|
|
||||||
header.game_id[3] as char,
|
|
||||||
header.game_id[4] as char,
|
|
||||||
header.game_id[5] as char
|
|
||||||
);
|
|
||||||
println!("Game title: {}", header.game_title);
|
|
||||||
println!("Disc num: {}", header.disc_num);
|
|
||||||
println!("Disc version: {}", header.disc_version);
|
|
||||||
|
|
||||||
let mut stream = disc_io.begin_read_stream(0).context("Creating disc read stream")?;
|
let meta = disc.meta()?;
|
||||||
let mut crc = crc32fast::Hasher::new();
|
let mut stream = disc.open()?.take(disc.disc_size());
|
||||||
let mut md5 = md5::Md5::new();
|
|
||||||
let mut sha1 = sha1::Sha1::new();
|
|
||||||
|
|
||||||
let mut file = if let Some(out_file) = out_file {
|
let mut file = if let Some(out_file) = out_file {
|
||||||
Some(
|
Some(
|
||||||
|
@@ -122,39 +331,130 @@ fn convert_and_verify(in_file: &Path, out_file: Option<&Path>) -> Result<()> {
|
||||||
None
|
None
|
||||||
};
|
};
|
||||||
|
|
||||||
|
println!("\nHashing...");
|
||||||
|
let pb = ProgressBar::new(stream.limit());
|
||||||
|
pb.set_style(ProgressStyle::with_template("{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {bytes}/{total_bytes} ({bytes_per_sec}, {eta})")
|
||||||
|
.unwrap()
|
||||||
|
.with_key("eta", |state: &ProgressState, w: &mut dyn std::fmt::Write| {
|
||||||
|
write!(w, "{:.1}s", state.eta().as_secs_f64()).unwrap()
|
||||||
|
})
|
||||||
|
.progress_chars("#>-"));
|
||||||
|
|
||||||
const BUFFER_SIZE: usize = 1015808; // LCM(0x8000, 0x7C00)
|
const BUFFER_SIZE: usize = 1015808; // LCM(0x8000, 0x7C00)
|
||||||
let mut buf = vec![0u8; BUFFER_SIZE];
|
let digest_threads = [
|
||||||
|
digest_thread::<crc32fast::Hasher>(),
|
||||||
|
digest_thread::<md5::Md5>(),
|
||||||
|
digest_thread::<sha1::Sha1>(),
|
||||||
|
digest_thread::<xxhash_rust::xxh64::Xxh64>(),
|
||||||
|
];
|
||||||
|
|
||||||
|
let (w_tx, w_rx) = sync_channel::<Arc<[u8]>>(1);
|
||||||
|
let w_thread = thread::spawn(move || {
|
||||||
|
let mut total_written = 0u64;
|
||||||
|
while let Ok(data) = w_rx.recv() {
|
||||||
|
if let Some(file) = &mut file {
|
||||||
|
file.write_all(data.as_ref())
|
||||||
|
.with_context(|| {
|
||||||
|
format!("Writing {} bytes at offset {}", data.len(), total_written)
|
||||||
|
})
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
total_written += data.len() as u64;
|
||||||
|
pb.set_position(total_written);
|
||||||
|
}
|
||||||
|
if let Some(mut file) = file {
|
||||||
|
file.flush().context("Flushing output file").unwrap();
|
||||||
|
}
|
||||||
|
pb.finish();
|
||||||
|
});
|
||||||
|
|
||||||
let mut total_read = 0u64;
|
let mut total_read = 0u64;
|
||||||
|
let mut buf = <u8>::new_box_slice_zeroed(BUFFER_SIZE);
|
||||||
loop {
|
loop {
|
||||||
let read = stream.read(&mut buf).with_context(|| {
|
let read = stream.read(buf.as_mut()).with_context(|| {
|
||||||
format!("Reading {} bytes at disc offset {}", BUFFER_SIZE, total_read)
|
format!("Reading {} bytes at disc offset {}", BUFFER_SIZE, total_read)
|
||||||
})?;
|
})?;
|
||||||
if read == 0 {
|
if read == 0 {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
let slice = &buf[..read];
|
|
||||||
crc.update(slice);
|
let arc = Arc::<[u8]>::from(&buf[..read]);
|
||||||
md5.update(slice);
|
for (tx, _) in &digest_threads {
|
||||||
sha1.update(slice);
|
tx.send(arc.clone()).map_err(|_| "Sending data to hash thread")?;
|
||||||
if let Some(file) = &mut file {
|
|
||||||
file.write_all(slice).with_context(|| {
|
|
||||||
format!("Writing {} bytes at offset {}", slice.len(), total_read)
|
|
||||||
})?;
|
|
||||||
}
|
}
|
||||||
|
w_tx.send(arc).map_err(|_| "Sending data to write thread")?;
|
||||||
total_read += read as u64;
|
total_read += read as u64;
|
||||||
}
|
}
|
||||||
|
drop(w_tx); // Close channel
|
||||||
|
w_thread.join().unwrap();
|
||||||
|
|
||||||
|
println!();
|
||||||
|
if let Some(path) = out_file {
|
||||||
|
println!("Wrote {} to {}", file_size::fit_4(total_read), path.display());
|
||||||
|
}
|
||||||
|
|
||||||
println!();
|
println!();
|
||||||
println!("CRC32: {:08x}", crc.finalize());
|
for (tx, handle) in digest_threads.into_iter() {
|
||||||
println!("MD5: {:032x}", md5.finalize());
|
drop(tx); // Close channel
|
||||||
println!("SHA-1: {:040x}", sha1.finalize());
|
match handle.join().unwrap() {
|
||||||
if let (Some(path), Some(file)) = (out_file, &mut file) {
|
DigestResult::Crc32(crc) => {
|
||||||
file.flush().context("Flushing output file")?;
|
print!("CRC32: {:08x}", crc);
|
||||||
println!("Wrote {} to {}", file_size::fit_4(total_read), path.display());
|
if let Some(expected_crc) = meta.crc32 {
|
||||||
|
if expected_crc != crc {
|
||||||
|
print!(" ❌ (expected: {:08x})", expected_crc);
|
||||||
|
} else {
|
||||||
|
print!(" ✅");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
println!();
|
||||||
|
}
|
||||||
|
DigestResult::Md5(md5) => {
|
||||||
|
print!("MD5: {:032x}", md5);
|
||||||
|
if let Some(expected_md5) = meta.md5 {
|
||||||
|
let expected_md5 = <Output<md5::Md5>>::from(expected_md5);
|
||||||
|
if expected_md5 != md5 {
|
||||||
|
print!(" ❌ (expected: {:032x})", expected_md5);
|
||||||
|
} else {
|
||||||
|
print!(" ✅");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
println!();
|
||||||
|
}
|
||||||
|
DigestResult::Sha1(sha1) => {
|
||||||
|
print!("SHA-1: {:040x}", sha1);
|
||||||
|
if let Some(expected_sha1) = meta.sha1 {
|
||||||
|
let expected_sha1 = <Output<sha1::Sha1>>::from(expected_sha1);
|
||||||
|
if expected_sha1 != sha1 {
|
||||||
|
print!(" ❌ (expected: {:040x})", expected_sha1);
|
||||||
|
} else {
|
||||||
|
print!(" ✅");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
println!();
|
||||||
|
}
|
||||||
|
DigestResult::Xxh64(xxh64) => {
|
||||||
|
print!("XXH64: {:016x}", xxh64);
|
||||||
|
if let Some(expected_xxh64) = meta.xxhash64 {
|
||||||
|
if expected_xxh64 != xxh64 {
|
||||||
|
print!(" ❌ (expected: {:016x})", expected_xxh64);
|
||||||
|
} else {
|
||||||
|
print!(" ✅");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
println!();
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn has_extension(filename: &Path, extension: &str) -> bool {
|
||||||
|
match filename.extension() {
|
||||||
|
Some(ext) => ext.eq_ignore_ascii_case(extension),
|
||||||
|
None => false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
fn extract(args: ExtractArgs) -> Result<()> {
|
fn extract(args: ExtractArgs) -> Result<()> {
|
||||||
let output_dir: PathBuf;
|
let output_dir: PathBuf;
|
||||||
if let Some(dir) = args.out {
|
if let Some(dir) = args.out {
|
||||||
|
@@ -169,24 +469,54 @@ fn extract(args: ExtractArgs) -> Result<()> {
|
||||||
} else {
|
} else {
|
||||||
output_dir = args.file.with_extension("");
|
output_dir = args.file.with_extension("");
|
||||||
}
|
}
|
||||||
let mut disc_io = new_disc_io(&args.file, &DiscIOOptions { rebuild_hashes: args.validate })?;
|
let disc = Disc::new_with_options(&args.file, &OpenOptions {
|
||||||
let disc_base = new_disc_base(disc_io.as_mut())?;
|
rebuild_hashes: args.validate,
|
||||||
let mut partition =
|
validate_hashes: args.validate,
|
||||||
disc_base.get_partition(disc_io.as_mut(), PartitionType::Data, args.validate)?;
|
rebuild_encryption: false,
|
||||||
let header = partition.read_header()?;
|
})?;
|
||||||
extract_sys_files(header.as_ref(), &output_dir.join("sys"), args.quiet)?;
|
let is_wii = disc.header().is_wii();
|
||||||
extract_node(header.root_node(), partition.as_mut(), &output_dir.join("files"), args.quiet)?;
|
let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
|
||||||
|
let meta = partition.meta()?;
|
||||||
|
extract_sys_files(meta.as_ref(), &output_dir.join("sys"), args.quiet)?;
|
||||||
|
|
||||||
|
// Extract FST
|
||||||
|
let files_dir = output_dir.join("files");
|
||||||
|
let fst = Fst::new(&meta.raw_fst)?;
|
||||||
|
let mut path_segments = Vec::<(Cow<str>, usize)>::new();
|
||||||
|
for (idx, node, name) in fst.iter() {
|
||||||
|
// Remove ended path segments
|
||||||
|
let mut new_size = 0;
|
||||||
|
for (_, end) in path_segments.iter() {
|
||||||
|
if *end == idx {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
new_size += 1;
|
||||||
|
}
|
||||||
|
path_segments.truncate(new_size);
|
||||||
|
|
||||||
|
// Add the new path segment
|
||||||
|
let end = if node.is_dir() { node.length(false) as usize } else { idx + 1 };
|
||||||
|
path_segments.push((name?, end));
|
||||||
|
|
||||||
|
let path = path_segments.iter().map(|(name, _)| name.as_ref()).join("/");
|
||||||
|
if node.is_dir() {
|
||||||
|
fs::create_dir_all(files_dir.join(&path))
|
||||||
|
.with_context(|| format!("Creating directory {}", path))?;
|
||||||
|
} else {
|
||||||
|
extract_node(node, partition.as_mut(), &files_dir, &path, is_wii, args.quiet)?;
|
||||||
|
}
|
||||||
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn extract_sys_files(header: &dyn PartHeader, out_dir: &Path, quiet: bool) -> Result<()> {
|
fn extract_sys_files(data: &PartitionMeta, out_dir: &Path, quiet: bool) -> Result<()> {
|
||||||
fs::create_dir_all(out_dir)
|
fs::create_dir_all(out_dir)
|
||||||
.with_context(|| format!("Creating output directory {}", out_dir.display()))?;
|
.with_context(|| format!("Creating output directory {}", out_dir.display()))?;
|
||||||
extract_file(header.boot_bytes(), &out_dir.join("boot.bin"), quiet)?;
|
extract_file(&data.raw_boot, &out_dir.join("boot.bin"), quiet)?;
|
||||||
extract_file(header.bi2_bytes(), &out_dir.join("bi2.bin"), quiet)?;
|
extract_file(&data.raw_bi2, &out_dir.join("bi2.bin"), quiet)?;
|
||||||
extract_file(header.apploader_bytes(), &out_dir.join("apploader.img"), quiet)?;
|
extract_file(&data.raw_apploader, &out_dir.join("apploader.img"), quiet)?;
|
||||||
extract_file(header.fst_bytes(), &out_dir.join("fst.bin"), quiet)?;
|
extract_file(&data.raw_fst, &out_dir.join("fst.bin"), quiet)?;
|
||||||
extract_file(header.dol_bytes(), &out_dir.join("main.dol"), quiet)?;
|
extract_file(&data.raw_dol, &out_dir.join("main.dol"), quiet)?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -203,53 +533,93 @@ fn extract_file(bytes: &[u8], out_path: &Path, quiet: bool) -> Result<()> {
|
||||||
}
|
}
|
||||||
|
|
||||||
fn extract_node(
|
fn extract_node(
|
||||||
node: &NodeType,
|
node: &Node,
|
||||||
partition: &mut dyn PartReadStream,
|
partition: &mut dyn PartitionBase,
|
||||||
base_path: &Path,
|
base_path: &Path,
|
||||||
|
name: &str,
|
||||||
|
is_wii: bool,
|
||||||
quiet: bool,
|
quiet: bool,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
match node {
|
let file_path = base_path.join(name);
|
||||||
NodeType::File(v) => {
|
if !quiet {
|
||||||
let mut file_path = base_path.to_path_buf();
|
println!(
|
||||||
file_path.push(v.name.as_str());
|
"Extracting {} (size: {})",
|
||||||
if !quiet {
|
file_path.display(),
|
||||||
println!(
|
file_size::fit_4(node.length(is_wii))
|
||||||
"Extracting {} (size: {})",
|
);
|
||||||
file_path.display(),
|
|
||||||
file_size::fit_4(v.length as u64)
|
|
||||||
);
|
|
||||||
}
|
|
||||||
let file = File::create(&file_path)
|
|
||||||
.with_context(|| format!("Creating file {}", file_path.display()))?;
|
|
||||||
let mut buf_writer = BufWriter::with_capacity(partition.ideal_buffer_size(), file);
|
|
||||||
let mut stream = partition.begin_file_stream(v).with_context(|| {
|
|
||||||
format!(
|
|
||||||
"Opening file {} on disc for reading (offset {}, size {})",
|
|
||||||
v.name, v.offset, v.length
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
io::copy(&mut stream, &mut buf_writer)
|
|
||||||
.with_context(|| format!("Extracting file {}", file_path.display()))?;
|
|
||||||
buf_writer.flush().with_context(|| format!("Flushing file {}", file_path.display()))?;
|
|
||||||
}
|
|
||||||
NodeType::Directory(v, c) => {
|
|
||||||
if v.name.is_empty() {
|
|
||||||
fs::create_dir_all(base_path).with_context(|| {
|
|
||||||
format!("Creating output directory {}", base_path.display())
|
|
||||||
})?;
|
|
||||||
for x in c {
|
|
||||||
extract_node(x, partition, base_path, quiet)?;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
let mut new_base = base_path.to_path_buf();
|
|
||||||
new_base.push(v.name.as_str());
|
|
||||||
fs::create_dir_all(&new_base)
|
|
||||||
.with_context(|| format!("Creating output directory {}", new_base.display()))?;
|
|
||||||
for x in c {
|
|
||||||
extract_node(x, partition, new_base.as_path(), quiet)?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
let file = File::create(&file_path)
|
||||||
|
.with_context(|| format!("Creating file {}", file_path.display()))?;
|
||||||
|
let mut w = BufWriter::with_capacity(partition.ideal_buffer_size(), file);
|
||||||
|
let mut r = partition.open_file(node).with_context(|| {
|
||||||
|
format!(
|
||||||
|
"Opening file {} on disc for reading (offset {}, size {})",
|
||||||
|
name,
|
||||||
|
node.offset(is_wii),
|
||||||
|
node.length(is_wii)
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
io::copy(&mut r, &mut w).with_context(|| format!("Extracting file {}", file_path.display()))?;
|
||||||
|
w.flush().with_context(|| format!("Flushing file {}", file_path.display()))?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn digest_thread<H>() -> (SyncSender<Arc<[u8]>>, JoinHandle<DigestResult>)
|
||||||
|
where H: Hasher + Send + 'static {
|
||||||
|
let (tx, rx) = sync_channel::<Arc<[u8]>>(1);
|
||||||
|
let handle = thread::spawn(move || {
|
||||||
|
let mut hasher = H::new();
|
||||||
|
while let Ok(data) = rx.recv() {
|
||||||
|
hasher.update(data.as_ref());
|
||||||
|
}
|
||||||
|
hasher.finalize()
|
||||||
|
});
|
||||||
|
(tx, handle)
|
||||||
|
}
|
||||||
|
|
||||||
|
enum DigestResult {
|
||||||
|
Crc32(u32),
|
||||||
|
Md5(Output<md5::Md5>),
|
||||||
|
Sha1(Output<sha1::Sha1>),
|
||||||
|
Xxh64(u64),
|
||||||
|
}
|
||||||
|
|
||||||
|
trait Hasher {
|
||||||
|
fn new() -> Self;
|
||||||
|
fn finalize(self) -> DigestResult;
|
||||||
|
fn update(&mut self, data: &[u8]);
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Hasher for md5::Md5 {
|
||||||
|
fn new() -> Self { Digest::new() }
|
||||||
|
|
||||||
|
fn finalize(self) -> DigestResult { DigestResult::Md5(Digest::finalize(self)) }
|
||||||
|
|
||||||
|
fn update(&mut self, data: &[u8]) { Digest::update(self, data) }
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Hasher for sha1::Sha1 {
|
||||||
|
fn new() -> Self { Digest::new() }
|
||||||
|
|
||||||
|
fn finalize(self) -> DigestResult { DigestResult::Sha1(Digest::finalize(self)) }
|
||||||
|
|
||||||
|
fn update(&mut self, data: &[u8]) { Digest::update(self, data) }
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Hasher for crc32fast::Hasher {
|
||||||
|
fn new() -> Self { crc32fast::Hasher::new() }
|
||||||
|
|
||||||
|
fn finalize(self) -> DigestResult { DigestResult::Crc32(crc32fast::Hasher::finalize(self)) }
|
||||||
|
|
||||||
|
fn update(&mut self, data: &[u8]) { crc32fast::Hasher::update(self, data) }
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Hasher for xxhash_rust::xxh64::Xxh64 {
|
||||||
|
fn new() -> Self { xxhash_rust::xxh64::Xxh64::new(0) }
|
||||||
|
|
||||||
|
fn finalize(self) -> DigestResult {
|
||||||
|
DigestResult::Xxh64(xxhash_rust::xxh64::Xxh64::digest(&self))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn update(&mut self, data: &[u8]) { xxhash_rust::xxh64::Xxh64::update(self, data) }
|
||||||
|
}
|
||||||
|
|
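The `digest_thread` helper above gives each hash its own worker thread that consumes shared `Arc<[u8]>` chunks from a bounded channel, so a single disc read feeds CRC32, MD5, SHA-1 and XXH64 in parallel. A condensed sketch of the driving loop, assuming the `Hasher` trait and `DigestResult` enum from this file are in scope:

```rust
// Hypothetical driver mirroring convert_and_verify's fan-out pattern.
fn hash_all(mut input: impl std::io::Read) -> std::io::Result<Vec<DigestResult>> {
    let threads = [digest_thread::<crc32fast::Hasher>(), digest_thread::<sha1::Sha1>()];
    let mut buf = vec![0u8; 0x8000];
    loop {
        let read = input.read(&mut buf)?;
        if read == 0 {
            break;
        }
        // Share one copy of the chunk with every hashing thread.
        let chunk = std::sync::Arc::<[u8]>::from(&buf[..read]);
        for (tx, _) in &threads {
            let _ = tx.send(chunk.clone());
        }
    }
    Ok(threads
        .into_iter()
        .map(|(tx, handle)| {
            drop(tx); // closing the channel lets the worker finish
            handle.join().expect("hash thread panicked")
        })
        .collect())
}
```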
src/disc/gcn.rs (248 lines changed)
|
@@ -1,77 +1,109 @@
|
||||||
use std::{
|
use std::{
|
||||||
io,
|
io,
|
||||||
io::{Cursor, Read, Seek, SeekFrom},
|
io::{Read, Seek, SeekFrom},
|
||||||
|
mem::size_of,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
use zerocopy::FromBytes;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
|
array_ref,
|
||||||
disc::{
|
disc::{
|
||||||
AppLoaderHeader, DiscBase, DiscIO, DolHeader, Header, PartHeader, PartReadStream,
|
AppLoaderHeader, DiscBase, DiscHeader, DiscIO, DolHeader, PartitionBase, PartitionHeader,
|
||||||
PartitionHeader, PartitionType, SECTOR_SIZE,
|
PartitionInfo, PartitionKind, PartitionMeta, BI2_SIZE, BOOT_SIZE, MINI_DVD_SIZE,
|
||||||
|
SECTOR_SIZE,
|
||||||
},
|
},
|
||||||
fst::{find_node, read_fst, Node, NodeKind, NodeType},
|
fst::{Node, NodeKind},
|
||||||
streams::{ReadStream, SharedWindowedReadStream},
|
streams::{ReadStream, SharedWindowedReadStream},
|
||||||
util::{
|
util::{
|
||||||
div_rem,
|
div_rem,
|
||||||
reader::{read_bytes, FromReader},
|
reader::{read_from, read_vec},
|
||||||
},
|
},
|
||||||
Error, Result, ResultContext,
|
Error, OpenOptions, Result, ResultContext,
|
||||||
};
|
};
|
||||||
|
|
||||||
pub(crate) struct DiscGCN {
|
pub(crate) struct DiscGCN {
|
||||||
pub(crate) header: Header,
|
pub(crate) header: DiscHeader,
|
||||||
|
pub(crate) disc_size: u64,
|
||||||
|
// pub(crate) junk_start: u64,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl DiscGCN {
|
impl DiscGCN {
|
||||||
pub(crate) fn new(header: Header) -> Result<DiscGCN> { Ok(DiscGCN { header }) }
|
pub(crate) fn new(
|
||||||
|
_stream: &mut dyn ReadStream,
|
||||||
|
header: DiscHeader,
|
||||||
|
disc_size: Option<u64>,
|
||||||
|
) -> Result<DiscGCN> {
|
||||||
|
// stream.seek(SeekFrom::Start(size_of::<DiscHeader>() as u64)).context("Seeking to partition header")?;
|
||||||
|
// let partition_header: PartitionHeader = read_from(stream).context("Reading partition header")?;
|
||||||
|
// let junk_start = partition_header.fst_off(false) + partition_header.fst_sz(false);
|
||||||
|
Ok(DiscGCN { header, disc_size: disc_size.unwrap_or(MINI_DVD_SIZE) /*, junk_start*/ })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn open_partition<'a>(disc_io: &'a dyn DiscIO) -> Result<Box<dyn PartitionBase + 'a>> {
|
||||||
|
let stream = disc_io.open()?;
|
||||||
|
Ok(Box::new(PartitionGC { stream, offset: 0, cur_block: u32::MAX, buf: [0; SECTOR_SIZE] }))
|
||||||
}
|
}
|
||||||
|
|
||||||
impl DiscBase for DiscGCN {
|
impl DiscBase for DiscGCN {
|
||||||
fn get_header(&self) -> &Header { &self.header }
|
fn header(&self) -> &DiscHeader { &self.header }
|
||||||
|
|
||||||
fn get_data_partition<'a>(
|
fn partitions(&self) -> Vec<PartitionInfo> {
|
||||||
&self,
|
vec![PartitionInfo {
|
||||||
disc_io: &'a mut dyn DiscIO,
|
group_index: 0,
|
||||||
_validate_hashes: bool,
|
part_index: 0,
|
||||||
) -> Result<Box<dyn PartReadStream + 'a>> {
|
part_offset: 0,
|
||||||
let stream = disc_io.begin_read_stream(0).context("Opening data partition stream")?;
|
kind: PartitionKind::Data,
|
||||||
Ok(Box::from(GCPartReadStream {
|
data_offset: 0,
|
||||||
stream,
|
data_size: self.disc_size,
|
||||||
offset: 0,
|
header: None,
|
||||||
cur_block: u32::MAX,
|
lfg_seed: *array_ref!(self.header.game_id, 0, 4),
|
||||||
buf: [0; SECTOR_SIZE],
|
// junk_start: self.junk_start,
|
||||||
}))
|
}]
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_partition<'a>(
|
fn open_partition<'a>(
|
||||||
&self,
|
&self,
|
||||||
disc_io: &'a mut dyn DiscIO,
|
disc_io: &'a dyn DiscIO,
|
||||||
part_type: PartitionType,
|
index: usize,
|
||||||
_validate_hashes: bool,
|
_options: &OpenOptions,
|
||||||
) -> Result<Box<dyn PartReadStream + 'a>> {
|
) -> Result<Box<dyn PartitionBase + 'a>> {
|
||||||
if part_type == PartitionType::Data {
|
if index != 0 {
|
||||||
Ok(Box::from(GCPartReadStream {
|
return Err(Error::DiscFormat(format!(
|
||||||
stream: disc_io.begin_read_stream(0).context("Opening partition read stream")?,
|
"Invalid partition index {} for GameCube disc",
|
||||||
offset: 0,
|
index
|
||||||
cur_block: u32::MAX,
|
)));
|
||||||
buf: [0; SECTOR_SIZE],
|
}
|
||||||
}))
|
open_partition(disc_io)
|
||||||
} else {
|
}
|
||||||
Err(Error::DiscFormat(format!(
|
|
||||||
|
fn open_partition_kind<'a>(
|
||||||
|
&self,
|
||||||
|
disc_io: &'a dyn DiscIO,
|
||||||
|
part_type: PartitionKind,
|
||||||
|
_options: &OpenOptions,
|
||||||
|
) -> Result<Box<dyn PartitionBase + 'a>> {
|
||||||
|
if part_type != PartitionKind::Data {
|
||||||
|
return Err(Error::DiscFormat(format!(
|
||||||
"Invalid partition type {:?} for GameCube disc",
|
"Invalid partition type {:?} for GameCube disc",
|
||||||
part_type
|
part_type
|
||||||
)))
|
)));
|
||||||
}
|
}
|
||||||
|
open_partition(disc_io)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn disc_size(&self) -> u64 { self.disc_size }
|
||||||
}
|
}
|
||||||
|
|
||||||
struct GCPartReadStream<'a> {
|
struct PartitionGC<'a> {
|
||||||
stream: Box<dyn ReadStream + 'a>,
|
stream: Box<dyn ReadStream + 'a>,
|
||||||
offset: u64,
|
offset: u64,
|
||||||
cur_block: u32,
|
cur_block: u32,
|
||||||
buf: [u8; SECTOR_SIZE],
|
buf: [u8; SECTOR_SIZE],
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a> Read for GCPartReadStream<'a> {
|
impl<'a> Read for PartitionGC<'a> {
|
||||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||||
let (block, block_offset) = div_rem(self.offset, SECTOR_SIZE as u64);
|
let (block, block_offset) = div_rem(self.offset, SECTOR_SIZE as u64);
|
||||||
let mut block = block as u32;
|
let mut block = block as u32;
|
||||||
|
@@ -104,12 +136,12 @@ impl<'a> Read for GCPartReadStream<'a> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a> Seek for GCPartReadStream<'a> {
|
impl<'a> Seek for PartitionGC<'a> {
|
||||||
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
|
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
|
||||||
self.offset = match pos {
|
self.offset = match pos {
|
||||||
SeekFrom::Start(v) => v,
|
SeekFrom::Start(v) => v,
|
||||||
SeekFrom::End(v) => (self.stable_stream_len()? as i64 + v) as u64,
|
SeekFrom::End(v) => self.stable_stream_len()?.saturating_add_signed(v),
|
||||||
SeekFrom::Current(v) => (self.offset as i64 + v) as u64,
|
SeekFrom::Current(v) => self.offset.saturating_add_signed(v),
|
||||||
};
|
};
|
||||||
let block = self.offset / SECTOR_SIZE as u64;
|
let block = self.offset / SECTOR_SIZE as u64;
|
||||||
if block as u32 != self.cur_block {
|
if block as u32 != self.cur_block {
|
||||||
|
@@ -122,138 +154,94 @@ impl<'a> Seek for GCPartReadStream<'a> {
|
||||||
fn stream_position(&mut self) -> io::Result<u64> { Ok(self.offset) }
|
fn stream_position(&mut self) -> io::Result<u64> { Ok(self.offset) }
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a> ReadStream for GCPartReadStream<'a> {
|
impl<'a> ReadStream for PartitionGC<'a> {
|
||||||
fn stable_stream_len(&mut self) -> io::Result<u64> { self.stream.stable_stream_len() }
|
fn stable_stream_len(&mut self) -> io::Result<u64> { self.stream.stable_stream_len() }
|
||||||
|
|
||||||
fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
|
fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a> PartReadStream for GCPartReadStream<'a> {
|
impl<'a> PartitionBase for PartitionGC<'a> {
|
||||||
fn begin_file_stream(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream> {
|
fn meta(&mut self) -> Result<Box<PartitionMeta>> {
|
||||||
assert_eq!(node.kind, NodeKind::File);
|
self.seek(SeekFrom::Start(0)).context("Seeking to partition header")?;
|
||||||
self.new_window(node.offset as u64, node.length as u64)
|
read_part_header(self, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn read_header(&mut self) -> Result<Box<dyn PartHeader>> {
|
fn open_file(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream> {
|
||||||
self.seek(SeekFrom::Start(0)).context("Seeking to partition header")?;
|
assert_eq!(node.kind(), NodeKind::File);
|
||||||
Ok(Box::from(read_part_header(self)?))
|
self.new_window(node.offset(false), node.length(false))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn ideal_buffer_size(&self) -> usize { SECTOR_SIZE }
|
fn ideal_buffer_size(&self) -> usize { SECTOR_SIZE }
|
||||||
}
|
}
|
||||||
|
|
||||||
const BOOT_SIZE: usize = Header::STATIC_SIZE + PartitionHeader::STATIC_SIZE;
|
pub(crate) fn read_part_header<R>(reader: &mut R, is_wii: bool) -> Result<Box<PartitionMeta>>
|
||||||
const BI2_SIZE: usize = 0x2000;
|
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
pub(crate) struct GCPartition {
|
|
||||||
raw_boot: [u8; BOOT_SIZE],
|
|
||||||
raw_bi2: [u8; BI2_SIZE],
|
|
||||||
raw_apploader: Vec<u8>,
|
|
||||||
raw_fst: Vec<u8>,
|
|
||||||
raw_dol: Vec<u8>,
|
|
||||||
// Parsed
|
|
||||||
header: Header,
|
|
||||||
partition_header: PartitionHeader,
|
|
||||||
apploader_header: AppLoaderHeader,
|
|
||||||
root_node: NodeType,
|
|
||||||
dol_header: DolHeader,
|
|
||||||
}
|
|
||||||
|
|
||||||
fn read_part_header<R>(reader: &mut R) -> Result<GCPartition>
|
|
||||||
where R: Read + Seek + ?Sized {
|
where R: Read + Seek + ?Sized {
|
||||||
// boot.bin
|
// boot.bin
|
||||||
let raw_boot = <[u8; BOOT_SIZE]>::from_reader(reader).context("Reading boot.bin")?;
|
let raw_boot: [u8; BOOT_SIZE] = read_from(reader).context("Reading boot.bin")?;
|
||||||
let mut boot_bytes = raw_boot.as_slice();
|
let partition_header = PartitionHeader::ref_from(&raw_boot[size_of::<DiscHeader>()..]).unwrap();
|
||||||
let header = Header::from_reader(&mut boot_bytes).context("Parsing disc header")?;
|
|
||||||
let partition_header =
|
|
||||||
PartitionHeader::from_reader(&mut boot_bytes).context("Parsing partition header")?;
|
|
||||||
debug_assert_eq!(boot_bytes.len(), 0, "failed to consume boot.bin");
|
|
||||||
|
|
||||||
// bi2.bin
|
// bi2.bin
|
||||||
let raw_bi2 = <[u8; BI2_SIZE]>::from_reader(reader).context("Reading bi2.bin")?;
|
let raw_bi2: [u8; BI2_SIZE] = read_from(reader).context("Reading bi2.bin")?;
|
||||||
|
|
||||||
// apploader.bin
|
// apploader.bin
|
||||||
let mut raw_apploader =
|
let mut raw_apploader: Vec<u8> =
|
||||||
read_bytes(reader, AppLoaderHeader::STATIC_SIZE).context("Reading apploader header")?;
|
read_vec(reader, size_of::<AppLoaderHeader>()).context("Reading apploader header")?;
|
||||||
let apploader_header = AppLoaderHeader::from_reader(&mut raw_apploader.as_slice())
|
let apploader_header = AppLoaderHeader::ref_from(raw_apploader.as_slice()).unwrap();
|
||||||
.context("Parsing apploader header")?;
|
|
||||||
raw_apploader.resize(
|
raw_apploader.resize(
|
||||||
AppLoaderHeader::STATIC_SIZE
|
size_of::<AppLoaderHeader>()
|
||||||
+ apploader_header.size as usize
|
+ apploader_header.size.get() as usize
|
||||||
+ apploader_header.trailer_size as usize,
|
+ apploader_header.trailer_size.get() as usize,
|
||||||
0,
|
0,
|
||||||
);
|
);
|
||||||
reader
|
reader
|
||||||
.read_exact(&mut raw_apploader[AppLoaderHeader::STATIC_SIZE..])
|
.read_exact(&mut raw_apploader[size_of::<AppLoaderHeader>()..])
|
||||||
.context("Reading apploader")?;
|
.context("Reading apploader")?;
|
||||||
|
|
||||||
// fst.bin
|
// fst.bin
|
||||||
reader
|
reader
|
||||||
.seek(SeekFrom::Start(partition_header.fst_off as u64))
|
.seek(SeekFrom::Start(partition_header.fst_off(is_wii)))
|
||||||
.context("Seeking to FST offset")?;
|
.context("Seeking to FST offset")?;
|
||||||
let raw_fst = read_bytes(reader, partition_header.fst_sz as usize).with_context(|| {
|
let raw_fst: Vec<u8> = read_vec(reader, partition_header.fst_sz(is_wii) as usize)
|
||||||
format!(
|
.with_context(|| {
|
||||||
"Reading partition FST (offset {}, size {})",
|
format!(
|
||||||
partition_header.fst_off, partition_header.fst_sz
|
"Reading partition FST (offset {}, size {})",
|
||||||
)
|
partition_header.fst_off, partition_header.fst_sz
|
||||||
})?;
|
)
|
||||||
let root_node = read_fst(&mut Cursor::new(&*raw_fst))?;
|
})?;
|
||||||
|
|
||||||
// main.dol
|
// main.dol
|
||||||
reader
|
reader
|
||||||
.seek(SeekFrom::Start(partition_header.dol_off as u64))
|
.seek(SeekFrom::Start(partition_header.dol_off(is_wii)))
|
||||||
.context("Seeking to DOL offset")?;
|
.context("Seeking to DOL offset")?;
|
||||||
let mut raw_dol = read_bytes(reader, DolHeader::STATIC_SIZE).context("Reading DOL header")?;
|
let mut raw_dol: Vec<u8> =
|
||||||
let dol_header =
|
read_vec(reader, size_of::<DolHeader>()).context("Reading DOL header")?;
|
||||||
DolHeader::from_reader(&mut raw_dol.as_slice()).context("Parsing DOL header")?;
|
let dol_header = DolHeader::ref_from(raw_dol.as_slice()).unwrap();
|
||||||
let dol_size = dol_header
|
let dol_size = dol_header
|
||||||
.text_offs
|
.text_offs
|
||||||
.iter()
|
.iter()
|
||||||
.zip(&dol_header.text_sizes)
|
.zip(&dol_header.text_sizes)
|
||||||
.map(|(offs, size)| offs + size)
|
.map(|(offs, size)| offs.get() + size.get())
|
||||||
.chain(
|
.chain(
|
||||||
dol_header.data_offs.iter().zip(&dol_header.data_sizes).map(|(offs, size)| offs + size),
|
dol_header
|
||||||
|
.data_offs
|
||||||
|
.iter()
|
||||||
|
.zip(&dol_header.data_sizes)
|
||||||
|
.map(|(offs, size)| offs.get() + size.get()),
|
||||||
)
|
)
|
||||||
.max()
|
.max()
|
||||||
.unwrap_or(DolHeader::STATIC_SIZE as u32);
|
.unwrap_or(size_of::<DolHeader>() as u32);
|
||||||
raw_dol.resize(dol_size as usize, 0);
|
raw_dol.resize(dol_size as usize, 0);
|
||||||
reader.read_exact(&mut raw_dol[DolHeader::STATIC_SIZE..]).context("Reading DOL")?;
|
reader.read_exact(&mut raw_dol[size_of::<DolHeader>()..]).context("Reading DOL")?;
|
||||||
|
|
||||||
Ok(GCPartition {
|
Ok(Box::new(PartitionMeta {
|
||||||
raw_boot,
|
raw_boot,
|
||||||
raw_bi2,
|
raw_bi2,
|
||||||
raw_apploader,
|
raw_apploader,
|
||||||
raw_fst,
|
raw_fst,
|
||||||
raw_dol,
|
raw_dol,
|
||||||
header,
|
raw_ticket: None,
|
||||||
partition_header,
|
raw_tmd: None,
|
||||||
apploader_header,
|
raw_cert_chain: None,
|
||||||
root_node,
|
raw_h3_table: None,
|
||||||
dol_header,
|
}))
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
impl PartHeader for GCPartition {
|
|
||||||
fn root_node(&self) -> &NodeType { &self.root_node }
|
|
||||||
|
|
||||||
fn find_node(&self, path: &str) -> Option<&NodeType> { find_node(&self.root_node, path) }
|
|
||||||
|
|
||||||
fn boot_bytes(&self) -> &[u8] { &self.raw_boot }
|
|
||||||
|
|
||||||
fn bi2_bytes(&self) -> &[u8] { &self.raw_bi2 }
|
|
||||||
|
|
||||||
fn apploader_bytes(&self) -> &[u8] { &self.raw_apploader }
|
|
||||||
|
|
||||||
fn fst_bytes(&self) -> &[u8] { &self.raw_fst }
|
|
||||||
|
|
||||||
fn dol_bytes(&self) -> &[u8] { &self.raw_dol }
|
|
||||||
|
|
||||||
fn disc_header(&self) -> &Header { &self.header }
|
|
||||||
|
|
||||||
fn partition_header(&self) -> &PartitionHeader { &self.partition_header }
|
|
||||||
|
|
||||||
fn apploader_header(&self) -> &AppLoaderHeader { &self.apploader_header }
|
|
||||||
|
|
||||||
fn dol_header(&self) -> &DolHeader { &self.dol_header }
|
|
||||||
}
|
}
|
||||||
|
|
src/disc/mod.rs (589 lines changed)
|
@@ -1,311 +1,332 @@
|
||||||
//! Disc type related logic (GameCube, Wii)
|
//! Disc type related logic (GameCube, Wii)
|
||||||
|
|
||||||
use std::{ffi::CStr, fmt::Debug, io, io::Read};
|
use std::{
|
||||||
|
borrow::Cow,
|
||||||
|
ffi::CStr,
|
||||||
|
fmt::{Debug, Display, Formatter},
|
||||||
|
io,
|
||||||
|
mem::size_of,
|
||||||
|
str::from_utf8,
|
||||||
|
};
|
||||||
|
|
||||||
|
use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
disc::{gcn::DiscGCN, wii::DiscWii},
|
disc::{
|
||||||
fst::{Node, NodeType},
|
gcn::DiscGCN,
|
||||||
|
wii::{DiscWii, Ticket, TmdHeader, WiiPartitionHeader},
|
||||||
|
},
|
||||||
|
fst::Node,
|
||||||
io::DiscIO,
|
io::DiscIO,
|
||||||
|
static_assert,
|
||||||
streams::{ReadStream, SharedWindowedReadStream},
|
streams::{ReadStream, SharedWindowedReadStream},
|
||||||
util::reader::{skip_bytes, struct_size, FromReader},
|
util::reader::read_from,
|
||||||
Error, Result, ResultContext,
|
Error, Fst, OpenOptions, Result, ResultContext,
|
||||||
};
|
};
|
||||||
|
|
||||||
pub(crate) mod gcn;
|
pub(crate) mod gcn;
|
||||||
pub(crate) mod wii;
|
pub(crate) mod wii;
|
||||||
|
|
||||||
|
pub(crate) const SECTOR_SIZE: usize = 0x8000;
|
||||||
|
|
||||||
/// Shared GameCube & Wii disc header
|
/// Shared GameCube & Wii disc header
|
||||||
#[derive(Clone, Debug, PartialEq)]
|
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
|
||||||
pub struct Header {
|
#[repr(C, align(4))]
|
||||||
|
pub struct DiscHeader {
|
||||||
/// Game ID (e.g. GM8E01 for Metroid Prime)
|
/// Game ID (e.g. GM8E01 for Metroid Prime)
|
||||||
pub game_id: [u8; 6],
|
pub game_id: [u8; 6],
|
||||||
/// Used in multi-disc games
|
/// Used in multi-disc games
|
||||||
pub disc_num: u8,
|
pub disc_num: u8,
|
||||||
/// Disc version
|
/// Disc version
|
||||||
pub disc_version: u8,
|
pub disc_version: u8,
|
||||||
/// Audio streaming enabled (bool)
|
/// Audio streaming enabled
|
||||||
pub audio_streaming: u8,
|
pub audio_streaming: u8,
|
||||||
/// Audio streaming buffer size
|
/// Audio streaming buffer size
|
||||||
pub audio_stream_buf_size: u8,
|
pub audio_stream_buf_size: u8,
|
||||||
|
/// Padding
|
||||||
|
_pad1: [u8; 14],
|
||||||
/// If this is a Wii disc, this will be 0x5D1C9EA3
|
/// If this is a Wii disc, this will be 0x5D1C9EA3
|
||||||
pub wii_magic: u32,
|
pub wii_magic: U32,
|
||||||
/// If this is a GameCube disc, this will be 0xC2339F3D
|
/// If this is a GameCube disc, this will be 0xC2339F3D
|
||||||
pub gcn_magic: u32,
|
pub gcn_magic: U32,
|
||||||
/// Game title
|
/// Game title
|
||||||
pub game_title: String,
|
pub game_title: [u8; 64],
|
||||||
/// Disable hash verification
|
/// If 1, disc omits partition hashes
|
||||||
pub disable_hash_verification: u8,
|
pub no_partition_hashes: u8,
|
||||||
/// Disable disc encryption and H3 hash table loading and verification
|
/// If 1, disc omits partition encryption
|
||||||
pub disable_disc_enc: u8,
|
pub no_partition_encryption: u8,
|
||||||
|
/// Padding
|
||||||
|
_pad2: [u8; 926],
|
||||||
}
|
}
|
||||||
|
|
||||||
fn from_c_str(bytes: &[u8]) -> io::Result<String> {
|
static_assert!(size_of::<DiscHeader>() == 0x400);
|
||||||
CStr::from_bytes_until_nul(bytes)
|
|
||||||
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?
|
|
||||||
.to_str()
|
|
||||||
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
|
|
||||||
.map(|s| s.to_string())
|
|
||||||
}
|
|
||||||
|
|
||||||
impl FromReader for Header {
|
impl DiscHeader {
|
||||||
type Args<'a> = ();
|
/// Game ID as a string.
|
||||||
|
pub fn game_id_str(&self) -> &str { from_utf8(&self.game_id).unwrap_or("[invalid]") }
|
||||||
|
|
||||||
const STATIC_SIZE: usize = 0x400;
|
/// Game title as a string.
|
||||||
|
pub fn game_title_str(&self) -> &str {
|
||||||
fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
|
CStr::from_bytes_until_nul(&self.game_title)
|
||||||
where R: Read + ?Sized {
|
.ok()
|
||||||
let game_id = <[u8; 6]>::from_reader(reader)?;
|
.and_then(|c| c.to_str().ok())
|
||||||
let disc_num = u8::from_reader(reader)?;
|
.unwrap_or("[invalid]")
|
||||||
let disc_version = u8::from_reader(reader)?;
|
|
||||||
let audio_streaming = u8::from_reader(reader)?;
|
|
||||||
let audio_stream_buf_size = u8::from_reader(reader)?;
|
|
||||||
skip_bytes::<14, _>(reader)?; // padding
|
|
||||||
let wii_magic = u32::from_reader(reader)?;
|
|
||||||
let gcn_magic = u32::from_reader(reader)?;
|
|
||||||
let game_title = from_c_str(&<[u8; 64]>::from_reader(reader)?)?;
|
|
||||||
let disable_hash_verification = u8::from_reader(reader)?;
|
|
||||||
let disable_disc_enc = u8::from_reader(reader)?;
|
|
||||||
skip_bytes::<926, _>(reader)?; // padding
|
|
||||||
Ok(Self {
|
|
||||||
game_id,
|
|
||||||
disc_num,
|
|
||||||
disc_version,
|
|
||||||
audio_streaming,
|
|
||||||
audio_stream_buf_size,
|
|
||||||
wii_magic,
|
|
||||||
gcn_magic,
|
|
||||||
game_title,
|
|
||||||
disable_hash_verification,
|
|
||||||
disable_disc_enc,
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Whether this is a GameCube disc.
|
||||||
|
pub fn is_gamecube(&self) -> bool { self.gcn_magic.get() == 0xC2339F3D }
|
||||||
|
|
||||||
|
/// Whether this is a Wii disc.
|
||||||
|
pub fn is_wii(&self) -> bool { self.wii_magic.get() == 0x5D1C9EA3 }
|
||||||
}
|
}
|
||||||
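The two magic words are how the library tells the formats apart: a Wii image stores 0x5D1C9EA3 at header offset 0x18 and a GameCube image stores 0xC2339F3D at 0x1C. A minimal standalone sketch of the same check against a raw 0x400-byte header buffer (hypothetical helper, not part of the crate's API):

```rust
use std::convert::TryInto;

/// Classify a raw disc header by its big-endian magic words.
/// `header` is the first 0x400 bytes of the image (or more).
fn classify_disc(header: &[u8]) -> Option<&'static str> {
    let wii_magic = u32::from_be_bytes(header.get(0x18..0x1C)?.try_into().ok()?);
    let gcn_magic = u32::from_be_bytes(header.get(0x1C..0x20)?.try_into().ok()?);
    if wii_magic == 0x5D1C9EA3 {
        Some("Wii")
    } else if gcn_magic == 0xC2339F3D {
        Some("GameCube")
    } else {
        None
    }
}
```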
|
|
||||||
/// Partition header
|
/// Partition header
|
||||||
#[derive(Clone, Debug, PartialEq)]
|
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
|
||||||
|
#[repr(C, align(4))]
|
||||||
pub struct PartitionHeader {
|
pub struct PartitionHeader {
|
||||||
/// Debug monitor offset
|
/// Debug monitor offset
|
||||||
pub debug_mon_off: u32,
|
pub debug_mon_off: U32,
|
||||||
/// Debug monitor load address
|
/// Debug monitor load address
|
||||||
pub debug_load_addr: u32,
|
pub debug_load_addr: U32,
|
||||||
|
/// Padding
|
||||||
|
_pad1: [u8; 0x18],
|
||||||
/// Offset to main DOL (Wii: >> 2)
|
/// Offset to main DOL (Wii: >> 2)
|
||||||
pub dol_off: u32,
|
pub dol_off: U32,
|
||||||
/// Offset to file system table (Wii: >> 2)
|
/// Offset to file system table (Wii: >> 2)
|
||||||
pub fst_off: u32,
|
pub fst_off: U32,
|
||||||
/// File system size
|
/// File system size (Wii: >> 2)
|
||||||
pub fst_sz: u32,
|
pub fst_sz: U32,
|
||||||
/// File system max size
|
/// File system max size (Wii: >> 2)
|
||||||
pub fst_max_sz: u32,
|
pub fst_max_sz: U32,
|
||||||
/// File system table load address
|
/// File system table load address
|
||||||
pub fst_memory_address: u32,
|
pub fst_memory_address: U32,
|
||||||
/// User position
|
/// User position
|
||||||
pub user_position: u32,
|
pub user_position: U32,
|
||||||
/// User size
|
/// User size
|
||||||
pub user_sz: u32,
|
pub user_sz: U32,
|
||||||
|
/// Padding
|
||||||
|
_pad2: [u8; 4],
|
||||||
}
|
}
|
||||||
|
|
||||||
impl FromReader for PartitionHeader {
|
static_assert!(size_of::<PartitionHeader>() == 0x40);
|
||||||
type Args<'a> = ();
|
|
||||||
|
|
||||||
const STATIC_SIZE: usize = struct_size([
|
impl PartitionHeader {
|
||||||
u32::STATIC_SIZE, // debug_mon_off
|
pub fn dol_off(&self, is_wii: bool) -> u64 {
|
||||||
u32::STATIC_SIZE, // debug_load_addr
|
if is_wii {
|
||||||
0x18, // padding
|
self.dol_off.get() as u64 * 4
|
||||||
u32::STATIC_SIZE, // dol_off
|
} else {
|
||||||
u32::STATIC_SIZE, // fst_off
|
self.dol_off.get() as u64
|
||||||
u32::STATIC_SIZE, // fst_sz
|
}
|
||||||
u32::STATIC_SIZE, // fst_max_sz
|
}
|
||||||
u32::STATIC_SIZE, // fst_memory_address
|
|
||||||
u32::STATIC_SIZE, // user_position
|
|
||||||
u32::STATIC_SIZE, // user_sz
|
|
||||||
4, // padding
|
|
||||||
]);
|
|
||||||
|
|
||||||
fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
|
pub fn fst_off(&self, is_wii: bool) -> u64 {
|
||||||
where R: Read + ?Sized {
|
if is_wii {
|
||||||
let debug_mon_off = u32::from_reader(reader)?;
|
self.fst_off.get() as u64 * 4
|
||||||
let debug_load_addr = u32::from_reader(reader)?;
|
} else {
|
||||||
skip_bytes::<0x18, _>(reader)?; // padding
|
self.fst_off.get() as u64
|
||||||
let dol_off = u32::from_reader(reader)?;
|
}
|
||||||
let fst_off = u32::from_reader(reader)?;
|
}
|
||||||
let fst_sz = u32::from_reader(reader)?;
|
|
||||||
let fst_max_sz = u32::from_reader(reader)?;
|
pub fn fst_sz(&self, is_wii: bool) -> u64 {
|
||||||
let fst_memory_address = u32::from_reader(reader)?;
|
if is_wii {
|
||||||
let user_position = u32::from_reader(reader)?;
|
self.fst_sz.get() as u64 * 4
|
||||||
let user_sz = u32::from_reader(reader)?;
|
} else {
|
||||||
skip_bytes::<4, _>(reader)?; // padding
|
self.fst_sz.get() as u64
|
||||||
Ok(Self {
|
}
|
||||||
debug_mon_off,
|
}
|
||||||
debug_load_addr,
|
|
||||||
dol_off,
|
pub fn fst_max_sz(&self, is_wii: bool) -> u64 {
|
||||||
fst_off,
|
if is_wii {
|
||||||
fst_sz,
|
self.fst_max_sz.get() as u64 * 4
|
||||||
fst_max_sz,
|
} else {
|
||||||
fst_memory_address,
|
self.fst_max_sz.get() as u64
|
||||||
user_position,
|
}
|
||||||
user_sz,
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Clone)]
|
/// Apploader header
|
||||||
|
#[derive(Debug, PartialEq, Clone, FromBytes, FromZeroes, AsBytes)]
|
||||||
|
#[repr(C, align(4))]
|
||||||
pub struct AppLoaderHeader {
|
pub struct AppLoaderHeader {
|
||||||
pub date: String,
|
/// Apploader build date
|
||||||
pub entry_point: u32,
|
pub date: [u8; 16],
|
||||||
pub size: u32,
|
/// Entry point
|
||||||
pub trailer_size: u32,
|
pub entry_point: U32,
|
||||||
|
/// Apploader size
|
||||||
|
pub size: U32,
|
||||||
|
/// Apploader trailer size
|
||||||
|
pub trailer_size: U32,
|
||||||
|
/// Padding
|
||||||
|
_pad: [u8; 4],
|
||||||
}
|
}
|
||||||
|
|
||||||
impl FromReader for AppLoaderHeader {
|
impl AppLoaderHeader {
|
||||||
type Args<'a> = ();
|
/// Apploader build date as a string
|
||||||
|
pub fn date_str(&self) -> Option<&str> {
|
||||||
const STATIC_SIZE: usize = struct_size([
|
CStr::from_bytes_until_nul(&self.date).ok().and_then(|c| c.to_str().ok())
|
||||||
16, // date
|
|
||||||
u32::STATIC_SIZE, // entry_point
|
|
||||||
u32::STATIC_SIZE, // size
|
|
||||||
u32::STATIC_SIZE, // trailer_size
|
|
||||||
4, // padding
|
|
||||||
]);
|
|
||||||
|
|
||||||
fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
|
|
||||||
where R: Read + ?Sized {
|
|
||||||
let date = from_c_str(&<[u8; 16]>::from_reader(reader)?)?;
|
|
||||||
let entry_point = u32::from_reader(reader)?;
|
|
||||||
let size = u32::from_reader(reader)?;
|
|
||||||
let trailer_size = u32::from_reader(reader)?;
|
|
||||||
skip_bytes::<4, _>(reader)?; // padding
|
|
||||||
Ok(Self { date, entry_point, size, trailer_size })
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Maximum number of text sections in a DOL
|
||||||
pub const DOL_MAX_TEXT_SECTIONS: usize = 7;
|
pub const DOL_MAX_TEXT_SECTIONS: usize = 7;
|
||||||
|
/// Maximum number of data sections in a DOL
|
||||||
pub const DOL_MAX_DATA_SECTIONS: usize = 11;
|
pub const DOL_MAX_DATA_SECTIONS: usize = 11;
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
/// DOL header
|
||||||
|
#[derive(Debug, Clone, FromBytes, FromZeroes)]
|
||||||
pub struct DolHeader {
|
pub struct DolHeader {
|
||||||
pub text_offs: [u32; DOL_MAX_TEXT_SECTIONS],
|
/// Text section offsets
|
||||||
pub data_offs: [u32; DOL_MAX_DATA_SECTIONS],
|
pub text_offs: [U32; DOL_MAX_TEXT_SECTIONS],
|
||||||
pub text_addrs: [u32; DOL_MAX_TEXT_SECTIONS],
|
/// Data section offsets
|
||||||
pub data_addrs: [u32; DOL_MAX_DATA_SECTIONS],
|
pub data_offs: [U32; DOL_MAX_DATA_SECTIONS],
|
||||||
pub text_sizes: [u32; DOL_MAX_TEXT_SECTIONS],
|
/// Text section addresses
|
||||||
pub data_sizes: [u32; DOL_MAX_DATA_SECTIONS],
|
pub text_addrs: [U32; DOL_MAX_TEXT_SECTIONS],
|
||||||
pub bss_addr: u32,
|
/// Data section addresses
|
||||||
pub bss_size: u32,
|
pub data_addrs: [U32; DOL_MAX_DATA_SECTIONS],
|
||||||
pub entry_point: u32,
|
/// Text section sizes
|
||||||
|
pub text_sizes: [U32; DOL_MAX_TEXT_SECTIONS],
|
||||||
|
/// Data section sizes
|
||||||
|
pub data_sizes: [U32; DOL_MAX_DATA_SECTIONS],
|
||||||
|
/// BSS address
|
||||||
|
pub bss_addr: U32,
|
||||||
|
/// BSS size
|
||||||
|
pub bss_size: U32,
|
||||||
|
/// Entry point
|
||||||
|
pub entry_point: U32,
|
||||||
|
/// Padding
|
||||||
|
_pad: [u8; 0x1C],
|
||||||
}
|
}
|
||||||
|
|
||||||
impl FromReader for DolHeader {
|
static_assert!(size_of::<DolHeader>() == 0x100);
|
||||||
type Args<'a> = ();
|
|
||||||
|
|
||||||
const STATIC_SIZE: usize = 0x100;
|
|
||||||
|
|
||||||
fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
|
|
||||||
where R: Read + ?Sized {
|
|
||||||
let result = Self {
|
|
||||||
text_offs: <_>::from_reader(reader)?,
|
|
||||||
data_offs: <_>::from_reader(reader)?,
|
|
||||||
text_addrs: <_>::from_reader(reader)?,
|
|
||||||
data_addrs: <_>::from_reader(reader)?,
|
|
||||||
text_sizes: <_>::from_reader(reader)?,
|
|
||||||
data_sizes: <_>::from_reader(reader)?,
|
|
||||||
bss_addr: <_>::from_reader(reader)?,
|
|
||||||
bss_size: <_>::from_reader(reader)?,
|
|
||||||
entry_point: <_>::from_reader(reader)?,
|
|
||||||
};
|
|
||||||
skip_bytes::<0x1C, _>(reader)?; // padding
|
|
||||||
Ok(result)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
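A DOL image carries up to 7 text and 11 data sections; section `i` occupies `text_offs[i]..text_offs[i] + text_sizes[i]` in the file and is loaded at `text_addrs[i]`. A minimal sketch of walking the text sections, using plain `u32` fields instead of the crate's big-endian wrappers:

```rust
/// Hypothetical plain-integer mirror of the DOL header fields used here.
struct DolTextSections {
    text_offs: [u32; 7],
    text_addrs: [u32; 7],
    text_sizes: [u32; 7],
}

/// Print the file range and load address of every non-empty text section.
fn dump_text_sections(dol: &DolTextSections) {
    for i in 0..7 {
        let size = dol.text_sizes[i] as u64;
        if size == 0 {
            continue; // unused section slot
        }
        let start = dol.text_offs[i] as u64;
        println!(
            ".text{}: file {:#X}..{:#X} -> load address {:#X}",
            i,
            start,
            start + size,
            dol.text_addrs[i]
        );
    }
}
```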
|
|
||||||
|
/// Partition type
|
||||||
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
|
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
|
||||||
pub enum PartitionType {
|
pub enum PartitionKind {
|
||||||
Data,
|
Data,
|
||||||
Update,
|
Update,
|
||||||
Channel,
|
Channel,
|
||||||
|
Other(u32),
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) const SECTOR_SIZE: usize = 0x8000;
|
impl Display for PartitionKind {
|
||||||
|
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||||
|
match self {
|
||||||
|
Self::Data => write!(f, "Data"),
|
||||||
|
Self::Update => write!(f, "Update"),
|
||||||
|
Self::Channel => write!(f, "Channel"),
|
||||||
|
Self::Other(v) => {
|
||||||
|
let bytes = v.to_be_bytes();
|
||||||
|
write!(f, "Other ({:08X}, {})", v, String::from_utf8_lossy(&bytes))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl PartitionKind {
|
||||||
|
/// Returns the directory name for the partition kind.
|
||||||
|
pub fn dir_name(&self) -> Cow<str> {
|
||||||
|
match self {
|
||||||
|
Self::Data => Cow::Borrowed("DATA"),
|
||||||
|
Self::Update => Cow::Borrowed("UPDATE"),
|
||||||
|
Self::Channel => Cow::Borrowed("CHANNEL"),
|
||||||
|
Self::Other(v) => {
|
||||||
|
let bytes = v.to_be_bytes();
|
||||||
|
Cow::Owned(format!("P-{}", String::from_utf8_lossy(&bytes)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<u32> for PartitionKind {
|
||||||
|
fn from(v: u32) -> Self {
|
||||||
|
match v {
|
||||||
|
0 => Self::Data,
|
||||||
|
1 => Self::Update,
|
||||||
|
2 => Self::Channel,
|
||||||
|
v => Self::Other(v),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
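Partition table entries store the kind as a raw big-endian `u32`: 0, 1 and 2 map to the well-known kinds, anything else is preserved as `Other`, and its directory name embeds the four type bytes. A short usage sketch (the raw value is a made-up example):

```rust
use nod::PartitionKind;

fn main() {
    // 0x5256_4C44 is the four ASCII bytes "RVLD" (hypothetical value).
    let kind = PartitionKind::from(0x5256_4C44u32);
    assert_eq!(kind, PartitionKind::Other(0x5256_4C44));
    assert_eq!(kind.dir_name(), "P-RVLD");

    assert_eq!(PartitionKind::from(0u32), PartitionKind::Data);
    assert_eq!(PartitionKind::Data.dir_name(), "DATA");
}
```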
|
|
||||||
|
/// Information about a GameCube or Wii disc partition.
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct PartitionInfo {
|
||||||
|
/// Partition group index
|
||||||
|
pub group_index: u32,
|
||||||
|
/// Partition index within the group
|
||||||
|
pub part_index: u32,
|
||||||
|
/// Partition offset within disc
|
||||||
|
pub part_offset: u64,
|
||||||
|
/// Partition kind
|
||||||
|
pub kind: PartitionKind,
|
||||||
|
/// Data offset within partition
|
||||||
|
pub data_offset: u64,
|
||||||
|
/// Data size
|
||||||
|
pub data_size: u64,
|
||||||
|
/// Raw Wii partition header
|
||||||
|
pub header: Option<WiiPartitionHeader>,
|
||||||
|
/// Lagged Fibonacci generator seed (for junk data)
|
||||||
|
pub lfg_seed: [u8; 4],
|
||||||
|
// /// Junk data start offset
|
||||||
|
// pub junk_start: u64,
|
||||||
|
}
|
||||||
|
|
||||||
/// Contains a disc's header & partition information.
|
/// Contains a disc's header & partition information.
|
||||||
pub trait DiscBase: Send + Sync {
|
pub trait DiscBase: Send + Sync {
|
||||||
/// Retrieves the disc's header.
|
/// Retrieves the disc's header.
|
||||||
fn get_header(&self) -> &Header;
|
fn header(&self) -> &DiscHeader;
|
||||||
|
|
||||||
/// Opens a new partition read stream for the first data partition.
|
/// A list of partitions on the disc.
|
||||||
|
fn partitions(&self) -> Vec<PartitionInfo>;
|
||||||
|
|
||||||
|
/// Opens a new, decrypted partition read stream for the specified partition index.
|
||||||
///
|
///
|
||||||
/// `validate_hashes`: Validate Wii disc hashes while reading (slow!)
|
/// `validate_hashes`: Validate Wii disc hashes while reading (slow!)
|
||||||
///
|
fn open_partition<'a>(
|
||||||
/// # Examples
|
|
||||||
///
|
|
||||||
/// Basic usage:
|
|
||||||
/// ```no_run
|
|
||||||
/// use nod::{
|
|
||||||
/// disc::new_disc_base,
|
|
||||||
/// io::{new_disc_io, DiscIOOptions},
|
|
||||||
/// };
|
|
||||||
///
|
|
||||||
/// # fn main() -> nod::Result<()> {
|
|
||||||
/// let options = DiscIOOptions::default();
|
|
||||||
/// let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?;
|
|
||||||
/// let disc_base = new_disc_base(disc_io.as_mut())?;
|
|
||||||
/// let mut partition = disc_base.get_data_partition(disc_io.as_mut(), false)?;
|
|
||||||
/// # Ok(())
|
|
||||||
/// # }
|
|
||||||
/// ```
|
|
||||||
fn get_data_partition<'a>(
|
|
||||||
&self,
|
&self,
|
||||||
disc_io: &'a mut dyn DiscIO,
|
disc_io: &'a dyn DiscIO,
|
||||||
validate_hashes: bool,
|
index: usize,
|
||||||
) -> Result<Box<dyn PartReadStream + 'a>>;
|
options: &OpenOptions,
|
||||||
|
) -> Result<Box<dyn PartitionBase + 'a>>;
|
||||||
|
|
||||||
/// Opens a new partition read stream for the first partition matching
|
/// Opens a new partition read stream for the first partition matching
|
||||||
/// the specified type.
|
/// the specified type.
|
||||||
///
|
///
|
||||||
/// `validate_hashes`: Validate Wii disc hashes while reading (slow!)
|
/// `validate_hashes`: Validate Wii disc hashes while reading (slow!)
|
||||||
fn get_partition<'a>(
|
fn open_partition_kind<'a>(
|
||||||
&self,
|
&self,
|
||||||
disc_io: &'a mut dyn DiscIO,
|
disc_io: &'a dyn DiscIO,
|
||||||
part_type: PartitionType,
|
part_type: PartitionKind,
|
||||||
validate_hashes: bool,
|
options: &OpenOptions,
|
||||||
) -> Result<Box<dyn PartReadStream + 'a>>;
|
) -> Result<Box<dyn PartitionBase + 'a>>;
|
||||||
|
|
||||||
|
/// The disc's size in bytes, or an estimate if not stored by the format.
|
||||||
|
fn disc_size(&self) -> u64;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Creates a new [`DiscBase`] instance.
|
/// Creates a new [`DiscBase`] instance.
|
||||||
///
|
pub fn new(disc_io: &mut dyn DiscIO) -> Result<Box<dyn DiscBase>> {
|
||||||
/// # Examples
|
let disc_size = disc_io.disc_size();
|
||||||
///
|
let mut stream = disc_io.open()?;
|
||||||
/// Basic usage:
|
let header: DiscHeader = read_from(stream.as_mut()).context("Reading disc header")?;
|
||||||
/// ```no_run
|
if header.is_wii() {
|
||||||
/// use nod::{
|
Ok(Box::new(DiscWii::new(stream.as_mut(), header, disc_size)?))
|
||||||
/// disc::new_disc_base,
|
} else if header.is_gamecube() {
|
||||||
/// io::{new_disc_io, DiscIOOptions},
|
Ok(Box::new(DiscGCN::new(stream.as_mut(), header, disc_size)?))
|
||||||
/// };
|
|
||||||
///
|
|
||||||
/// # fn main() -> nod::Result<()> {
|
|
||||||
/// let options = DiscIOOptions::default();
|
|
||||||
/// let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?;
|
|
||||||
/// let disc_base = new_disc_base(disc_io.as_mut())?;
|
|
||||||
/// disc_base.get_header();
|
|
||||||
/// # Ok(())
|
|
||||||
/// # }
|
|
||||||
/// ```
|
|
||||||
pub fn new_disc_base(disc_io: &mut dyn DiscIO) -> Result<Box<dyn DiscBase>> {
|
|
||||||
let mut stream = disc_io.begin_read_stream(0).context("Opening disc stream")?;
|
|
||||||
let header_bytes =
|
|
||||||
<[u8; Header::STATIC_SIZE]>::from_reader(&mut stream).context("Reading disc header")?;
|
|
||||||
let header =
|
|
||||||
Header::from_reader(&mut header_bytes.as_slice()).context("Parsing disc header")?;
|
|
||||||
if header.wii_magic == 0x5D1C9EA3 {
|
|
||||||
Ok(Box::from(DiscWii::new(stream.as_mut(), header)?))
|
|
||||||
} else if header.gcn_magic == 0xC2339F3D {
|
|
||||||
Ok(Box::from(DiscGCN::new(header)?))
|
|
||||||
} else {
|
} else {
|
||||||
Err(Error::DiscFormat(format!("Invalid GC/Wii magic: {:#010X}", header.wii_magic)))
|
Err(Error::DiscFormat(format!(
|
||||||
|
"Invalid GC/Wii magic: {:#010X}/{:#010X}",
|
||||||
|
header.gcn_magic.get(),
|
||||||
|
header.wii_magic.get()
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// An open read stream for a disc partition.
|
/// An open read stream for a disc partition.
|
||||||
pub trait PartReadStream: ReadStream {
|
pub trait PartitionBase: ReadStream {
|
||||||
|
/// Reads the partition header and file system table.
|
||||||
|
fn meta(&mut self) -> Result<Box<PartitionMeta>>;
|
||||||
|
|
||||||
/// Seeks the read stream to the specified file system node
|
/// Seeks the read stream to the specified file system node
|
||||||
/// and returns a windowed stream.
|
/// and returns a windowed stream.
|
||||||
///
|
///
|
||||||
|
@@ -315,22 +336,17 @@ pub trait PartReadStream: ReadStream {
|
||||||
/// ```no_run
|
/// ```no_run
|
||||||
/// use std::io::Read;
|
/// use std::io::Read;
|
||||||
///
|
///
|
||||||
/// use nod::{
|
/// use nod::{Disc, PartitionKind};
|
||||||
/// disc::{new_disc_base, PartHeader},
|
|
||||||
/// fst::NodeType,
|
|
||||||
/// io::{new_disc_io, DiscIOOptions},
|
|
||||||
/// };
|
|
||||||
///
|
///
|
||||||
/// fn main() -> nod::Result<()> {
|
/// fn main() -> nod::Result<()> {
|
||||||
/// let options = DiscIOOptions::default();
|
/// let disc = Disc::new("path/to/file.iso")?;
|
||||||
/// let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?;
|
/// let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
|
||||||
/// let disc_base = new_disc_base(disc_io.as_mut())?;
|
/// let meta = partition.meta()?;
|
||||||
/// let mut partition = disc_base.get_data_partition(disc_io.as_mut(), false)?;
|
/// let fst = meta.fst()?;
|
||||||
/// let header = partition.read_header()?;
|
/// if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
|
||||||
/// if let Some(NodeType::File(node)) = header.find_node("/MP3/Worlds.txt") {
|
|
||||||
/// let mut s = String::new();
|
/// let mut s = String::new();
|
||||||
/// partition
|
/// partition
|
||||||
/// .begin_file_stream(node)
|
/// .open_file(node)
|
||||||
/// .expect("Failed to open file stream")
|
/// .expect("Failed to open file stream")
|
||||||
/// .read_to_string(&mut s)
|
/// .read_to_string(&mut s)
|
||||||
/// .expect("Failed to read file");
|
/// .expect("Failed to read file");
|
||||||
|
@@ -339,10 +355,7 @@ pub trait PartReadStream: ReadStream {
|
||||||
/// Ok(())
|
/// Ok(())
|
||||||
/// }
|
/// }
|
||||||
/// ```
|
/// ```
|
||||||
fn begin_file_stream(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream>;
|
fn open_file(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream>;
|
||||||
|
|
||||||
/// Reads the partition header and file system table.
|
|
||||||
fn read_header(&mut self) -> Result<Box<dyn PartHeader>>;
|
|
||||||
|
|
||||||
/// The ideal size for buffered reads from this partition.
|
/// The ideal size for buffered reads from this partition.
|
||||||
/// GameCube discs have a data block size of 0x8000,
|
/// GameCube discs have a data block size of 0x8000,
|
||||||
|
@@ -350,64 +363,60 @@ pub trait PartReadStream: ReadStream {
|
||||||
fn ideal_buffer_size(&self) -> usize;
|
fn ideal_buffer_size(&self) -> usize;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Disc partition header with file system table.
|
/// Size of the disc header and partition header (boot.bin)
|
||||||
pub trait PartHeader: Debug + Send + Sync {
|
pub const BOOT_SIZE: usize = size_of::<DiscHeader>() + size_of::<PartitionHeader>();
|
||||||
/// The root node for the filesystem.
|
/// Size of the debug and region information (bi2.bin)
|
||||||
fn root_node(&self) -> &NodeType;
|
pub const BI2_SIZE: usize = 0x2000;
|
||||||
|
|
||||||
/// Finds a particular file or directory by path.
|
|
||||||
///
|
|
||||||
/// # Examples
|
|
||||||
///
|
|
||||||
/// Basic usage:
|
|
||||||
/// ```no_run
|
|
||||||
/// use nod::{
|
|
||||||
/// disc::{new_disc_base, PartHeader},
|
|
||||||
/// fst::NodeType,
|
|
||||||
/// io::{new_disc_io, DiscIOOptions},
|
|
||||||
/// };
|
|
||||||
///
|
|
||||||
/// fn main() -> nod::Result<()> {
|
|
||||||
/// let options = DiscIOOptions::default();
|
|
||||||
/// let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?;
|
|
||||||
/// let disc_base = new_disc_base(disc_io.as_mut())?;
|
|
||||||
/// let mut partition = disc_base.get_data_partition(disc_io.as_mut(), false)?;
|
|
||||||
/// let header = partition.read_header()?;
|
|
||||||
/// if let Some(NodeType::File(node)) = header.find_node("/MP1/Metroid1.pak") {
|
|
||||||
/// println!("{}", node.name);
|
|
||||||
/// }
|
|
||||||
/// if let Some(NodeType::Directory(node, children)) = header.find_node("/MP1") {
|
|
||||||
/// println!("Number of files: {}", children.len());
|
|
||||||
/// }
|
|
||||||
/// Ok(())
|
|
||||||
/// }
|
|
||||||
/// ```
|
|
||||||
fn find_node(&self, path: &str) -> Option<&NodeType>;
|
|
||||||
|
|
||||||
|
/// Disc partition metadata
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub struct PartitionMeta {
|
||||||
/// Disc and partition header (boot.bin)
|
/// Disc and partition header (boot.bin)
|
||||||
fn boot_bytes(&self) -> &[u8];
|
pub raw_boot: [u8; BOOT_SIZE],
|
||||||
|
|
||||||
/// Debug and region information (bi2.bin)
|
/// Debug and region information (bi2.bin)
|
||||||
fn bi2_bytes(&self) -> &[u8];
|
pub raw_bi2: [u8; BI2_SIZE],
|
||||||
|
|
||||||
/// Apploader (apploader.bin)
|
/// Apploader (apploader.bin)
|
||||||
fn apploader_bytes(&self) -> &[u8];
|
pub raw_apploader: Vec<u8>,
|
||||||
|
|
||||||
/// File system table (fst.bin)
|
/// File system table (fst.bin)
|
||||||
fn fst_bytes(&self) -> &[u8];
|
pub raw_fst: Vec<u8>,
|
||||||
|
|
||||||
/// Main binary (main.dol)
|
/// Main binary (main.dol)
|
||||||
fn dol_bytes(&self) -> &[u8];
|
pub raw_dol: Vec<u8>,
|
||||||
|
/// Ticket (ticket.bin, Wii only)
|
||||||
/// Disc header
|
pub raw_ticket: Option<Vec<u8>>,
|
||||||
fn disc_header(&self) -> &Header;
|
/// TMD (tmd.bin, Wii only)
|
||||||
|
pub raw_tmd: Option<Vec<u8>>,
|
||||||
/// Partition header
|
/// Certificate chain (cert.bin, Wii only)
|
||||||
fn partition_header(&self) -> &PartitionHeader;
|
pub raw_cert_chain: Option<Vec<u8>>,
|
||||||
|
/// H3 hash table (h3.bin, Wii only)
|
||||||
/// Apploader header
|
pub raw_h3_table: Option<Vec<u8>>,
|
||||||
fn apploader_header(&self) -> &AppLoaderHeader;
|
|
||||||
|
|
||||||
/// DOL header
|
|
||||||
fn dol_header(&self) -> &DolHeader;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl PartitionMeta {
|
||||||
|
pub fn header(&self) -> &DiscHeader {
|
||||||
|
DiscHeader::ref_from(&self.raw_boot[..size_of::<DiscHeader>()]).unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn partition_header(&self) -> &PartitionHeader {
|
||||||
|
PartitionHeader::ref_from(&self.raw_boot[size_of::<DiscHeader>()..]).unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn apploader_header(&self) -> &AppLoaderHeader {
|
||||||
|
AppLoaderHeader::ref_from_prefix(&self.raw_apploader).unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn fst(&self) -> Result<Fst, &'static str> { Fst::new(&self.raw_fst) }
|
||||||
|
|
||||||
|
pub fn dol_header(&self) -> &DolHeader { DolHeader::ref_from_prefix(&self.raw_dol).unwrap() }
|
||||||
|
|
||||||
|
pub fn ticket(&self) -> Option<&Ticket> {
|
||||||
|
self.raw_ticket.as_ref().and_then(|v| Ticket::ref_from(v))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn tmd_header(&self) -> Option<&TmdHeader> {
|
||||||
|
self.raw_tmd.as_ref().and_then(|v| TmdHeader::ref_from_prefix(v))
|
||||||
|
}
|
||||||
|
}
|
||||||
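`raw_boot` is boot.bin verbatim, so the accessors above just split it at `size_of::<DiscHeader>()` (0x400) to recover the two typed headers. For reference, a sketch of the fixed layout at the start of a GameCube disc or decrypted Wii partition, done by hand on a raw buffer:

```rust
// Fixed layout (same values as BOOT_SIZE and BI2_SIZE above):
//   0x0000..0x0440  boot.bin  (DiscHeader + PartitionHeader)
//   0x0440..0x2440  bi2.bin   (debug & region information)
//   0x2440..        apploader.bin (header followed by code)
const BOOT_LEN: usize = 0x440;
const BI2_LEN: usize = 0x2000;

fn split_partition_start(data: &[u8]) -> Option<(&[u8], &[u8], &[u8])> {
    let boot = data.get(..BOOT_LEN)?;
    let bi2 = data.get(BOOT_LEN..BOOT_LEN + BI2_LEN)?;
    let rest = data.get(BOOT_LEN + BI2_LEN..)?;
    Some((boot, bi2, rest))
}
```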
|
|
||||||
|
pub const MINI_DVD_SIZE: u64 = 1_459_978_240;
|
||||||
|
pub const SL_DVD_SIZE: u64 = 4_699_979_776;
|
||||||
|
pub const DL_DVD_SIZE: u64 = 8_511_160_320;
|
||||||
|
|
768
src/disc/wii.rs
|
@@ -1,443 +1,442 @@
|
||||||
use std::{
|
use std::{
|
||||||
io,
|
io,
|
||||||
io::{Read, Seek, SeekFrom},
|
io::{Read, Seek, SeekFrom},
|
||||||
|
mem::size_of,
|
||||||
};
|
};
|
||||||
|
|
||||||
use aes::{
|
|
||||||
cipher::{block_padding::NoPadding, BlockDecryptMut, KeyIvInit},
|
|
||||||
Aes128, Block,
|
|
||||||
};
|
|
||||||
use sha1::{digest, Digest, Sha1};
|
use sha1::{digest, Digest, Sha1};
|
||||||
|
use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
array_ref,
|
array_ref,
|
||||||
disc::{
|
disc::{
|
||||||
AppLoaderHeader, DiscBase, DiscIO, DolHeader, Header, PartHeader, PartReadStream,
|
gcn::read_part_header, DiscBase, DiscHeader, DiscIO, PartitionBase, PartitionInfo,
|
||||||
PartitionHeader, PartitionType, SECTOR_SIZE,
|
PartitionKind, PartitionMeta, DL_DVD_SIZE, MINI_DVD_SIZE, SECTOR_SIZE, SL_DVD_SIZE,
|
||||||
},
|
},
|
||||||
fst::{find_node, Node, NodeKind, NodeType},
|
fst::{Node, NodeKind},
|
||||||
streams::{wrap_windowed, OwningWindowedReadStream, ReadStream, SharedWindowedReadStream},
|
io::{aes_decrypt, KeyBytes},
|
||||||
|
static_assert,
|
||||||
|
streams::{wrap_windowed, ReadStream, SharedWindowedReadStream},
|
||||||
util::{
|
util::{
|
||||||
div_rem,
|
div_rem,
|
||||||
reader::{skip_bytes, struct_size, FromReader},
|
reader::{read_from, read_vec},
|
||||||
},
|
},
|
||||||
Error, Result, ResultContext,
|
Error, OpenOptions, PartitionHeader, Result, ResultContext,
|
||||||
};
|
};
|
||||||
|
|
||||||
pub(crate) const HASHES_SIZE: usize = 0x400;
|
pub(crate) const HASHES_SIZE: usize = 0x400;
|
||||||
pub(crate) const BLOCK_SIZE: usize = SECTOR_SIZE - HASHES_SIZE; // 0x7C00
|
pub(crate) const BLOCK_SIZE: usize = SECTOR_SIZE - HASHES_SIZE; // 0x7C00
|
||||||
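Every raw Wii sector is 0x8000 bytes: a 0x400-byte hash area (H0/H1/H2 tables) followed by 0x7C00 bytes of user data, so mapping a decrypted data offset back to the raw stream is a divide and modulo by `BLOCK_SIZE`. A worked sketch of that mapping:

```rust
const SECTOR_SIZE: u64 = 0x8000;
const HASHES_SIZE: u64 = 0x400;
const BLOCK_SIZE: u64 = SECTOR_SIZE - HASHES_SIZE; // 0x7C00

/// Map an offset in the decrypted data stream to (sector index, raw offset).
fn data_to_raw(offset: u64) -> (u64, u64) {
    let sector = offset / BLOCK_SIZE;
    let within = offset % BLOCK_SIZE;
    (sector, sector * SECTOR_SIZE + HASHES_SIZE + within)
}

fn main() {
    // Data offset 0x7C00 is the first byte of the second sector,
    // which lives at raw offset 0x8000 + 0x400 = 0x8400.
    assert_eq!(data_to_raw(0x7C00), (1, 0x8400));
}
```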
|
|
||||||
/// AES-128-CBC decryptor
|
|
||||||
type Aes128Cbc = cbc::Decryptor<Aes128>;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
#[rustfmt::skip]
|
||||||
const COMMON_KEYS: [[u8; 16]; 2] = [
|
const COMMON_KEYS: [KeyBytes; 2] = [
|
||||||
/* Normal */
|
/* Normal */
|
||||||
[0xeb, 0xe4, 0x2a, 0x22, 0x5e, 0x85, 0x93, 0xe4, 0x48, 0xd9, 0xc5, 0x45, 0x73, 0x81, 0xaa, 0xf7],
|
[0xeb, 0xe4, 0x2a, 0x22, 0x5e, 0x85, 0x93, 0xe4, 0x48, 0xd9, 0xc5, 0x45, 0x73, 0x81, 0xaa, 0xf7],
|
||||||
/* Korean */
|
/* Korean */
|
||||||
[0x63, 0xb8, 0x2b, 0xb4, 0xf4, 0x61, 0x4e, 0x2e, 0x13, 0xf2, 0xfe, 0xfb, 0xba, 0x4c, 0x9b, 0x7e],
|
[0x63, 0xb8, 0x2b, 0xb4, 0xf4, 0x61, 0x4e, 0x2e, 0x13, 0xf2, 0xfe, 0xfb, 0xba, 0x4c, 0x9b, 0x7e],
|
||||||
];
|
];
|
||||||
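The ticket's title key is stored encrypted with one of the common keys above (selected by `common_key_idx`), using the first eight bytes of the title ID as the AES-128-CBC IV. A minimal sketch of that decryption with the `aes`/`cbc` crates, as the previous implementation did directly (the new code routes this through its `aes_decrypt` helper):

```rust
use aes::cipher::{block_padding::NoPadding, BlockDecryptMut, KeyIvInit};

type Aes128CbcDec = cbc::Decryptor<aes::Aes128>;

/// Decrypt a ticket's title key in place.
/// `common_key` is one of COMMON_KEYS; `title_id` comes from the ticket.
fn decrypt_title_key(common_key: &[u8; 16], title_id: &[u8; 8], title_key: &mut [u8; 16]) {
    let mut iv = [0u8; 16];
    iv[..8].copy_from_slice(title_id);
    Aes128CbcDec::new(common_key.into(), &iv.into())
        .decrypt_padded_mut::<NoPadding>(title_key)
        .expect("title key is exactly one AES block");
}
```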
|
|
||||||
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
|
#[derive(Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
|
||||||
enum SigType {
|
#[repr(C, align(4))]
|
||||||
Rsa4096,
|
struct WiiPartEntry {
|
||||||
Rsa2048,
|
offset: U32,
|
||||||
EllipticalCurve,
|
kind: U32,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl FromReader for SigType {
|
static_assert!(size_of::<WiiPartEntry>() == 8);
|
||||||
type Args<'a> = ();
|
|
||||||
|
|
||||||
const STATIC_SIZE: usize = u32::STATIC_SIZE;
|
impl WiiPartEntry {
|
||||||
|
fn offset(&self) -> u64 { (self.offset.get() as u64) << 2 }
|
||||||
fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
|
|
||||||
where R: Read + ?Sized {
|
|
||||||
match u32::from_reader(reader)? {
|
|
||||||
0x00010000 => Ok(SigType::Rsa4096),
|
|
||||||
0x00010001 => Ok(SigType::Rsa2048),
|
|
||||||
0x00010002 => Ok(SigType::EllipticalCurve),
|
|
||||||
_ => Err(io::Error::new(io::ErrorKind::InvalidData, "invalid signature type")),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SigType {
|
|
||||||
fn size(self) -> usize {
|
|
||||||
match self {
|
|
||||||
SigType::Rsa4096 => 512,
|
|
||||||
SigType::Rsa2048 => 256,
|
|
||||||
SigType::EllipticalCurve => 64,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
|
|
||||||
enum KeyType {
|
|
||||||
Rsa4096,
|
|
||||||
Rsa2048,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl FromReader for KeyType {
|
|
||||||
type Args<'a> = ();
|
|
||||||
|
|
||||||
const STATIC_SIZE: usize = u32::STATIC_SIZE;
|
|
||||||
|
|
||||||
fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
|
|
||||||
where R: Read + ?Sized {
|
|
||||||
match u32::from_reader(reader)? {
|
|
||||||
0x00000000 => Ok(KeyType::Rsa4096),
|
|
||||||
0x00000001 => Ok(KeyType::Rsa2048),
|
|
||||||
_ => Err(io::Error::new(io::ErrorKind::InvalidData, "invalid key type")),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl KeyType {
|
|
||||||
fn size(self) -> usize {
|
|
||||||
match self {
|
|
||||||
KeyType::Rsa4096 => 512,
|
|
||||||
KeyType::Rsa2048 => 256,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, PartialEq)]
|
#[derive(Debug, PartialEq)]
|
||||||
struct WiiPart {
|
pub(crate) struct WiiPartInfo {
|
||||||
// #[br(map = |x: u32| (x as u64) << 2)]
|
pub(crate) group_idx: u32,
|
||||||
part_data_off: u64,
|
pub(crate) part_idx: u32,
|
||||||
part_type: PartitionType,
|
pub(crate) offset: u64,
|
||||||
// #[br(restore_position, args(part_data_off))]
|
pub(crate) kind: PartitionKind,
|
||||||
part_header: WiiPartitionHeader,
|
pub(crate) header: WiiPartitionHeader,
|
||||||
|
pub(crate) junk_id: [u8; 4],
|
||||||
|
pub(crate) junk_start: u64,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, PartialEq)]
|
const WII_PART_GROUP_OFF: u64 = 0x40000;
|
||||||
struct WiiPartInfo {
|
|
||||||
// #[br(seek_before = SeekFrom::Start(0x40000))]
|
#[derive(Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
|
||||||
part_count: u32,
|
#[repr(C, align(4))]
|
||||||
// #[br(map = |x: u32| (x as u64) << 2)]
|
struct WiiPartGroup {
|
||||||
part_info_off: u64,
|
part_count: U32,
|
||||||
// #[br(seek_before = SeekFrom::Start(part_info_off), count = part_count)]
|
part_entry_off: U32,
|
||||||
parts: Vec<WiiPart>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Default)]
|
static_assert!(size_of::<WiiPartGroup>() == 8);
|
||||||
struct TicketTimeLimit {
|
|
||||||
enable_time_limit: u32,
|
impl WiiPartGroup {
|
||||||
time_limit: u32,
|
fn part_entry_off(&self) -> u64 { (self.part_entry_off.get() as u64) << 2 }
|
||||||
}
|
}
|
||||||
|
|
||||||
impl FromReader for TicketTimeLimit {
|
#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
|
||||||
type Args<'a> = ();
|
#[repr(C, align(4))]
|
||||||
|
pub struct SignedHeader {
|
||||||
const STATIC_SIZE: usize = struct_size([
|
/// Signature type, always 0x00010001 (RSA-2048)
|
||||||
u32::STATIC_SIZE, // enable_time_limit
|
pub sig_type: U32,
|
||||||
u32::STATIC_SIZE, // time_limit
|
/// RSA-2048 signature
|
||||||
]);
|
pub sig: [u8; 256],
|
||||||
|
_pad: [u8; 60],
|
||||||
fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
|
|
||||||
where R: Read + ?Sized {
|
|
||||||
let enable_time_limit = u32::from_reader(reader)?;
|
|
||||||
let time_limit = u32::from_reader(reader)?;
|
|
||||||
Ok(TicketTimeLimit { enable_time_limit, time_limit })
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, PartialEq)]
|
static_assert!(size_of::<SignedHeader>() == 0x140);
|
||||||
struct Ticket {
|
|
||||||
sig_type: SigType,
|
#[derive(Debug, Clone, PartialEq, Default, FromBytes, FromZeroes, AsBytes)]
|
||||||
sig: [u8; 256],
|
#[repr(C, align(4))]
|
||||||
sig_issuer: [u8; 64],
|
pub struct TicketTimeLimit {
|
||||||
ecdh: [u8; 60],
|
pub enable_time_limit: U32,
|
||||||
enc_key: [u8; 16],
|
pub time_limit: U32,
|
||||||
ticket_id: [u8; 8],
|
|
||||||
console_id: [u8; 4],
|
|
||||||
title_id: [u8; 8],
|
|
||||||
ticket_version: u16,
|
|
||||||
permitted_titles_mask: u32,
|
|
||||||
permit_mask: u32,
|
|
||||||
title_export_allowed: u8,
|
|
||||||
common_key_idx: u8,
|
|
||||||
content_access_permissions: [u8; 64],
|
|
||||||
time_limits: [TicketTimeLimit; 8],
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl FromReader for Ticket {
|
static_assert!(size_of::<TicketTimeLimit>() == 8);
|
||||||
type Args<'a> = ();
|
|
||||||
|
|
||||||
const STATIC_SIZE: usize = 0x2A4;
|
#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
|
||||||
|
#[repr(C, align(4))]
|
||||||
fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
|
pub struct Ticket {
|
||||||
where R: Read + ?Sized {
|
pub header: SignedHeader,
|
||||||
let sig_type = SigType::from_reader(reader)?;
|
pub sig_issuer: [u8; 64],
|
||||||
let sig = <[u8; 256]>::from_reader(reader)?;
|
pub ecdh: [u8; 60],
|
||||||
skip_bytes::<0x3C, _>(reader)?;
|
pub version: u8,
|
||||||
let sig_issuer = <[u8; 64]>::from_reader(reader)?;
|
_pad1: U16,
|
||||||
let ecdh = <[u8; 60]>::from_reader(reader)?;
|
pub title_key: KeyBytes,
|
||||||
skip_bytes::<3, _>(reader)?;
|
_pad2: u8,
|
||||||
let enc_key = <[u8; 16]>::from_reader(reader)?;
|
pub ticket_id: [u8; 8],
|
||||||
skip_bytes::<1, _>(reader)?;
|
pub console_id: [u8; 4],
|
||||||
let ticket_id = <[u8; 8]>::from_reader(reader)?;
|
pub title_id: [u8; 8],
|
||||||
let console_id = <[u8; 4]>::from_reader(reader)?;
|
_pad3: U16,
|
||||||
let title_id = <[u8; 8]>::from_reader(reader)?;
|
pub ticket_title_version: U16,
|
||||||
skip_bytes::<2, _>(reader)?;
|
pub permitted_titles_mask: U32,
|
||||||
let ticket_version = u16::from_reader(reader)?;
|
pub permit_mask: U32,
|
||||||
let permitted_titles_mask = u32::from_reader(reader)?;
|
pub title_export_allowed: u8,
|
||||||
let permit_mask = u32::from_reader(reader)?;
|
pub common_key_idx: u8,
|
||||||
let title_export_allowed = u8::from_reader(reader)?;
|
_pad4: [u8; 48],
|
||||||
let common_key_idx = u8::from_reader(reader)?;
|
pub content_access_permissions: [u8; 64],
|
||||||
skip_bytes::<48, _>(reader)?;
|
_pad5: [u8; 2],
|
||||||
let content_access_permissions = <[u8; 64]>::from_reader(reader)?;
|
pub time_limits: [TicketTimeLimit; 8],
|
||||||
let time_limits = [
|
|
||||||
TicketTimeLimit::from_reader(reader)?,
|
|
||||||
TicketTimeLimit::from_reader(reader)?,
|
|
||||||
TicketTimeLimit::from_reader(reader)?,
|
|
||||||
TicketTimeLimit::from_reader(reader)?,
|
|
||||||
TicketTimeLimit::from_reader(reader)?,
|
|
||||||
TicketTimeLimit::from_reader(reader)?,
|
|
||||||
TicketTimeLimit::from_reader(reader)?,
|
|
||||||
TicketTimeLimit::from_reader(reader)?,
|
|
||||||
];
|
|
||||||
Ok(Ticket {
|
|
||||||
sig_type,
|
|
||||||
sig,
|
|
||||||
sig_issuer,
|
|
||||||
ecdh,
|
|
||||||
enc_key,
|
|
||||||
ticket_id,
|
|
||||||
console_id,
|
|
||||||
title_id,
|
|
||||||
ticket_version,
|
|
||||||
permitted_titles_mask,
|
|
||||||
permit_mask,
|
|
||||||
title_export_allowed,
|
|
||||||
common_key_idx,
|
|
||||||
content_access_permissions,
|
|
||||||
time_limits,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, PartialEq)]
|
static_assert!(size_of::<Ticket>() == 0x2A4);
|
||||||
struct TmdContent {
|
|
||||||
id: u32,
|
#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
|
||||||
index: u16,
|
#[repr(C, align(4))]
|
||||||
content_type: u16,
|
pub struct TmdHeader {
|
||||||
size: u64,
|
pub header: SignedHeader,
|
||||||
hash: [u8; 20],
|
pub sig_issuer: [u8; 64],
|
||||||
|
pub version: u8,
|
||||||
|
pub ca_crl_version: u8,
|
||||||
|
pub signer_crl_version: u8,
|
||||||
|
pub is_vwii: u8,
|
||||||
|
pub ios_id: [u8; 8],
|
||||||
|
pub title_id: [u8; 8],
|
||||||
|
pub title_type: u32,
|
||||||
|
pub group_id: U16,
|
||||||
|
_pad1: [u8; 2],
|
||||||
|
pub region: U16,
|
||||||
|
pub ratings: KeyBytes,
|
||||||
|
_pad2: [u8; 12],
|
||||||
|
pub ipc_mask: [u8; 12],
|
||||||
|
_pad3: [u8; 18],
|
||||||
|
pub access_flags: U32,
|
||||||
|
pub title_version: U16,
|
||||||
|
pub num_contents: U16,
|
||||||
|
pub boot_idx: U16,
|
||||||
|
pub minor_version: U16,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, PartialEq)]
|
static_assert!(size_of::<TmdHeader>() == 0x1E4);
|
||||||
struct Tmd {
|
|
||||||
sig_type: SigType,
|
pub const H3_TABLE_SIZE: usize = 0x18000;
|
||||||
// #[br(count = 256)]
|
|
||||||
sig: Vec<u8>,
|
#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
|
||||||
// #[br(pad_before = 60, count = 64)]
|
#[repr(C, align(4))]
|
||||||
sig_issuer: Vec<u8>,
|
pub struct WiiPartitionHeader {
|
||||||
version: u8,
|
pub ticket: Ticket,
|
||||||
ca_crl_version: u8,
|
tmd_size: U32,
|
||||||
signer_crl_version: u8,
|
tmd_off: U32,
|
||||||
// #[br(pad_before = 1)]
|
cert_chain_size: U32,
|
||||||
ios_id_major: u32,
|
cert_chain_off: U32,
|
||||||
ios_id_minor: u32,
|
h3_table_off: U32,
|
||||||
title_id_major: u32,
|
data_off: U32,
|
||||||
title_id_minor: [u8; 4],
|
data_size: U32,
|
||||||
title_type: u32,
|
|
||||||
group_id: u16,
|
|
||||||
// #[br(pad_before = 62)]
|
|
||||||
access_flags: u32,
|
|
||||||
title_version: u16,
|
|
||||||
num_contents: u16,
|
|
||||||
// #[br(pad_after = 2)]
|
|
||||||
boot_idx: u16,
|
|
||||||
// #[br(count = num_contents)]
|
|
||||||
contents: Vec<TmdContent>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, PartialEq)]
|
static_assert!(size_of::<WiiPartitionHeader>() == 0x2C0);
|
||||||
struct Certificate {
|
|
||||||
sig_type: SigType,
|
|
||||||
// #[br(count = sig_size(sig_type))]
|
|
||||||
sig: Vec<u8>,
|
|
||||||
// #[br(pad_before = 60, count = 64)]
|
|
||||||
issuer: Vec<u8>,
|
|
||||||
key_type: KeyType,
|
|
||||||
// #[br(count = 64)]
|
|
||||||
subject: Vec<u8>,
|
|
||||||
// #[br(count = key_size(key_type))]
|
|
||||||
key: Vec<u8>,
|
|
||||||
modulus: u32,
|
|
||||||
// #[br(pad_after = 52)]
|
|
||||||
pub_exp: u32,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq)]
|
impl WiiPartitionHeader {
|
||||||
// #[br(import(partition_off: u64))]
|
pub fn tmd_size(&self) -> u64 { self.tmd_size.get() as u64 }
|
||||||
struct WiiPartitionHeader {
|
|
||||||
// #[br(seek_before = SeekFrom::Start(partition_off))]
|
|
||||||
ticket: Ticket,
|
|
||||||
tmd_size: u32,
|
|
||||||
// #[br(map = |x: u32| ((x as u64) << 2) + partition_off)]
|
|
||||||
tmd_off: u64,
|
|
||||||
cert_chain_size: u32,
|
|
||||||
// #[br(map = |x: u32| ((x as u64) << 2) + partition_off)]
|
|
||||||
cert_chain_off: u64,
|
|
||||||
// #[br(map = |x: u32| ((x as u64) << 2) + partition_off)]
|
|
||||||
global_hash_table_off: u64,
|
|
||||||
// #[br(map = |x: u32| ((x as u64) << 2) + partition_off)]
|
|
||||||
data_off: u64,
|
|
||||||
// #[br(map = |x: u32| (x as u64) << 2)]
|
|
||||||
data_size: u64,
|
|
||||||
|
|
||||||
// #[br(seek_before = SeekFrom::Start(tmd_off))]
|
pub fn tmd_off(&self) -> u64 { (self.tmd_off.get() as u64) << 2 }
|
||||||
tmd: Tmd,
|
|
||||||
// #[br(seek_before = SeekFrom::Start(cert_chain_off))]
|
|
||||||
ca_cert: Certificate,
|
|
||||||
tmd_cert: Certificate,
|
|
||||||
ticket_cert: Certificate,
|
|
||||||
// #[br(seek_before = SeekFrom::Start(global_hash_table_off), count = 0x18000)]
|
|
||||||
h3_data: Vec<u8>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl FromReader for WiiPartitionHeader {
|
pub fn cert_chain_size(&self) -> u64 { self.cert_chain_size.get() as u64 }
|
||||||
type Args<'a> = u64;
|
|
||||||
|
|
||||||
const STATIC_SIZE: usize = Ticket::STATIC_SIZE;
|
pub fn cert_chain_off(&self) -> u64 { (self.cert_chain_off.get() as u64) << 2 }
|
||||||
|
|
||||||
fn from_reader_args<R>(reader: &mut R, args: Self::Args<'_>) -> io::Result<Self>
|
pub fn h3_table_off(&self) -> u64 { (self.h3_table_off.get() as u64) << 2 }
|
||||||
where R: Read + ?Sized {
|
|
||||||
todo!()
|
pub fn h3_table_size(&self) -> u64 { H3_TABLE_SIZE as u64 }
|
||||||
}
|
|
||||||
|
pub fn data_off(&self) -> u64 { (self.data_off.get() as u64) << 2 }
|
||||||
|
|
||||||
|
pub fn data_size(&self) -> u64 { (self.data_size.get() as u64) << 2 }
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) struct DiscWii {
|
pub(crate) struct DiscWii {
|
||||||
header: Header,
|
header: DiscHeader,
|
||||||
part_info: WiiPartInfo,
|
part_info: Vec<WiiPartInfo>,
|
||||||
|
disc_size: u64,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl DiscWii {
|
impl DiscWii {
|
||||||
pub(crate) fn new(mut stream: &mut dyn ReadStream, header: Header) -> Result<DiscWii> {
|
pub(crate) fn new(
|
||||||
let mut disc = DiscWii { header, part_info: todo!() }; // stream.read_be()?
|
stream: &mut dyn ReadStream,
|
||||||
disc.decrypt_partition_keys()?;
|
header: DiscHeader,
|
||||||
Ok(disc)
|
disc_size: Option<u64>,
|
||||||
|
) -> Result<Self> {
|
||||||
|
let part_info = read_partition_info(stream)?;
|
||||||
|
// Guess disc size if not provided
|
||||||
|
let disc_size = disc_size.unwrap_or_else(|| guess_disc_size(&part_info));
|
||||||
|
Ok(Self { header, part_info, disc_size })
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl DiscWii {
|
pub(crate) fn read_partition_info(stream: &mut dyn ReadStream) -> Result<Vec<WiiPartInfo>> {
|
||||||
pub(crate) fn decrypt_partition_keys(&mut self) -> Result<()> {
|
stream.seek(SeekFrom::Start(WII_PART_GROUP_OFF)).context("Seeking to partition groups")?;
|
||||||
for part in self.part_info.parts.as_mut_slice() {
|
let part_groups: [WiiPartGroup; 4] = read_from(stream).context("Reading partition groups")?;
|
||||||
let ticket = &mut part.part_header.ticket;
|
let mut part_info = Vec::new();
|
||||||
let mut iv: [u8; 16] = [0; 16];
|
for (group_idx, group) in part_groups.iter().enumerate() {
|
||||||
iv[..8].copy_from_slice(&ticket.title_id);
|
let part_count = group.part_count.get();
|
||||||
Aes128Cbc::new(&COMMON_KEYS[ticket.common_key_idx as usize].into(), &iv.into())
|
if part_count == 0 {
|
||||||
.decrypt_padded_mut::<NoPadding>(&mut ticket.enc_key)?;
|
continue;
|
||||||
|
}
|
||||||
|
stream
|
||||||
|
.seek(SeekFrom::Start(group.part_entry_off()))
|
||||||
|
.with_context(|| format!("Seeking to partition group {group_idx}"))?;
|
||||||
|
let entries: Vec<WiiPartEntry> = read_vec(stream, part_count as usize)
|
||||||
|
.with_context(|| format!("Reading partition group {group_idx}"))?;
|
||||||
|
for (part_idx, entry) in entries.iter().enumerate() {
|
||||||
|
let offset = entry.offset();
|
||||||
|
stream
|
||||||
|
.seek(SeekFrom::Start(offset))
|
||||||
|
.with_context(|| format!("Seeking to partition data {group_idx}:{part_idx}"))?;
|
||||||
|
let mut header: WiiPartitionHeader = read_from(stream)
|
||||||
|
.with_context(|| format!("Reading partition header {group_idx}:{part_idx}"))?;
|
||||||
|
|
||||||
|
// Decrypt title key
|
||||||
|
let mut iv: KeyBytes = [0; 16];
|
||||||
|
iv[..8].copy_from_slice(&header.ticket.title_id);
|
||||||
|
let common_key =
|
||||||
|
COMMON_KEYS.get(header.ticket.common_key_idx as usize).ok_or(Error::DiscFormat(
|
||||||
|
format!("unknown common key index {}", header.ticket.common_key_idx),
|
||||||
|
))?;
|
||||||
|
aes_decrypt(common_key, iv, &mut header.ticket.title_key);
|
||||||
|
|
||||||
|
// Open partition stream and read junk data seed
|
||||||
|
let inner = stream
|
||||||
|
.new_window(offset + header.data_off(), header.data_size())
|
||||||
|
.context("Wrapping partition stream")?;
|
||||||
|
let mut stream = PartitionWii {
|
||||||
|
header: header.clone(),
|
||||||
|
tmd: vec![],
|
||||||
|
cert_chain: vec![],
|
||||||
|
h3_table: vec![],
|
||||||
|
stream: Box::new(inner),
|
||||||
|
key: Some(header.ticket.title_key),
|
||||||
|
offset: 0,
|
||||||
|
cur_block: 0,
|
||||||
|
buf: [0; SECTOR_SIZE],
|
||||||
|
validate_hashes: false,
|
||||||
|
};
|
||||||
|
let junk_id: [u8; 4] = read_from(&mut stream).context("Reading junk seed bytes")?;
|
||||||
|
stream
|
||||||
|
.seek(SeekFrom::Start(size_of::<DiscHeader>() as u64))
|
||||||
|
.context("Seeking to partition header")?;
|
||||||
|
let part_header: PartitionHeader =
|
||||||
|
read_from(&mut stream).context("Reading partition header")?;
|
||||||
|
let junk_start = part_header.fst_off(true) + part_header.fst_sz(true);
|
||||||
|
|
||||||
|
// log::debug!(
|
||||||
|
// "Partition: {:?} - {:?}: {:?}",
|
||||||
|
// offset + header.data_off(),
|
||||||
|
// header.data_size(),
|
||||||
|
// header.ticket.title_key
|
||||||
|
// );
|
||||||
|
|
||||||
|
part_info.push(WiiPartInfo {
|
||||||
|
group_idx: group_idx as u32,
|
||||||
|
part_idx: part_idx as u32,
|
||||||
|
offset,
|
||||||
|
kind: entry.kind.get().into(),
|
||||||
|
header,
|
||||||
|
junk_id,
|
||||||
|
junk_start,
|
||||||
|
});
|
||||||
}
|
}
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
Ok(part_info)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn guess_disc_size(part_info: &[WiiPartInfo]) -> u64 {
|
||||||
|
let max_offset = part_info
|
||||||
|
.iter()
|
||||||
|
.flat_map(|v| {
|
||||||
|
[
|
||||||
|
v.offset + v.header.tmd_off() + v.header.tmd_size(),
|
||||||
|
v.offset + v.header.cert_chain_off() + v.header.cert_chain_size(),
|
||||||
|
v.offset + v.header.h3_table_off() + v.header.h3_table_size(),
|
||||||
|
v.offset + v.header.data_off() + v.header.data_size(),
|
||||||
|
]
|
||||||
|
})
|
||||||
|
.max()
|
||||||
|
.unwrap_or(0x50000);
|
||||||
|
if max_offset <= MINI_DVD_SIZE && !part_info.iter().any(|v| v.kind == PartitionKind::Data) {
|
||||||
|
// Datel disc
|
||||||
|
MINI_DVD_SIZE
|
||||||
|
} else if max_offset < SL_DVD_SIZE {
|
||||||
|
SL_DVD_SIZE
|
||||||
|
} else {
|
||||||
|
DL_DVD_SIZE
|
||||||
|
}
|
||||||
|
}
|
||||||
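The size guess takes the furthest byte referenced by any partition (TMD, certificate chain, H3 table or data) and rounds it up to a known physical medium: mini DVD only when no data partition exists (Datel discs), otherwise single- or dual-layer DVD. A short worked example with a hypothetical end offset:

```rust
const MINI_DVD_SIZE: u64 = 1_459_978_240; // ~1.4 GB GameCube mini DVD
const SL_DVD_SIZE: u64 = 4_699_979_776; // ~4.7 GB single-layer DVD
const DL_DVD_SIZE: u64 = 8_511_160_320; // ~8.5 GB dual-layer DVD

fn main() {
    // Hypothetical: the furthest referenced byte is around 3.4 GB,
    // and the disc does have a data partition.
    let max_offset: u64 = 3_435_973_836;
    let guessed = if max_offset < SL_DVD_SIZE { SL_DVD_SIZE } else { DL_DVD_SIZE };
    assert_eq!(guessed, SL_DVD_SIZE);
}
```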
|
|
||||||
|
fn open_partition<'a>(
|
||||||
|
part: &WiiPartInfo,
|
||||||
|
disc_io: &'a dyn DiscIO,
|
||||||
|
options: &OpenOptions,
|
||||||
|
header: &DiscHeader,
|
||||||
|
) -> Result<Box<dyn PartitionBase + 'a>> {
|
||||||
|
let data_off = part.offset + part.header.data_off();
|
||||||
|
let has_crypto = header.no_partition_encryption == 0;
|
||||||
|
let mut base = disc_io.open()?;
|
||||||
|
|
||||||
|
base.seek(SeekFrom::Start(part.offset + part.header.tmd_off()))
|
||||||
|
.context("Seeking to TMD offset")?;
|
||||||
|
let tmd: Vec<u8> =
|
||||||
|
read_vec(&mut base, part.header.tmd_size() as usize).context("Reading TMD")?;
|
||||||
|
|
||||||
|
base.seek(SeekFrom::Start(part.offset + part.header.cert_chain_off()))
|
||||||
|
.context("Seeking to cert chain offset")?;
|
||||||
|
let cert_chain: Vec<u8> = read_vec(&mut base, part.header.cert_chain_size() as usize)
|
||||||
|
.context("Reading cert chain")?;
|
||||||
|
|
||||||
|
base.seek(SeekFrom::Start(part.offset + part.header.h3_table_off()))
|
||||||
|
.context("Seeking to H3 table offset")?;
|
||||||
|
let h3_table: Vec<u8> = read_vec(&mut base, H3_TABLE_SIZE).context("Reading H3 table")?;
|
||||||
|
|
||||||
|
let stream = wrap_windowed(base, data_off, part.header.data_size()).with_context(|| {
|
||||||
|
format!("Wrapping {}:{} partition stream", part.group_idx, part.part_idx)
|
||||||
|
})?;
|
||||||
|
Ok(Box::new(PartitionWii {
|
||||||
|
header: part.header.clone(),
|
||||||
|
tmd,
|
||||||
|
cert_chain,
|
||||||
|
h3_table,
|
||||||
|
stream: Box::new(stream),
|
||||||
|
key: has_crypto.then_some(part.header.ticket.title_key),
|
||||||
|
offset: 0,
|
||||||
|
cur_block: u32::MAX,
|
||||||
|
buf: [0; SECTOR_SIZE],
|
||||||
|
validate_hashes: options.validate_hashes && header.no_partition_hashes == 0,
|
||||||
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
impl DiscBase for DiscWii {
|
impl DiscBase for DiscWii {
|
||||||
fn get_header(&self) -> &Header { &self.header }
|
fn header(&self) -> &DiscHeader { &self.header }
|
||||||
|
|
||||||
fn get_data_partition<'a>(
|
fn partitions(&self) -> Vec<PartitionInfo> {
|
||||||
&self,
|
self.part_info
|
||||||
disc_io: &'a mut dyn DiscIO,
|
|
||||||
validate_hashes: bool,
|
|
||||||
) -> Result<Box<dyn PartReadStream + 'a>> {
|
|
||||||
let part = self
|
|
||||||
.part_info
|
|
||||||
.parts
|
|
||||||
.iter()
|
.iter()
|
||||||
.find(|v| v.part_type == PartitionType::Data)
|
.map(|v| PartitionInfo {
|
||||||
.ok_or_else(|| Error::DiscFormat("Failed to locate data partition".to_string()))?;
|
group_index: v.group_idx,
|
||||||
let data_off = part.part_header.data_off;
|
part_index: v.part_idx,
|
||||||
let has_crypto = disc_io.has_wii_crypto();
|
part_offset: v.offset,
|
||||||
let base = disc_io
|
kind: v.kind,
|
||||||
.begin_read_stream(data_off)
|
data_offset: v.header.data_off(),
|
||||||
.map_err(|e| Error::Io("Opening data partition stream".to_string(), e))?;
|
data_size: v.header.data_size(),
|
||||||
let stream = wrap_windowed(base, data_off, part.part_header.data_size)
|
header: Some(v.header.clone()),
|
||||||
.context("Wrapping data partition stream")?;
|
lfg_seed: v.junk_id,
|
||||||
let result = Box::new(WiiPartReadStream {
|
// junk_start: v.junk_start,
|
||||||
stream,
|
})
|
||||||
crypto: if has_crypto { Some(part.part_header.ticket.enc_key) } else { None },
|
.collect()
|
||||||
offset: 0,
|
|
||||||
cur_block: u32::MAX,
|
|
||||||
buf: [0; 0x8000],
|
|
||||||
validate_hashes,
|
|
||||||
});
|
|
||||||
Ok(result)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_partition<'a>(
|
fn open_partition<'a>(
|
||||||
&self,
|
&self,
|
||||||
disc_io: &'a mut dyn DiscIO,
|
disc_io: &'a dyn DiscIO,
|
||||||
part_type: PartitionType,
|
index: usize,
|
||||||
validate_hashes: bool,
|
options: &OpenOptions,
|
||||||
) -> Result<Box<dyn PartReadStream + 'a>> {
|
) -> Result<Box<dyn PartitionBase + 'a>> {
|
||||||
let part =
|
let part = self.part_info.get(index).ok_or_else(|| {
|
||||||
self.part_info.parts.iter().find(|v| v.part_type == part_type).ok_or_else(|| {
|
Error::DiscFormat(format!("Failed to locate partition index {}", index))
|
||||||
Error::DiscFormat(format!("Failed to locate {:?} partition", part_type))
|
})?;
|
||||||
})?;
|
open_partition(part, disc_io, options, &self.header)
|
||||||
let data_off = part.part_header.data_off;
|
|
||||||
let has_crypto = disc_io.has_wii_crypto();
|
|
||||||
let base = disc_io
|
|
||||||
.begin_read_stream(data_off)
|
|
||||||
.with_context(|| format!("Opening {:?} partition stream", part_type))?;
|
|
||||||
let stream = wrap_windowed(base, data_off, part.part_header.data_size)
|
|
||||||
.with_context(|| format!("Wrapping {:?} partition stream", part_type))?;
|
|
||||||
let result = Box::new(WiiPartReadStream {
|
|
||||||
stream,
|
|
||||||
crypto: if has_crypto { Some(part.part_header.ticket.enc_key) } else { None },
|
|
||||||
offset: 0,
|
|
||||||
cur_block: u32::MAX,
|
|
||||||
buf: [0; 0x8000],
|
|
||||||
validate_hashes,
|
|
||||||
});
|
|
||||||
Ok(result)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn open_partition_kind<'a>(
|
||||||
|
&self,
|
||||||
|
disc_io: &'a dyn DiscIO,
|
||||||
|
part_type: PartitionKind,
|
||||||
|
options: &OpenOptions,
|
||||||
|
) -> Result<Box<dyn PartitionBase + 'a>> {
|
||||||
|
let part = self.part_info.iter().find(|&v| v.kind == part_type).ok_or_else(|| {
|
||||||
|
Error::DiscFormat(format!("Failed to locate {:?} partition", part_type))
|
||||||
|
})?;
|
||||||
|
open_partition(part, disc_io, options, &self.header)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn disc_size(&self) -> u64 { self.disc_size }
|
||||||
}
|
}
|
||||||
|
|
||||||
struct WiiPartReadStream<'a> {
|
struct PartitionWii<'a> {
|
||||||
stream: OwningWindowedReadStream<'a>,
|
header: WiiPartitionHeader,
|
||||||
crypto: Option<[u8; 16]>,
|
tmd: Vec<u8>,
|
||||||
|
cert_chain: Vec<u8>,
|
||||||
|
h3_table: Vec<u8>,
|
||||||
|
|
||||||
|
stream: Box<dyn ReadStream + 'a>,
|
||||||
|
key: Option<KeyBytes>,
|
||||||
offset: u64,
|
offset: u64,
|
||||||
cur_block: u32,
|
cur_block: u32,
|
||||||
buf: [u8; SECTOR_SIZE],
|
buf: [u8; SECTOR_SIZE],
|
||||||
validate_hashes: bool,
|
validate_hashes: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a> PartReadStream for WiiPartReadStream<'a> {
|
impl<'a> PartitionBase for PartitionWii<'a> {
|
||||||
fn begin_file_stream(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream> {
|
fn meta(&mut self) -> Result<Box<PartitionMeta>> {
|
||||||
assert_eq!(node.kind, NodeKind::File);
|
self.seek(SeekFrom::Start(0)).context("Seeking to partition header")?;
|
||||||
self.new_window((node.offset as u64) << 2, node.length as u64)
|
let mut meta = read_part_header(self, true)?;
|
||||||
|
meta.raw_ticket = Some(self.header.ticket.as_bytes().to_vec());
|
||||||
|
meta.raw_tmd = Some(self.tmd.clone());
|
||||||
|
meta.raw_cert_chain = Some(self.cert_chain.clone());
|
||||||
|
meta.raw_h3_table = Some(self.h3_table.clone());
|
||||||
|
Ok(meta)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn read_header(&mut self) -> Result<Box<dyn PartHeader>> {
|
fn open_file(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream> {
|
||||||
self.seek(SeekFrom::Start(0)).context("Seeking to partition header")?;
|
assert_eq!(node.kind(), NodeKind::File);
|
||||||
todo!()
|
self.new_window(node.offset(true), node.length(true))
|
||||||
// Ok(Box::from(self.read_be::<WiiPartition>()?))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn ideal_buffer_size(&self) -> usize { BLOCK_SIZE }
|
fn ideal_buffer_size(&self) -> usize { BLOCK_SIZE }
|
||||||
}
|
}
|
||||||
|
|
||||||
#[inline(always)]
|
#[inline(always)]
|
||||||
fn as_digest(slice: &[u8; 20]) -> digest::Output<Sha1> { (*slice).into() }
|
pub(crate) fn as_digest(slice: &[u8; 20]) -> digest::Output<Sha1> { (*slice).into() }
|
||||||
|
|
||||||
fn decrypt_block(part: &mut WiiPartReadStream, cluster: u32) -> io::Result<()> {
|
fn decrypt_block(part: &mut PartitionWii, cluster: u32) -> io::Result<()> {
|
||||||
part.stream.read_exact(&mut part.buf)?;
|
part.stream.read_exact(&mut part.buf)?;
|
||||||
if let Some(key) = &part.crypto {
|
if let Some(key) = &part.key {
|
||||||
// Fetch IV before decrypting header
|
// Fetch IV before decrypting header
|
||||||
let iv_bytes = array_ref![part.buf, 0x3d0, 16];
|
let iv = *array_ref![part.buf, 0x3d0, 16];
|
||||||
let iv = Block::from(*iv_bytes);
|
|
||||||
// Don't need to decrypt header if we're not validating hashes
|
// Don't need to decrypt header if we're not validating hashes
|
||||||
if part.validate_hashes {
|
if part.validate_hashes {
|
||||||
Aes128Cbc::new(key.into(), &Block::from([0; 16]))
|
aes_decrypt(key, [0; 16], &mut part.buf[..HASHES_SIZE]);
|
||||||
.decrypt_padded_mut::<NoPadding>(&mut part.buf[..HASHES_SIZE])
|
|
||||||
.expect("Failed to decrypt header");
|
|
||||||
}
|
}
|
||||||
Aes128Cbc::new(key.into(), &iv)
|
aes_decrypt(key, iv, &mut part.buf[HASHES_SIZE..]);
|
||||||
.decrypt_padded_mut::<NoPadding>(&mut part.buf[HASHES_SIZE..])
|
|
||||||
.expect("Failed to decrypt block");
|
|
||||||
}
|
}
|
||||||
if part.validate_hashes {
|
if part.validate_hashes {
|
||||||
let (mut group, sub_group) = div_rem(cluster as usize, 8);
|
let (mut group, sub_group) = div_rem(cluster as usize, 8);
|
||||||
|
@@ -449,7 +448,13 @@ fn decrypt_block(part: &mut WiiPartReadStream, cluster: u32) -> io::Result<()> {
|
||||||
let expected = as_digest(array_ref![part.buf, i * 20, 20]);
|
let expected = as_digest(array_ref![part.buf, i * 20, 20]);
|
||||||
let output = hash.finalize();
|
let output = hash.finalize();
|
||||||
if output != expected {
|
if output != expected {
|
||||||
panic!("Invalid H0 hash! (block {:?}) {:x}\n\texpected {:x}", i, output, expected);
|
return Err(io::Error::new(
|
||||||
|
io::ErrorKind::InvalidData,
|
||||||
|
format!(
|
||||||
|
"Invalid H0 hash! (block {:?}) {:x}\n\texpected {:x}",
|
||||||
|
i, output, expected
|
||||||
|
),
|
||||||
|
));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// H1 hash
|
// H1 hash
|
||||||
|
@@ -459,10 +464,13 @@ fn decrypt_block(part: &mut WiiPartReadStream, cluster: u32) -> io::Result<()> {
|
||||||
let expected = as_digest(array_ref![part.buf, 0x280 + sub_group * 20, 20]);
|
let expected = as_digest(array_ref![part.buf, 0x280 + sub_group * 20, 20]);
|
||||||
let output = hash.finalize();
|
let output = hash.finalize();
|
||||||
if output != expected {
|
if output != expected {
|
||||||
panic!(
|
return Err(io::Error::new(
|
||||||
"Invalid H1 hash! (subgroup {:?}) {:x}\n\texpected {:x}",
|
io::ErrorKind::InvalidData,
|
||||||
sub_group, output, expected
|
format!(
|
||||||
);
|
"Invalid H1 hash! (subgroup {:?}) {:x}\n\texpected {:x}",
|
||||||
|
sub_group, output, expected
|
||||||
|
),
|
||||||
|
));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// H2 hash
|
// H2 hash
|
||||||
|
@@ -472,17 +480,20 @@ fn decrypt_block(part: &mut WiiPartReadStream, cluster: u32) -> io::Result<()> {
|
||||||
let expected = as_digest(array_ref![part.buf, 0x340 + group * 20, 20]);
|
let expected = as_digest(array_ref![part.buf, 0x340 + group * 20, 20]);
|
||||||
let output = hash.finalize();
|
let output = hash.finalize();
|
||||||
if output != expected {
|
if output != expected {
|
||||||
panic!(
|
return Err(io::Error::new(
|
||||||
"Invalid H2 hash! (group {:?}) {:x}\n\texpected {:x}",
|
io::ErrorKind::InvalidData,
|
||||||
group, output, expected
|
format!(
|
||||||
);
|
"Invalid H2 hash! (group {:?}) {:x}\n\texpected {:x}",
|
||||||
|
group, output, expected
|
||||||
|
),
|
||||||
|
));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
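As a worked illustration of the H0 comparison above (a sketch, not code from this commit): each of the 31 0x400-byte data clusters in a decrypted sector is hashed with SHA-1 and compared against the matching 20-byte entry at the start of the hash block.

```rust
use sha1::{Digest, Sha1};

// Hypothetical standalone check for one H0 entry of a decrypted 0x8000-byte
// sector: data cluster `i` starts at 0x400 + i * 0x400, and its expected
// hash sits at offset i * 20 in the hash block.
fn check_h0(buf: &[u8; 0x8000], i: usize) -> bool {
    assert!(i < 31);
    let data = &buf[0x400 + i * 0x400..0x400 + (i + 1) * 0x400];
    let expected = &buf[i * 20..(i + 1) * 20];
    Sha1::digest(data).as_slice() == expected
}
```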
|
|
||||||
impl<'a> Read for WiiPartReadStream<'a> {
|
impl<'a> Read for PartitionWii<'a> {
|
||||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||||
let (block, block_offset) = div_rem(self.offset, BLOCK_SIZE as u64);
|
let (block, block_offset) = div_rem(self.offset, BLOCK_SIZE as u64);
|
||||||
let mut block = block as u32;
|
let mut block = block as u32;
|
||||||
|
@@ -521,16 +532,16 @@ fn to_block_size(v: u64) {
|
||||||
(v / SECTOR_SIZE as u64) * BLOCK_SIZE as u64 + (v % SECTOR_SIZE as u64)
|
(v / SECTOR_SIZE as u64) * BLOCK_SIZE as u64 + (v % SECTOR_SIZE as u64)
|
||||||
}
|
}
|
||||||
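A short worked example of `to_block_size`, assuming `SECTOR_SIZE = 0x8000` and `BLOCK_SIZE = 0x7C00` (each encrypted sector carries 0x7C00 bytes of user data after its 0x400-byte hash block):

```rust
const SECTOR_SIZE: u64 = 0x8000;
const BLOCK_SIZE: u64 = 0x7C00;

// Rescale a raw (encrypted) partition length into decrypted data length:
// every whole sector contributes BLOCK_SIZE bytes, the tail passes through.
fn to_block_size(v: u64) -> u64 {
    (v / SECTOR_SIZE) * BLOCK_SIZE + (v % SECTOR_SIZE)
}

fn main() {
    assert_eq!(to_block_size(2 * SECTOR_SIZE), 2 * BLOCK_SIZE);
    assert_eq!(to_block_size(SECTOR_SIZE + 0x10), BLOCK_SIZE + 0x10);
}
```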
|
|
||||||
impl<'a> Seek for WiiPartReadStream<'a> {
|
impl<'a> Seek for PartitionWii<'a> {
|
||||||
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
|
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
|
||||||
self.offset = match pos {
|
self.offset = match pos {
|
||||||
SeekFrom::Start(v) => v,
|
SeekFrom::Start(v) => v,
|
||||||
SeekFrom::End(v) => (self.stable_stream_len()? as i64 + v) as u64,
|
SeekFrom::End(v) => self.stable_stream_len()?.saturating_add_signed(v),
|
||||||
SeekFrom::Current(v) => (self.offset as i64 + v) as u64,
|
SeekFrom::Current(v) => self.offset.saturating_add_signed(v),
|
||||||
};
|
};
|
||||||
let block = (self.offset / BLOCK_SIZE as u64) as u32;
|
let block = self.offset / BLOCK_SIZE as u64;
|
||||||
if block != self.cur_block {
|
if block as u32 != self.cur_block {
|
||||||
self.stream.seek(SeekFrom::Start(block as u64 * SECTOR_SIZE as u64))?;
|
self.stream.seek(SeekFrom::Start(block * SECTOR_SIZE as u64))?;
|
||||||
self.cur_block = u32::MAX;
|
self.cur_block = u32::MAX;
|
||||||
}
|
}
|
||||||
Ok(self.offset)
|
Ok(self.offset)
|
||||||
|
@@ -539,45 +550,10 @@ impl<'a> Seek for WiiPartReadStream<'a> {
|
||||||
fn stream_position(&mut self) -> io::Result<u64> { Ok(self.offset) }
|
fn stream_position(&mut self) -> io::Result<u64> { Ok(self.offset) }
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a> ReadStream for WiiPartReadStream<'a> {
|
impl<'a> ReadStream for PartitionWii<'a> {
|
||||||
fn stable_stream_len(&mut self) -> io::Result<u64> {
|
fn stable_stream_len(&mut self) -> io::Result<u64> {
|
||||||
Ok(to_block_size(self.stream.stable_stream_len()?))
|
Ok(to_block_size(self.stream.stable_stream_len()?))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
|
fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Debug, PartialEq)]
|
|
||||||
pub(crate) struct WiiPartition {
|
|
||||||
header: Header,
|
|
||||||
// #[br(seek_before = SeekFrom::Start(0x400))]
|
|
||||||
part_header: PartitionHeader,
|
|
||||||
// bi2_header: BI2Header,
|
|
||||||
// #[br(seek_before = SeekFrom::Start((part_header.fst_off as u64) << 2))]
|
|
||||||
// #[br(parse_with = node_parser)]
|
|
||||||
root_node: NodeType,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl PartHeader for WiiPartition {
|
|
||||||
fn root_node(&self) -> &NodeType { &self.root_node }
|
|
||||||
|
|
||||||
fn find_node(&self, path: &str) -> Option<&NodeType> { find_node(&self.root_node, path) }
|
|
||||||
|
|
||||||
fn boot_bytes(&self) -> &[u8] { todo!() }
|
|
||||||
|
|
||||||
fn bi2_bytes(&self) -> &[u8] { todo!() }
|
|
||||||
|
|
||||||
fn apploader_bytes(&self) -> &[u8] { todo!() }
|
|
||||||
|
|
||||||
fn fst_bytes(&self) -> &[u8] { todo!() }
|
|
||||||
|
|
||||||
fn dol_bytes(&self) -> &[u8] { todo!() }
|
|
||||||
|
|
||||||
fn disc_header(&self) -> &Header { todo!() }
|
|
||||||
|
|
||||||
fn partition_header(&self) -> &PartitionHeader { todo!() }
|
|
||||||
|
|
||||||
fn apploader_header(&self) -> &AppLoaderHeader { todo!() }
|
|
||||||
|
|
||||||
fn dol_header(&self) -> &DolHeader { todo!() }
|
|
||||||
}
|
|
||||||
|
|
297
src/fst.rs
|
@@ -1,17 +1,11 @@
|
||||||
//! Disc file system types
|
//! Disc file system types
|
||||||
|
|
||||||
use std::{
|
use std::{borrow::Cow, ffi::CStr, mem::size_of};
|
||||||
ffi::CString,
|
|
||||||
io,
|
|
||||||
io::{Read, Seek, SeekFrom},
|
|
||||||
};
|
|
||||||
|
|
||||||
use encoding_rs::SHIFT_JIS;
|
use encoding_rs::SHIFT_JIS;
|
||||||
|
use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes};
|
||||||
|
|
||||||
use crate::{
|
use crate::{static_assert, Result};
|
||||||
util::reader::{struct_size, FromReader, DYNAMIC_SIZE, U24},
|
|
||||||
Result, ResultContext,
|
|
||||||
};
|
|
||||||
|
|
||||||
/// File system node kind.
|
/// File system node kind.
|
||||||
#[derive(Clone, Debug, PartialEq)]
|
#[derive(Clone, Debug, PartialEq)]
|
||||||
|
@@ -20,180 +14,161 @@ pub enum NodeKind {
|
||||||
File,
|
File,
|
||||||
/// Node is a directory.
|
/// Node is a directory.
|
||||||
Directory,
|
Directory,
|
||||||
}
|
/// Invalid node kind. (Should not normally occur)
|
||||||
|
Invalid,
|
||||||
impl FromReader for NodeKind {
|
|
||||||
type Args<'a> = ();
|
|
||||||
|
|
||||||
const STATIC_SIZE: usize = 1;
|
|
||||||
|
|
||||||
fn from_reader_args<R>(_reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
|
|
||||||
where R: Read + ?Sized {
|
|
||||||
match u8::from_reader(_reader)? {
|
|
||||||
0 => Ok(NodeKind::File),
|
|
||||||
1 => Ok(NodeKind::Directory),
|
|
||||||
_ => Err(io::Error::new(io::ErrorKind::InvalidData, "invalid node kind")),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// An individual file system node.
|
/// An individual file system node.
|
||||||
#[derive(Clone, Debug, PartialEq)]
|
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
|
||||||
|
#[repr(C, align(4))]
|
||||||
pub struct Node {
|
pub struct Node {
|
||||||
|
kind: u8,
|
||||||
|
// u24 big-endian
|
||||||
|
name_offset: [u8; 3],
|
||||||
|
offset: U32,
|
||||||
|
length: U32,
|
||||||
|
}
|
||||||
|
|
||||||
|
static_assert!(size_of::<Node>() == 12);
|
||||||
|
|
||||||
|
impl Node {
|
||||||
/// File system node type.
|
/// File system node type.
|
||||||
pub kind: NodeKind,
|
pub fn kind(&self) -> NodeKind {
|
||||||
|
match self.kind {
|
||||||
|
0 => NodeKind::File,
|
||||||
|
1 => NodeKind::Directory,
|
||||||
|
_ => NodeKind::Invalid,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Whether the node is a file.
|
||||||
|
pub fn is_file(&self) -> bool { self.kind == 0 }
|
||||||
|
|
||||||
|
/// Whether the node is a directory.
|
||||||
|
pub fn is_dir(&self) -> bool { self.kind == 1 }
|
||||||
|
|
||||||
/// Offset in the string table to the filename.
|
/// Offset in the string table to the filename.
|
||||||
pub name_offset: u32,
|
pub fn name_offset(&self) -> u32 {
|
||||||
|
u32::from_be_bytes([0, self.name_offset[0], self.name_offset[1], self.name_offset[2]])
|
||||||
|
}
|
||||||
|
|
||||||
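A quick illustration of the 24-bit big-endian decode used by `name_offset()` (the values are made up):

```rust
// The FST stores the string-table offset as three big-endian bytes; padding
// with a leading zero byte turns it into an ordinary u32.
fn u24_be(bytes: [u8; 3]) -> u32 {
    u32::from_be_bytes([0, bytes[0], bytes[1], bytes[2]])
}

fn main() {
    assert_eq!(u24_be([0x00, 0x01, 0x02]), 0x0102);
    assert_eq!(u24_be([0x12, 0x34, 0x56]), 0x12_3456);
}
```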
/// For files, this is the partition offset of the file data. (Wii: >> 2)
|
/// For files, this is the partition offset of the file data. (Wii: >> 2)
|
||||||
///
|
///
|
||||||
/// For directories, this is the children start offset in the FST.
|
/// For directories, this is the parent node index in the FST.
|
||||||
pub offset: u32,
|
pub fn offset(&self, is_wii: bool) -> u64 {
|
||||||
|
if is_wii && self.kind == 0 {
|
||||||
|
self.offset.get() as u64 * 4
|
||||||
|
} else {
|
||||||
|
self.offset.get() as u64
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// For files, this is the byte size of the file.
|
/// For files, this is the byte size of the file. (Wii: >> 2)
|
||||||
///
|
///
|
||||||
/// For directories, this is the children end offset in the FST.
|
/// For directories, this is the child end index in the FST.
|
||||||
///
|
///
|
||||||
/// Number of child files and directories recursively is `length - offset`.
|
/// Number of child files and directories recursively is `length - offset`.
|
||||||
pub length: u32,
|
pub fn length(&self, is_wii: bool) -> u64 {
|
||||||
|
if is_wii && self.kind == 0 {
|
||||||
/// The node name.
|
self.length.get() as u64 * 4
|
||||||
pub name: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl FromReader for Node {
|
|
||||||
type Args<'a> = ();
|
|
||||||
|
|
||||||
const STATIC_SIZE: usize = struct_size([
|
|
||||||
NodeKind::STATIC_SIZE, // type
|
|
||||||
U24::STATIC_SIZE, // name_offset
|
|
||||||
u32::STATIC_SIZE, // offset
|
|
||||||
u32::STATIC_SIZE, // length
|
|
||||||
]);
|
|
||||||
|
|
||||||
fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
|
|
||||||
where R: Read + ?Sized {
|
|
||||||
let kind = NodeKind::from_reader(reader)?;
|
|
||||||
let name_offset = U24::from_reader(reader)?.0;
|
|
||||||
let offset = u32::from_reader(reader)?;
|
|
||||||
let length = u32::from_reader(reader)?;
|
|
||||||
Ok(Node { kind, offset, length, name_offset, name: Default::default() })
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Contains a file system node, and if a directory, its children.
|
|
||||||
#[derive(Clone, Debug, PartialEq)]
|
|
||||||
pub enum NodeType {
|
|
||||||
/// A single file node.
|
|
||||||
File(Node),
|
|
||||||
/// A directory node with children.
|
|
||||||
Directory(Node, Vec<NodeType>),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl FromReader for NodeType {
|
|
||||||
type Args<'a> = &'a mut u32;
|
|
||||||
|
|
||||||
const STATIC_SIZE: usize = DYNAMIC_SIZE;
|
|
||||||
|
|
||||||
fn from_reader_args<R>(reader: &mut R, idx: &mut u32) -> io::Result<Self>
|
|
||||||
where R: Read + ?Sized {
|
|
||||||
let node = Node::from_reader(reader)?;
|
|
||||||
*idx += 1;
|
|
||||||
Ok(if node.kind == NodeKind::Directory {
|
|
||||||
let mut children = Vec::with_capacity((node.length - *idx) as usize);
|
|
||||||
while *idx < node.length {
|
|
||||||
children.push(NodeType::from_reader_args(reader, idx)?);
|
|
||||||
}
|
|
||||||
NodeType::Directory(node, children)
|
|
||||||
} else {
|
} else {
|
||||||
NodeType::File(node)
|
self.length.get() as u64
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn read_node_name<R>(
|
|
||||||
reader: &mut R,
|
|
||||||
string_base: u64,
|
|
||||||
node: &mut NodeType,
|
|
||||||
root: bool,
|
|
||||||
) -> io::Result<()>
|
|
||||||
where
|
|
||||||
R: Read + Seek + ?Sized,
|
|
||||||
{
|
|
||||||
let mut decode_name = |v: &mut Node| -> io::Result<()> {
|
|
||||||
if !root {
|
|
||||||
let offset = string_base + v.name_offset as u64;
|
|
||||||
reader.seek(SeekFrom::Start(offset))?;
|
|
||||||
|
|
||||||
let c_string = CString::from_reader(reader)?;
|
|
||||||
let (decoded, _, errors) = SHIFT_JIS.decode(c_string.as_bytes());
|
|
||||||
if errors {
|
|
||||||
return Err(io::Error::new(io::ErrorKind::InvalidData, "invalid shift-jis"));
|
|
||||||
}
|
|
||||||
v.name = decoded.into_owned();
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
};
|
|
||||||
match node {
|
|
||||||
NodeType::File(inner) => {
|
|
||||||
decode_name(inner)?;
|
|
||||||
}
|
|
||||||
NodeType::Directory(inner, children) => {
|
|
||||||
decode_name(inner)?;
|
|
||||||
for child in children {
|
|
||||||
read_node_name(reader, string_base, child, false)?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn read_fst<R>(reader: &mut R) -> Result<NodeType>
|
|
||||||
where R: Read + Seek + ?Sized {
|
|
||||||
let mut node = NodeType::from_reader_args(reader, &mut 0).context("Parsing FST nodes")?;
|
|
||||||
let string_base = reader.stream_position().context("Reading FST end position")?;
|
|
||||||
read_node_name(reader, string_base, &mut node, true).context("Reading FST node names")?;
|
|
||||||
Ok(node)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn matches_name(node: &NodeType, name: &str) -> bool {
|
|
||||||
match node {
|
|
||||||
NodeType::File(v) => v.name.as_str().eq_ignore_ascii_case(name),
|
|
||||||
NodeType::Directory(v, _) => {
|
|
||||||
v.name.is_empty() /* root */ || v.name.as_str().eq_ignore_ascii_case(name)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn find_node<'a>(mut node: &'a NodeType, path: &str) -> Option<&'a NodeType> {
|
/// A view into the file system tree (FST).
|
||||||
let mut split = path.split('/');
|
pub struct Fst<'a> {
|
||||||
let mut current = split.next();
|
pub nodes: &'a [Node],
|
||||||
while current.is_some() {
|
pub string_table: &'a [u8],
|
||||||
if matches_name(node, current.unwrap()) {
|
}
|
||||||
match node {
|
|
||||||
NodeType::File(_) => {
|
impl<'a> Fst<'a> {
|
||||||
return if split.next().is_none() { Some(node) } else { None };
|
/// Create a new FST view from a buffer.
|
||||||
|
pub fn new(buf: &'a [u8]) -> Result<Self, &'static str> {
|
||||||
|
let Some(root_node) = Node::ref_from_prefix(buf) else {
|
||||||
|
return Err("FST root node not found");
|
||||||
|
};
|
||||||
|
// String table starts after the last node
|
||||||
|
let string_base = root_node.length(false) * size_of::<Node>() as u64;
|
||||||
|
if string_base >= buf.len() as u64 {
|
||||||
|
return Err("FST string table out of bounds");
|
||||||
|
}
|
||||||
|
let (node_buf, string_table) = buf.split_at(string_base as usize);
|
||||||
|
let nodes = Node::slice_from(node_buf).unwrap();
|
||||||
|
Ok(Self { nodes, string_table })
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Iterate over the nodes in the FST.
|
||||||
|
pub fn iter(&self) -> FstIter { FstIter { fst: self, idx: 1 } }
|
||||||
|
|
||||||
|
/// Get the name of a node.
|
||||||
|
pub fn get_name(&self, node: &Node) -> Result<Cow<str>, String> {
|
||||||
|
let name_buf = self.string_table.get(node.name_offset() as usize..).ok_or_else(|| {
|
||||||
|
format!(
|
||||||
|
"FST: name offset {} out of bounds (string table size: {})",
|
||||||
|
node.name_offset(),
|
||||||
|
self.string_table.len()
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
let c_string = CStr::from_bytes_until_nul(name_buf).map_err(|_| {
|
||||||
|
format!("FST: name at offset {} not null-terminated", node.name_offset())
|
||||||
|
})?;
|
||||||
|
let (decoded, _, errors) = SHIFT_JIS.decode(c_string.to_bytes());
|
||||||
|
if errors {
|
||||||
|
return Err(format!("FST: Failed to decode name at offset {}", node.name_offset()));
|
||||||
|
}
|
||||||
|
Ok(decoded)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Finds a particular file or directory by path.
|
||||||
|
pub fn find(&self, path: &str) -> Option<(usize, &Node)> {
|
||||||
|
let mut split = path.trim_matches('/').split('/');
|
||||||
|
let mut current = split.next()?;
|
||||||
|
let mut idx = 1;
|
||||||
|
let mut stop_at = None;
|
||||||
|
while let Some(node) = self.nodes.get(idx) {
|
||||||
|
if self.get_name(node).as_ref().map_or(false, |name| name.eq_ignore_ascii_case(current))
|
||||||
|
{
|
||||||
|
if let Some(next) = split.next() {
|
||||||
|
current = next;
|
||||||
|
} else {
|
||||||
|
return Some((idx, node));
|
||||||
}
|
}
|
||||||
NodeType::Directory(v, c) => {
|
// Descend into directory
|
||||||
// Find child
|
idx += 1;
|
||||||
if !v.name.is_empty() || current.unwrap().is_empty() {
|
stop_at = Some(node.length(false) as usize + idx);
|
||||||
current = split.next();
|
} else if node.is_dir() {
|
||||||
}
|
// Skip directory
|
||||||
if current.is_none() || current.unwrap().is_empty() {
|
idx = node.length(false) as usize;
|
||||||
return if split.next().is_none() { Some(node) } else { None };
|
} else {
|
||||||
}
|
// Skip file
|
||||||
for x in c {
|
idx += 1;
|
||||||
if matches_name(x, current.unwrap()) {
|
}
|
||||||
node = x;
|
if let Some(stop) = stop_at {
|
||||||
break;
|
if idx >= stop {
|
||||||
}
|
break;
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
break;
|
|
||||||
}
|
}
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Iterator over the nodes in an FST.
|
||||||
|
pub struct FstIter<'a> {
|
||||||
|
fst: &'a Fst<'a>,
|
||||||
|
idx: usize,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'a> Iterator for FstIter<'a> {
|
||||||
|
type Item = (usize, &'a Node, Result<Cow<'a, str>, String>);
|
||||||
|
|
||||||
|
fn next(&mut self) -> Option<Self::Item> {
|
||||||
|
let idx = self.idx;
|
||||||
|
let node = self.fst.nodes.get(idx)?;
|
||||||
|
let name = self.fst.get_name(node);
|
||||||
|
self.idx += 1;
|
||||||
|
Some((idx, node, name))
|
||||||
}
|
}
|
||||||
None
|
|
||||||
}
|
}
|
||||||
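A hedged usage sketch of the new zero-copy `Fst` view (the buffer, the `/opening.bnr` path, and the `nod::fst` module path are illustrative assumptions):

```rust
use nod::fst::Fst; // assumption: the fst module is exposed publicly

// `fst_buf` is assumed to hold the raw FST of a GameCube partition
// (is_wii = false); only the Fst/Node API introduced above is used.
fn dump_fst(fst_buf: &[u8]) -> Result<(), String> {
    let fst = Fst::new(fst_buf).map_err(|e| e.to_string())?;
    for (idx, node, name) in fst.iter() {
        if node.is_file() {
            println!("{}: {} ({} bytes)", idx, name?, node.length(false));
        }
    }
    if let Some((idx, node)) = fst.find("/opening.bnr") {
        println!("opening.bnr is node {}, data at {:#x}", idx, node.offset(false));
    }
    Ok(())
}
```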
|
|
|
@@ -0,0 +1,267 @@
|
||||||
|
use std::{
|
||||||
|
cmp::min,
|
||||||
|
io,
|
||||||
|
io::{BufReader, Read, Seek, SeekFrom},
|
||||||
|
mem::size_of,
|
||||||
|
path::Path,
|
||||||
|
};
|
||||||
|
|
||||||
|
use zerocopy::{little_endian::*, AsBytes, FromBytes, FromZeroes};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
disc::{gcn::DiscGCN, wii::DiscWii, DiscBase, DL_DVD_SIZE, SECTOR_SIZE},
|
||||||
|
io::{nkit::NKitHeader, split::SplitFileReader, DiscIO, MagicBytes},
|
||||||
|
static_assert,
|
||||||
|
util::{
|
||||||
|
lfg::LaggedFibonacci,
|
||||||
|
reader::{read_box_slice, read_from},
|
||||||
|
},
|
||||||
|
DiscHeader, DiscMeta, Error, PartitionInfo, ReadStream, Result, ResultContext,
|
||||||
|
};
|
||||||
|
|
||||||
|
pub const CISO_MAGIC: MagicBytes = *b"CISO";
|
||||||
|
pub const CISO_MAP_SIZE: usize = SECTOR_SIZE - 8;
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
|
||||||
|
#[repr(C, align(4))]
|
||||||
|
struct CISOHeader {
|
||||||
|
magic: MagicBytes,
|
||||||
|
// little endian
|
||||||
|
block_size: U32,
|
||||||
|
block_present: [u8; CISO_MAP_SIZE],
|
||||||
|
}
|
||||||
|
|
||||||
|
static_assert!(size_of::<CISOHeader>() == SECTOR_SIZE);
|
||||||
|
|
||||||
|
pub struct DiscIOCISO {
|
||||||
|
inner: SplitFileReader,
|
||||||
|
header: CISOHeader,
|
||||||
|
block_map: [u16; CISO_MAP_SIZE],
|
||||||
|
nkit_header: Option<NKitHeader>,
|
||||||
|
junk_blocks: Option<Box<[u8]>>,
|
||||||
|
partitions: Vec<PartitionInfo>,
|
||||||
|
disc_num: u8,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl DiscIOCISO {
|
||||||
|
pub fn new(filename: &Path) -> Result<Self> {
|
||||||
|
let mut inner = BufReader::new(SplitFileReader::new(filename)?);
|
||||||
|
|
||||||
|
// Read header
|
||||||
|
let header: CISOHeader = read_from(&mut inner).context("Reading CISO header")?;
|
||||||
|
if header.magic != CISO_MAGIC {
|
||||||
|
return Err(Error::DiscFormat("Invalid CISO magic".to_string()));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build block map
|
||||||
|
let mut block_map = [0u16; CISO_MAP_SIZE];
|
||||||
|
let mut block = 0u16;
|
||||||
|
for (presence, out) in header.block_present.iter().zip(block_map.iter_mut()) {
|
||||||
|
if *presence == 1 {
|
||||||
|
*out = block;
|
||||||
|
block += 1;
|
||||||
|
} else {
|
||||||
|
*out = u16::MAX;
|
||||||
|
}
|
||||||
|
}
|
||||||
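A sketch of the logical-to-physical translation that the block map enables (hypothetical helper; the header occupies one full 0x8000-byte sector, matching `size_of::<CISOHeader>()`):

```rust
// Map an absolute disc offset to an offset within the CISO file, or None if
// the block is absent (zero-filled or reconstructable NKit junk data).
fn ciso_file_offset(block_map: &[u16], block_size: u64, pos: u64) -> Option<u64> {
    let block = (pos / block_size) as usize;
    let offset_in_block = pos % block_size;
    let phys_block = *block_map.get(block)?;
    if phys_block == u16::MAX {
        return None;
    }
    Some(0x8000 + phys_block as u64 * block_size + offset_in_block)
}
```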
|
let file_size = SECTOR_SIZE as u64 + block as u64 * header.block_size.get() as u64;
|
||||||
|
if file_size > inner.get_ref().len() {
|
||||||
|
return Err(Error::DiscFormat(format!(
|
||||||
|
"CISO file size mismatch: expected at least {} bytes, got {}",
|
||||||
|
file_size,
|
||||||
|
inner.get_ref().len()
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read NKit header if present (after CISO data)
|
||||||
|
let nkit_header = if inner.get_ref().len() > file_size + 4 {
|
||||||
|
inner.seek(SeekFrom::Start(file_size)).context("Seeking to NKit header")?;
|
||||||
|
NKitHeader::try_read_from(&mut inner)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
|
// Read junk data bitstream if present (after NKit header)
|
||||||
|
let junk_blocks = if nkit_header.is_some() {
|
||||||
|
let n = 1 + DL_DVD_SIZE / header.block_size.get() as u64 / 8;
|
||||||
|
Some(read_box_slice(&mut inner, n as usize).context("Reading NKit bitstream")?)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
|
let (partitions, disc_num) = if junk_blocks.is_some() {
|
||||||
|
let mut stream: Box<dyn ReadStream> = Box::new(CISOReadStream {
|
||||||
|
inner: BufReader::new(inner.get_ref().clone()),
|
||||||
|
block_size: header.block_size.get(),
|
||||||
|
block_map,
|
||||||
|
cur_block: u16::MAX,
|
||||||
|
pos: 0,
|
||||||
|
junk_blocks: None,
|
||||||
|
partitions: vec![],
|
||||||
|
disc_num: 0,
|
||||||
|
});
|
||||||
|
let header: DiscHeader = read_from(stream.as_mut()).context("Reading disc header")?;
|
||||||
|
let disc_num = header.disc_num;
|
||||||
|
let disc_base: Box<dyn DiscBase> = if header.is_wii() {
|
||||||
|
Box::new(DiscWii::new(stream.as_mut(), header, None)?)
|
||||||
|
} else if header.is_gamecube() {
|
||||||
|
Box::new(DiscGCN::new(stream.as_mut(), header, None)?)
|
||||||
|
} else {
|
||||||
|
return Err(Error::DiscFormat(format!(
|
||||||
|
"Invalid GC/Wii magic: {:#010X}/{:#010X}",
|
||||||
|
header.gcn_magic.get(),
|
||||||
|
header.wii_magic.get()
|
||||||
|
)));
|
||||||
|
};
|
||||||
|
(disc_base.partitions(), disc_num)
|
||||||
|
} else {
|
||||||
|
(vec![], 0)
|
||||||
|
};
|
||||||
|
|
||||||
|
// Reset reader
|
||||||
|
let mut inner = inner.into_inner();
|
||||||
|
inner.reset();
|
||||||
|
Ok(Self { inner, header, block_map, nkit_header, junk_blocks, partitions, disc_num })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl DiscIO for DiscIOCISO {
|
||||||
|
fn open(&self) -> Result<Box<dyn ReadStream>> {
|
||||||
|
Ok(Box::new(CISOReadStream {
|
||||||
|
inner: BufReader::new(self.inner.clone()),
|
||||||
|
block_size: self.header.block_size.get(),
|
||||||
|
block_map: self.block_map,
|
||||||
|
cur_block: u16::MAX,
|
||||||
|
pos: 0,
|
||||||
|
junk_blocks: self.junk_blocks.clone(),
|
||||||
|
partitions: self.partitions.clone(),
|
||||||
|
disc_num: self.disc_num,
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn meta(&self) -> Result<DiscMeta> {
|
||||||
|
Ok(self.nkit_header.as_ref().map(DiscMeta::from).unwrap_or_default())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn disc_size(&self) -> Option<u64> { self.nkit_header.as_ref().and_then(|h| h.size) }
|
||||||
|
}
|
||||||
|
|
||||||
|
struct CISOReadStream {
|
||||||
|
inner: BufReader<SplitFileReader>,
|
||||||
|
block_size: u32,
|
||||||
|
block_map: [u16; CISO_MAP_SIZE],
|
||||||
|
cur_block: u16,
|
||||||
|
pos: u64,
|
||||||
|
|
||||||
|
// Data for recreating junk data
|
||||||
|
junk_blocks: Option<Box<[u8]>>,
|
||||||
|
partitions: Vec<PartitionInfo>,
|
||||||
|
disc_num: u8,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl CISOReadStream {
|
||||||
|
fn read_junk_data(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||||
|
let Some(junk_blocks) = self.junk_blocks.as_deref() else {
|
||||||
|
return Ok(0);
|
||||||
|
};
|
||||||
|
let block_size = self.block_size as u64;
|
||||||
|
let block = (self.pos / block_size) as u16;
|
||||||
|
if junk_blocks[(block / 8) as usize] & (1 << (7 - (block & 7))) == 0 {
|
||||||
|
return Ok(0);
|
||||||
|
}
|
||||||
|
let Some(partition) = self.partitions.iter().find(|p| {
|
||||||
|
let start = p.part_offset + p.data_offset;
|
||||||
|
start <= self.pos && self.pos < start + p.data_size
|
||||||
|
}) else {
|
||||||
|
log::warn!("No partition found for junk data at offset {:#x}", self.pos);
|
||||||
|
return Ok(0);
|
||||||
|
};
|
||||||
|
let offset = self.pos - (partition.part_offset + partition.data_offset);
|
||||||
|
let to_read = min(
|
||||||
|
buf.len(),
|
||||||
|
// The LFG is only valid for a single sector
|
||||||
|
SECTOR_SIZE - (offset % SECTOR_SIZE as u64) as usize,
|
||||||
|
);
|
||||||
|
let mut lfg = LaggedFibonacci::default();
|
||||||
|
lfg.init_with_seed(partition.lfg_seed, self.disc_num, offset);
|
||||||
|
lfg.fill(&mut buf[..to_read]);
|
||||||
|
self.pos += to_read as u64;
|
||||||
|
Ok(to_read)
|
||||||
|
}
|
||||||
|
}
|
||||||
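The NKit junk bitstream is consulted with a most-significant-bit-first test; a minimal restatement as a hypothetical helper:

```rust
// Returns true if the given block is flagged as junk in the NKit bitstream:
// bit 7 of byte 0 covers block 0, bit 6 covers block 1, and so on.
fn block_is_junk(junk_blocks: &[u8], block: u16) -> bool {
    match junk_blocks.get((block / 8) as usize) {
        Some(byte) => (byte & (1u8 << (7 - (block % 8)))) != 0,
        None => false,
    }
}
```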
|
|
||||||
|
impl Read for CISOReadStream {
|
||||||
|
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||||
|
let block_size = self.block_size as u64;
|
||||||
|
let block = (self.pos / block_size) as u16;
|
||||||
|
let block_offset = self.pos & (block_size - 1);
|
||||||
|
if block != self.cur_block {
|
||||||
|
if block >= CISO_MAP_SIZE as u16 {
|
||||||
|
return Ok(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find the block in the map
|
||||||
|
let phys_block = self.block_map[block as usize];
|
||||||
|
if phys_block == u16::MAX {
|
||||||
|
// Try to recreate junk data
|
||||||
|
let read = self.read_junk_data(buf)?;
|
||||||
|
if read > 0 {
|
||||||
|
return Ok(read);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Otherwise, read zeroes
|
||||||
|
let to_read = min(buf.len(), (block_size - block_offset) as usize);
|
||||||
|
buf[..to_read].fill(0);
|
||||||
|
self.pos += to_read as u64;
|
||||||
|
return Ok(to_read);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Seek to the new block
|
||||||
|
let file_offset =
|
||||||
|
size_of::<CISOHeader>() as u64 + phys_block as u64 * block_size + block_offset;
|
||||||
|
self.inner.seek(SeekFrom::Start(file_offset))?;
|
||||||
|
self.cur_block = block;
|
||||||
|
}
|
||||||
|
|
||||||
|
let to_read = min(buf.len(), (block_size - block_offset) as usize);
|
||||||
|
let read = self.inner.read(&mut buf[..to_read])?;
|
||||||
|
self.pos += read as u64;
|
||||||
|
Ok(read)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Seek for CISOReadStream {
|
||||||
|
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
|
||||||
|
let new_pos = match pos {
|
||||||
|
SeekFrom::Start(v) => v,
|
||||||
|
SeekFrom::End(_) => {
|
||||||
|
return Err(io::Error::new(
|
||||||
|
io::ErrorKind::Unsupported,
|
||||||
|
"CISOReadStream: SeekFrom::End is not supported",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
SeekFrom::Current(v) => self.pos.saturating_add_signed(v),
|
||||||
|
};
|
||||||
|
|
||||||
|
let block_size = self.block_size as u64;
|
||||||
|
let new_block = (self.pos / block_size) as u16;
|
||||||
|
if new_block == self.cur_block {
|
||||||
|
// Seek within the same block
|
||||||
|
self.inner.seek(SeekFrom::Current(new_pos as i64 - self.pos as i64))?;
|
||||||
|
} else {
|
||||||
|
// Seek to a different block, handled by next read
|
||||||
|
self.cur_block = u16::MAX;
|
||||||
|
}
|
||||||
|
|
||||||
|
self.pos = new_pos;
|
||||||
|
Ok(new_pos)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ReadStream for CISOReadStream {
|
||||||
|
fn stable_stream_len(&mut self) -> io::Result<u64> {
|
||||||
|
Ok(self.block_size as u64 * CISO_MAP_SIZE as u64)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
|
||||||
|
}
|
|
@@ -1,49 +1,25 @@
|
||||||
use std::{
|
use std::{io::BufReader, path::Path};
|
||||||
fs::File,
|
|
||||||
io,
|
use crate::{
|
||||||
io::{Seek, SeekFrom},
|
io::{split::SplitFileReader, DiscIO},
|
||||||
path::{Path, PathBuf},
|
streams::ReadStream,
|
||||||
|
Result,
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::{io::DiscIO, streams::ReadStream, Result};
|
pub struct DiscIOISO {
|
||||||
|
pub inner: SplitFileReader,
|
||||||
pub(crate) struct DiscIOISO {
|
|
||||||
pub(crate) filename: PathBuf,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl DiscIOISO {
|
impl DiscIOISO {
|
||||||
pub(crate) fn new(filename: &Path) -> Result<DiscIOISO> {
|
pub fn new(filename: &Path) -> Result<Self> {
|
||||||
Ok(DiscIOISO { filename: filename.to_owned() })
|
Ok(Self { inner: SplitFileReader::new(filename)? })
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl DiscIO for DiscIOISO {
|
impl DiscIO for DiscIOISO {
|
||||||
fn begin_read_stream(&mut self, offset: u64) -> io::Result<Box<dyn ReadStream>> {
|
fn open(&self) -> Result<Box<dyn ReadStream>> {
|
||||||
let mut file = File::open(&*self.filename)?;
|
Ok(Box::new(BufReader::new(self.inner.clone())))
|
||||||
file.seek(SeekFrom::Start(offset))?;
|
|
||||||
Ok(Box::from(file))
|
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) struct DiscIOISOStream<T>
|
fn disc_size(&self) -> Option<u64> { Some(self.inner.len()) }
|
||||||
where T: ReadStream + Sized
|
|
||||||
{
|
|
||||||
pub(crate) stream: T,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> DiscIOISOStream<T>
|
|
||||||
where T: ReadStream + Sized
|
|
||||||
{
|
|
||||||
pub(crate) fn new(stream: T) -> Result<DiscIOISOStream<T>> { Ok(DiscIOISOStream { stream }) }
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> DiscIO for DiscIOISOStream<T>
|
|
||||||
where T: ReadStream + Sized + Send + Sync
|
|
||||||
{
|
|
||||||
fn begin_read_stream<'a>(&'a mut self, offset: u64) -> io::Result<Box<dyn ReadStream + 'a>> {
|
|
||||||
let size = self.stream.stable_stream_len()?;
|
|
||||||
let mut stream = self.stream.new_window(0, size)?;
|
|
||||||
stream.seek(SeekFrom::Start(offset))?;
|
|
||||||
Ok(Box::from(stream))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
144
src/io/mod.rs
|
@@ -1,52 +1,52 @@
|
||||||
//! Disc file format related logic (ISO, NFS, etc)
|
//! Disc file format related logic (CISO, NFS, WBFS, WIA, etc.)
|
||||||
|
|
||||||
use std::{fs, io, path::Path};
|
use std::{fs, fs::File, path::Path};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
io::{
|
streams::ReadStream, util::reader::read_from, Error, OpenOptions, Result, ResultContext,
|
||||||
iso::{DiscIOISO, DiscIOISOStream},
|
|
||||||
nfs::DiscIONFS,
|
|
||||||
wia::DiscIOWIA,
|
|
||||||
},
|
|
||||||
streams::{ByteReadStream, ReadStream},
|
|
||||||
Error, Result,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
|
pub(crate) mod ciso;
|
||||||
pub(crate) mod iso;
|
pub(crate) mod iso;
|
||||||
pub(crate) mod nfs;
|
pub(crate) mod nfs;
|
||||||
|
pub(crate) mod nkit;
|
||||||
|
pub(crate) mod split;
|
||||||
|
pub(crate) mod wbfs;
|
||||||
pub(crate) mod wia;
|
pub(crate) mod wia;
|
||||||
|
|
||||||
#[derive(Default, Debug, Clone)]
|
/// SHA-1 hash bytes
|
||||||
pub struct DiscIOOptions {
|
pub(crate) type HashBytes = [u8; 20];
|
||||||
/// Rebuild hashes for the disc image.
|
|
||||||
pub rebuild_hashes: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Abstraction over supported disc file types.
|
/// AES key bytes
|
||||||
|
pub(crate) type KeyBytes = [u8; 16];
|
||||||
|
|
||||||
|
/// Magic bytes
|
||||||
|
pub(crate) type MagicBytes = [u8; 4];
|
||||||
|
|
||||||
|
/// Abstraction over supported disc file formats.
|
||||||
pub trait DiscIO: Send + Sync {
|
pub trait DiscIO: Send + Sync {
|
||||||
/// Opens a new read stream for the disc file(s).
|
/// Opens a new read stream for the disc file(s).
|
||||||
/// Generally does _not_ need to be used directly.
|
/// Generally does _not_ need to be used directly.
|
||||||
fn begin_read_stream(&mut self, offset: u64) -> io::Result<Box<dyn ReadStream + '_>>;
|
fn open(&self) -> Result<Box<dyn ReadStream + '_>>;
|
||||||
|
|
||||||
/// If false, the file format does not use standard Wii partition encryption. (e.g. NFS)
|
/// Returns extra metadata included in the disc file format, if any.
|
||||||
fn has_wii_crypto(&self) -> bool { true }
|
fn meta(&self) -> Result<DiscMeta> { Ok(DiscMeta::default()) }
|
||||||
|
|
||||||
|
/// If None, the file format does not store the original disc size. (e.g. WBFS, NFS)
|
||||||
|
fn disc_size(&self) -> Option<u64>;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Extra metadata included in some disc file formats.
|
||||||
|
#[derive(Debug, Clone, Default)]
|
||||||
|
pub struct DiscMeta {
|
||||||
|
pub crc32: Option<u32>,
|
||||||
|
pub md5: Option<[u8; 16]>,
|
||||||
|
pub sha1: Option<[u8; 20]>,
|
||||||
|
pub xxhash64: Option<u64>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Creates a new [`DiscIO`] instance.
|
/// Creates a new [`DiscIO`] instance.
|
||||||
///
|
pub fn open(filename: &Path, options: &OpenOptions) -> Result<Box<dyn DiscIO>> {
|
||||||
/// # Examples
|
|
||||||
///
|
|
||||||
/// Basic usage:
|
|
||||||
/// ```no_run
|
|
||||||
/// use nod::io::{new_disc_io, DiscIOOptions};
|
|
||||||
///
|
|
||||||
/// # fn main() -> nod::Result<()> {
|
|
||||||
/// let options = DiscIOOptions::default();
|
|
||||||
/// let mut disc_io = new_disc_io("path/to/file.iso".as_ref(), &options)?;
|
|
||||||
/// # Ok(())
|
|
||||||
/// # }
|
|
||||||
/// ```
|
|
||||||
pub fn new_disc_io(filename: &Path, options: &DiscIOOptions) -> Result<Box<dyn DiscIO>> {
|
|
||||||
let path_result = fs::canonicalize(filename);
|
let path_result = fs::canonicalize(filename);
|
||||||
if let Err(err) = path_result {
|
if let Err(err) = path_result {
|
||||||
return Err(Error::Io(format!("Failed to open {}", filename.display()), err));
|
return Err(Error::Io(format!("Failed to open {}", filename.display()), err));
|
||||||
|
@@ -59,66 +59,38 @@ pub fn new_disc_io(filename: &Path, options: &DiscIOOptions) -> Result<Box<dyn D
|
||||||
if !meta.unwrap().is_file() {
|
if !meta.unwrap().is_file() {
|
||||||
return Err(Error::DiscFormat(format!("Input is not a file: {}", filename.display())));
|
return Err(Error::DiscFormat(format!("Input is not a file: {}", filename.display())));
|
||||||
}
|
}
|
||||||
if has_extension(path, "iso") {
|
let magic: MagicBytes = {
|
||||||
Ok(Box::from(DiscIOISO::new(path)?))
|
let mut file =
|
||||||
} else if has_extension(path, "nfs") {
|
File::open(path).with_context(|| format!("Opening file {}", filename.display()))?;
|
||||||
match path.parent() {
|
read_from(&mut file)
|
||||||
|
.with_context(|| format!("Reading magic bytes from {}", filename.display()))?
|
||||||
|
};
|
||||||
|
match magic {
|
||||||
|
ciso::CISO_MAGIC => Ok(Box::new(ciso::DiscIOCISO::new(path)?)),
|
||||||
|
nfs::NFS_MAGIC => match path.parent() {
|
||||||
Some(parent) if parent.is_dir() => {
|
Some(parent) if parent.is_dir() => {
|
||||||
Ok(Box::from(DiscIONFS::new(path.parent().unwrap())?))
|
Ok(Box::new(nfs::DiscIONFS::new(path.parent().unwrap(), options)?))
|
||||||
}
|
}
|
||||||
_ => Err(Error::DiscFormat("Failed to locate NFS parent directory".to_string())),
|
_ => Err(Error::DiscFormat("Failed to locate NFS parent directory".to_string())),
|
||||||
}
|
},
|
||||||
} else if has_extension(path, "wia") || has_extension(path, "rvz") {
|
wbfs::WBFS_MAGIC => Ok(Box::new(wbfs::DiscIOWBFS::new(path)?)),
|
||||||
Ok(Box::from(DiscIOWIA::new(path, options)?))
|
wia::WIA_MAGIC | wia::RVZ_MAGIC => Ok(Box::new(wia::DiscIOWIA::new(path, options)?)),
|
||||||
} else {
|
_ => Ok(Box::new(iso::DiscIOISO::new(path)?)),
|
||||||
Err(Error::DiscFormat("Unknown file type".to_string()))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
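A hedged usage sketch of the new magic-based `open` (the path is made up, and `OpenOptions::default()` is assumed to exist, as it did for the old `DiscIOOptions`):

```rust
use std::path::Path;

fn main() -> nod::Result<()> {
    let options = nod::OpenOptions::default(); // assumption: implements Default
    let disc_io = nod::io::open(Path::new("game.rvz"), &options)?;
    if let Some(size) = disc_io.disc_size() {
        println!("original disc size: {} bytes", size);
    }
    println!("meta: {:?}", disc_io.meta()?);
    Ok(())
}
```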
|
|
||||||
/// Creates a new [`DiscIO`] instance from a byte slice.
|
/// Encrypts data in-place using AES-128-CBC with the given key and IV.
|
||||||
///
|
pub(crate) fn aes_encrypt(key: &KeyBytes, iv: KeyBytes, data: &mut [u8]) {
|
||||||
/// # Examples
|
use aes::cipher::{block_padding::NoPadding, BlockEncryptMut, KeyIvInit};
|
||||||
///
|
<cbc::Encryptor<aes::Aes128>>::new(key.into(), &aes::Block::from(iv))
|
||||||
/// Basic usage:
|
.encrypt_padded_mut::<NoPadding>(data, data.len())
|
||||||
/// ```no_run
|
.unwrap(); // Safe: using NoPadding
|
||||||
/// use nod::io::new_disc_io_from_buf;
|
|
||||||
///
|
|
||||||
/// # fn main() -> nod::Result<()> {
|
|
||||||
/// # #[allow(non_upper_case_globals)] const buf: &[u8] = &[0u8; 0];
|
|
||||||
/// let mut disc_io = new_disc_io_from_buf(buf)?;
|
|
||||||
/// # Ok(())
|
|
||||||
/// # }
|
|
||||||
/// ```
|
|
||||||
pub fn new_disc_io_from_buf(buf: &[u8]) -> Result<Box<dyn DiscIO + '_>> {
|
|
||||||
new_disc_io_from_stream(ByteReadStream { bytes: buf, position: 0 })
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Creates a new [`DiscIO`] instance from an existing [`ReadStream`].
|
/// Decrypts data in-place using AES-128-CBC with the given key and IV.
|
||||||
///
|
pub(crate) fn aes_decrypt(key: &KeyBytes, iv: KeyBytes, data: &mut [u8]) {
|
||||||
/// # Examples
|
use aes::cipher::{block_padding::NoPadding, BlockDecryptMut, KeyIvInit};
|
||||||
///
|
<cbc::Decryptor<aes::Aes128>>::new(key.into(), &aes::Block::from(iv))
|
||||||
/// Basic usage:
|
.decrypt_padded_mut::<NoPadding>(data)
|
||||||
/// ```no_run
|
.unwrap(); // Safe: using NoPadding
|
||||||
/// use nod::{io::new_disc_io_from_stream, streams::ByteReadStream};
|
|
||||||
///
|
|
||||||
/// # fn main() -> nod::Result<()> {
|
|
||||||
/// # #[allow(non_upper_case_globals)] const buf: &[u8] = &[0u8; 0];
|
|
||||||
/// let stream = ByteReadStream { bytes: buf, position: 0 };
|
|
||||||
/// let mut disc_io = new_disc_io_from_stream(stream)?;
|
|
||||||
/// # Ok(())
|
|
||||||
/// # }
|
|
||||||
/// ```
|
|
||||||
pub fn new_disc_io_from_stream<'a, T: 'a + ReadStream + Sized + Send + Sync>(
|
|
||||||
stream: T,
|
|
||||||
) -> Result<Box<dyn DiscIO + 'a>> {
|
|
||||||
Ok(Box::from(DiscIOISOStream::new(stream)?))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Helper function for checking a file extension.
|
|
||||||
#[inline(always)]
|
|
||||||
pub fn has_extension(filename: &Path, extension: &str) -> bool {
|
|
||||||
match filename.extension() {
|
|
||||||
Some(ext) => ext.eq_ignore_ascii_case(extension),
|
|
||||||
None => false,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
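A quick round-trip check of the two crate-internal helpers above (key, IV, and data are arbitrary; the length must be a multiple of the 16-byte AES block):

```rust
#[cfg(test)]
mod tests {
    use super::{aes_decrypt, aes_encrypt};

    #[test]
    fn aes_cbc_round_trip() {
        let key: [u8; 16] = [0x42; 16];
        let iv: [u8; 16] = [0x01; 16];
        let mut data = [0xAAu8; 32];
        let original = data;
        aes_encrypt(&key, iv, &mut data);
        assert_ne!(data, original);
        aes_decrypt(&key, iv, &mut data);
        assert_eq!(data, original);
    }
}
```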
|
|
524
src/io/nfs.rs
|
@@ -2,342 +2,306 @@ use std::{
|
||||||
fs::File,
|
fs::File,
|
||||||
io,
|
io,
|
||||||
io::{BufReader, Read, Seek, SeekFrom},
|
io::{BufReader, Read, Seek, SeekFrom},
|
||||||
|
mem::size_of,
|
||||||
path::{Component, Path, PathBuf},
|
path::{Component, Path, PathBuf},
|
||||||
};
|
};
|
||||||
|
|
||||||
use aes::{
|
use zerocopy::{big_endian::U32, AsBytes, FromBytes, FromZeroes};
|
||||||
cipher::{block_padding::NoPadding, BlockDecryptMut, KeyIvInit},
|
|
||||||
Aes128,
|
|
||||||
};
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
disc::SECTOR_SIZE,
|
array_ref,
|
||||||
io::DiscIO,
|
disc::{
|
||||||
|
wii::{read_partition_info, HASHES_SIZE},
|
||||||
|
SECTOR_SIZE,
|
||||||
|
},
|
||||||
|
io::{aes_decrypt, aes_encrypt, split::SplitFileReader, DiscIO, KeyBytes, MagicBytes},
|
||||||
|
static_assert,
|
||||||
streams::ReadStream,
|
streams::ReadStream,
|
||||||
util::reader::{read_vec, struct_size, FromReader},
|
util::reader::read_from,
|
||||||
Error, Result, ResultContext,
|
DiscHeader, Error, OpenOptions, Result, ResultContext,
|
||||||
};
|
};
|
||||||
|
|
||||||
type Aes128Cbc = cbc::Decryptor<Aes128>;
|
pub const NFS_MAGIC: MagicBytes = *b"EGGS";
|
||||||
|
pub const NFS_END_MAGIC: MagicBytes = *b"SGGE";
|
||||||
|
|
||||||
#[derive(Clone, Debug, PartialEq)]
|
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
|
||||||
pub(crate) struct LBARange {
|
#[repr(C, align(4))]
|
||||||
pub(crate) start_block: u32,
|
pub struct LBARange {
|
||||||
pub(crate) num_blocks: u32,
|
pub start_sector: U32,
|
||||||
|
pub num_sectors: U32,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl FromReader for LBARange {
|
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
|
||||||
type Args<'a> = ();
|
#[repr(C, align(4))]
|
||||||
|
pub struct NFSHeader {
|
||||||
const STATIC_SIZE: usize = struct_size([
|
pub magic: MagicBytes,
|
||||||
u32::STATIC_SIZE, // start_block
|
pub version: U32,
|
||||||
u32::STATIC_SIZE, // num_blocks
|
pub unk1: U32,
|
||||||
]);
|
pub unk2: U32,
|
||||||
|
pub num_lba_ranges: U32,
|
||||||
fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
|
pub lba_ranges: [LBARange; 61],
|
||||||
where R: Read + ?Sized {
|
pub end_magic: MagicBytes,
|
||||||
Ok(LBARange {
|
|
||||||
start_block: u32::from_reader(reader)?,
|
|
||||||
num_blocks: u32::from_reader(reader)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type MagicBytes = [u8; 4];
|
static_assert!(size_of::<NFSHeader>() == 0x200);
|
||||||
|
|
||||||
#[derive(Clone, Debug, PartialEq)]
|
|
||||||
pub(crate) struct NFSHeader {
|
|
||||||
pub(crate) version: u32,
|
|
||||||
pub(crate) unk1: u32,
|
|
||||||
pub(crate) unk2: u32,
|
|
||||||
pub(crate) lba_ranges: Vec<LBARange>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl FromReader for NFSHeader {
|
|
||||||
type Args<'a> = ();
|
|
||||||
|
|
||||||
const STATIC_SIZE: usize = struct_size([
|
|
||||||
MagicBytes::STATIC_SIZE, // magic
|
|
||||||
u32::STATIC_SIZE, // version
|
|
||||||
u32::STATIC_SIZE, // unk1
|
|
||||||
u32::STATIC_SIZE, // unk2
|
|
||||||
u32::STATIC_SIZE, // lba_range_count
|
|
||||||
LBARange::STATIC_SIZE * 61, // lba_ranges
|
|
||||||
MagicBytes::STATIC_SIZE, // end_magic
|
|
||||||
]);
|
|
||||||
|
|
||||||
fn from_reader_args<R>(reader: &mut R, _args: Self::Args<'_>) -> io::Result<Self>
|
|
||||||
where R: Read + ?Sized {
|
|
||||||
if MagicBytes::from_reader(reader)? != *b"EGGS" {
|
|
||||||
return Err(io::Error::new(io::ErrorKind::InvalidData, "Invalid NFS magic"));
|
|
||||||
}
|
|
||||||
let version = u32::from_reader(reader)?;
|
|
||||||
let unk1 = u32::from_reader(reader)?;
|
|
||||||
let unk2 = u32::from_reader(reader)?;
|
|
||||||
let lba_range_count = u32::from_reader(reader)?;
|
|
||||||
let mut lba_ranges = read_vec(reader, 61)?;
|
|
||||||
lba_ranges.truncate(lba_range_count as usize);
|
|
||||||
if MagicBytes::from_reader(reader)? != *b"SGGE" {
|
|
||||||
return Err(io::Error::new(io::ErrorKind::InvalidData, "Invalid NFS end magic"));
|
|
||||||
}
|
|
||||||
Ok(NFSHeader { version, unk1, unk2, lba_ranges })
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Copy, Debug, PartialEq)]
|
|
||||||
pub(crate) struct Fbo {
|
|
||||||
pub(crate) file: u32,
|
|
||||||
pub(crate) block: u32,
|
|
||||||
pub(crate) l_block: u32,
|
|
||||||
pub(crate) offset: u32,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for Fbo {
|
|
||||||
fn default() -> Self {
|
|
||||||
Fbo { file: u32::MAX, block: u32::MAX, l_block: u32::MAX, offset: u32::MAX }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl NFSHeader {
|
impl NFSHeader {
|
||||||
pub(crate) fn calculate_num_files(&self) -> u32 {
|
pub fn validate(&self) -> Result<()> {
|
||||||
let total_block_count =
|
if self.magic != NFS_MAGIC {
|
||||||
self.lba_ranges.iter().fold(0u32, |acc, range| acc + range.num_blocks);
|
return Err(Error::DiscFormat("Invalid NFS magic".to_string()));
|
||||||
(((total_block_count as u64) * 0x8000u64 + (0x200u64 + 0xF9FFFFFu64)) / 0xFA00000u64) as u32
|
}
|
||||||
|
if self.num_lba_ranges.get() > 61 {
|
||||||
|
return Err(Error::DiscFormat("Invalid NFS LBA range count".to_string()));
|
||||||
|
}
|
||||||
|
if self.end_magic != NFS_END_MAGIC {
|
||||||
|
return Err(Error::DiscFormat("Invalid NFS end magic".to_string()));
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn logical_to_fbo(&self, offset: u64) -> Fbo {
|
pub fn lba_ranges(&self) -> &[LBARange] {
|
||||||
let block_div = (offset / 0x8000) as u32;
|
&self.lba_ranges[..self.num_lba_ranges.get() as usize]
|
||||||
let block_off = (offset % 0x8000) as u32;
|
}
|
||||||
let mut block = u32::MAX;
|
|
||||||
let mut physical_block = 0u32;
|
pub fn calculate_num_files(&self) -> u32 {
|
||||||
for range in self.lba_ranges.iter() {
|
let sector_count =
|
||||||
if block_div >= range.start_block && block_div - range.start_block < range.num_blocks {
|
self.lba_ranges().iter().fold(0u32, |acc, range| acc + range.num_sectors.get());
|
||||||
block = physical_block + (block_div - range.start_block);
|
(((sector_count as u64) * (SECTOR_SIZE as u64)
|
||||||
break;
|
+ (size_of::<NFSHeader>() as u64 + 0xF9FFFFFu64))
|
||||||
|
/ 0xFA00000u64) as u32
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn phys_sector(&self, sector: u32) -> u32 {
|
||||||
|
let mut cur_sector = 0u32;
|
||||||
|
for range in self.lba_ranges().iter() {
|
||||||
|
if sector >= range.start_sector.get()
|
||||||
|
&& sector - range.start_sector.get() < range.num_sectors.get()
|
||||||
|
{
|
||||||
|
return cur_sector + (sector - range.start_sector.get());
|
||||||
}
|
}
|
||||||
physical_block += range.num_blocks;
|
cur_sector += range.num_sectors.get();
|
||||||
}
|
|
||||||
if block == u32::MAX {
|
|
||||||
Fbo::default()
|
|
||||||
} else {
|
|
||||||
Fbo { file: block / 8000, block: block % 8000, l_block: block_div, offset: block_off }
|
|
||||||
}
|
}
|
||||||
|
u32::MAX
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
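The LBA-range lookup above, restated as a standalone sketch: ranges are stored in file order, so the physical index is the running total of preceding sectors plus the offset into the matching range.

```rust
struct LbaRange {
    start_sector: u32,
    num_sectors: u32,
}

// Translate a logical disc sector into its physical position in the NFS
// data, or None if no range covers it (the sector is implicitly zero).
fn phys_sector(ranges: &[LbaRange], sector: u32) -> Option<u32> {
    let mut cur_sector = 0u32;
    for range in ranges {
        if sector >= range.start_sector && sector - range.start_sector < range.num_sectors {
            return Some(cur_sector + (sector - range.start_sector));
        }
        cur_sector += range.num_sectors;
    }
    None
}
```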
|
|
||||||
pub(crate) struct DiscIONFS {
|
pub struct DiscIONFS {
|
||||||
pub(crate) directory: PathBuf,
|
pub inner: SplitFileReader,
|
||||||
pub(crate) key: [u8; 16],
|
pub header: NFSHeader,
|
||||||
pub(crate) header: Option<NFSHeader>,
|
pub raw_size: u64,
|
||||||
|
pub disc_size: u64,
|
||||||
|
pub key: KeyBytes,
|
||||||
|
pub encrypt: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl DiscIONFS {
|
impl DiscIONFS {
|
||||||
pub(crate) fn new(directory: &Path) -> Result<DiscIONFS> {
|
pub fn new(directory: &Path, options: &OpenOptions) -> Result<DiscIONFS> {
|
||||||
let mut disc_io = DiscIONFS { directory: directory.to_owned(), key: [0; 16], header: None };
|
let mut disc_io = DiscIONFS {
|
||||||
disc_io.validate_files()?;
|
inner: SplitFileReader::empty(),
|
||||||
|
header: NFSHeader::new_zeroed(),
|
||||||
|
raw_size: 0,
|
||||||
|
disc_size: 0,
|
||||||
|
key: [0; 16],
|
||||||
|
encrypt: options.rebuild_encryption,
|
||||||
|
};
|
||||||
|
disc_io.load_files(directory)?;
|
||||||
Ok(disc_io)
|
Ok(disc_io)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) struct NFSReadStream<'a> {
|
pub struct NFSReadStream {
|
||||||
disc_io: &'a DiscIONFS,
|
/// Underlying file reader
|
||||||
file: Option<File>,
|
inner: SplitFileReader,
|
||||||
crypto: [u8; 16],
|
/// NFS file header
|
||||||
// Physical address - all UINT32_MAX indicates logical zero block
|
header: NFSHeader,
|
||||||
phys_addr: Fbo,
|
/// Inner disc header
|
||||||
// Logical address
|
disc_header: Option<DiscHeader>,
|
||||||
offset: u64,
|
/// Estimated disc size
|
||||||
// Active file stream and its offset as set in the system.
|
disc_size: u64,
|
||||||
// Block is typically one ahead of the presently decrypted block.
|
/// Current offset
|
||||||
cur_file: u32,
|
pos: u64,
|
||||||
cur_block: u32,
|
/// Current sector
|
||||||
|
sector: u32,
|
||||||
|
/// Current decrypted sector
|
||||||
buf: [u8; SECTOR_SIZE],
|
buf: [u8; SECTOR_SIZE],
|
||||||
|
/// AES key
|
||||||
|
key: KeyBytes,
|
||||||
|
/// Wii partition info
|
||||||
|
part_info: Vec<PartitionInfo>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a> NFSReadStream<'a> {
|
struct PartitionInfo {
|
||||||
fn set_cur_file(&mut self, cur_file: u32) -> Result<()> {
|
start_sector: u32,
|
||||||
if cur_file >= self.disc_io.header.as_ref().unwrap().calculate_num_files() {
|
end_sector: u32,
|
||||||
return Err(Error::DiscFormat(format!("Out of bounds NFS file access: {}", cur_file)));
|
key: KeyBytes,
|
||||||
}
|
}
|
||||||
self.cur_file = cur_file;
|
|
||||||
self.cur_block = u32::MAX;
|
|
||||||
let path = self.disc_io.get_nfs(cur_file)?;
|
|
||||||
self.file = Option::from(
|
|
||||||
File::open(&path).with_context(|| format!("Opening file {}", path.display()))?,
|
|
||||||
);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn set_cur_block(&mut self, cur_block: u32) -> io::Result<()> {
|
impl NFSReadStream {
|
||||||
self.cur_block = cur_block;
|
fn read_sector(&mut self, sector: u32) -> io::Result<()> {
|
||||||
self.file
|
// Calculate physical sector
|
||||||
.as_ref()
|
let phys_sector = self.header.phys_sector(sector);
|
||||||
.unwrap()
|
if phys_sector == u32::MAX {
|
||||||
.seek(SeekFrom::Start(self.cur_block as u64 * SECTOR_SIZE as u64 + 0x200u64))?;
|
// Logical zero sector
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn set_phys_addr(&mut self, phys_addr: Fbo) -> Result<()> {
|
|
||||||
// If we're just changing the offset, nothing else needs to be done
|
|
||||||
if self.phys_addr.file == phys_addr.file && self.phys_addr.block == phys_addr.block {
|
|
||||||
self.phys_addr.offset = phys_addr.offset;
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
self.phys_addr = phys_addr;
|
|
||||||
|
|
||||||
// Set logical zero block
|
|
||||||
if phys_addr.file == u32::MAX {
|
|
||||||
self.buf.fill(0u8);
|
self.buf.fill(0u8);
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
|
|
||||||
// Make necessary file and block current with system
|
// Read sector
|
||||||
if phys_addr.file != self.cur_file {
|
let offset = size_of::<NFSHeader>() as u64 + phys_sector as u64 * SECTOR_SIZE as u64;
|
||||||
self.set_cur_file(phys_addr.file)?;
|
self.inner.seek(SeekFrom::Start(offset))?;
|
||||||
}
|
self.inner.read_exact(&mut self.buf)?;
|
||||||
if phys_addr.block != self.cur_block {
|
|
||||||
self.set_cur_block(phys_addr.block)
|
|
||||||
.with_context(|| format!("Seeking to NFS block {}", phys_addr.block))?;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read block, handling 0x200 overlap case
|
|
||||||
if phys_addr.block == 7999 {
|
|
||||||
self.file
|
|
||||||
.as_ref()
|
|
||||||
.unwrap()
|
|
||||||
.read_exact(&mut self.buf[..SECTOR_SIZE - 0x200])
|
|
||||||
.context("Reading NFS block 7999 part 1")?;
|
|
||||||
self.set_cur_file(self.cur_file + 1)?;
|
|
||||||
self.file
|
|
||||||
.as_ref()
|
|
||||||
.unwrap()
|
|
||||||
.read_exact(&mut self.buf[SECTOR_SIZE - 0x200..])
|
|
||||||
.context("Reading NFS block 7999 part 2")?;
|
|
||||||
self.cur_block = 0;
|
|
||||||
} else {
|
|
||||||
self.file
|
|
||||||
.as_ref()
|
|
||||||
.unwrap()
|
|
||||||
.read_exact(&mut self.buf)
|
|
||||||
.with_context(|| format!("Reading NFS block {}", phys_addr.block))?;
|
|
||||||
self.cur_block += 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decrypt
|
// Decrypt
|
||||||
|
let iv_bytes = sector.to_be_bytes();
|
||||||
#[rustfmt::skip]
|
#[rustfmt::skip]
|
||||||
let iv: [u8; 16] = [
|
let iv: KeyBytes = [
|
||||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||||
(phys_addr.l_block & 0xFF) as u8,
|
iv_bytes[0], iv_bytes[1], iv_bytes[2], iv_bytes[3],
|
||||||
((phys_addr.l_block >> 8) & 0xFF) as u8,
|
|
||||||
((phys_addr.l_block >> 16) & 0xFF) as u8,
|
|
||||||
((phys_addr.l_block >> 24) & 0xFF) as u8,
|
|
||||||
];
|
];
|
||||||
Aes128Cbc::new(self.crypto.as_ref().into(), &iv.into())
|
aes_decrypt(&self.key, iv, &mut self.buf);
|
||||||
.decrypt_padded_mut::<NoPadding>(&mut self.buf)?;
|
|
||||||
|
if sector == 0 {
|
||||||
|
if let Some(header) = &self.disc_header {
|
||||||
|
// Replace disc header in buffer
|
||||||
|
let header_bytes = header.as_bytes();
|
||||||
|
self.buf[..header_bytes.len()].copy_from_slice(header_bytes);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Re-encrypt if needed
|
||||||
|
if let Some(part) = self
|
||||||
|
.part_info
|
||||||
|
.iter()
|
||||||
|
.find(|part| sector >= part.start_sector && sector < part.end_sector)
|
||||||
|
{
|
||||||
|
// Encrypt hashes
|
||||||
|
aes_encrypt(&part.key, [0u8; 16], &mut self.buf[..HASHES_SIZE]);
|
||||||
|
// Encrypt data using IV from H2
|
||||||
|
aes_encrypt(&part.key, *array_ref![self.buf, 0x3d0, 16], &mut self.buf[HASHES_SIZE..]);
|
||||||
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
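The per-sector IV built above is twelve zero bytes followed by the big-endian logical sector number; a compact restatement as a hypothetical helper:

```rust
fn nfs_sector_iv(sector: u32) -> [u8; 16] {
    // Bytes 0..12 stay zero; the sector number fills the last four bytes in
    // big-endian order, matching the #[rustfmt::skip] literal above.
    let mut iv = [0u8; 16];
    iv[12..].copy_from_slice(&sector.to_be_bytes());
    iv
}
```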
|
|
||||||
fn set_logical_addr(&mut self, addr: u64) -> Result<()> {
|
|
||||||
self.set_phys_addr(self.disc_io.header.as_ref().unwrap().logical_to_fbo(addr))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a> Read for NFSReadStream<'a> {
|
impl Read for NFSReadStream {
|
||||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||||
let mut rem = buf.len();
|
let sector = (self.pos / SECTOR_SIZE as u64) as u32;
|
||||||
let mut read: usize = 0;
|
let sector_off = (self.pos % SECTOR_SIZE as u64) as usize;
|
||||||
while rem > 0 {
|
if sector != self.sector {
|
||||||
let mut read_size = rem;
|
self.read_sector(sector)?;
|
||||||
let block_offset: usize =
|
self.sector = sector;
|
||||||
if self.phys_addr.offset == u32::MAX { 0 } else { self.phys_addr.offset as usize };
|
|
||||||
if read_size + block_offset > SECTOR_SIZE {
|
|
||||||
read_size = SECTOR_SIZE - block_offset
|
|
||||||
}
|
|
||||||
buf[read..read + read_size]
|
|
||||||
.copy_from_slice(&self.buf[block_offset..block_offset + read_size]);
|
|
||||||
read += read_size;
|
|
||||||
rem -= read_size;
|
|
||||||
self.offset += read_size as u64;
|
|
||||||
self.set_logical_addr(self.offset).map_err(|e| match e {
|
|
||||||
Error::Io(s, e) => io::Error::new(e.kind(), s),
|
|
||||||
_ => io::Error::from(io::ErrorKind::Other),
|
|
||||||
})?;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
let read = buf.len().min(SECTOR_SIZE - sector_off);
|
||||||
|
buf[..read].copy_from_slice(&self.buf[sector_off..sector_off + read]);
|
||||||
|
self.pos += read as u64;
|
||||||
Ok(read)
|
Ok(read)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a> Seek for NFSReadStream<'a> {
|
impl Seek for NFSReadStream {
|
||||||
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
|
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
|
||||||
self.offset = match pos {
|
self.pos = match pos {
|
||||||
SeekFrom::Start(v) => v,
|
SeekFrom::Start(v) => v,
|
||||||
SeekFrom::End(v) => (self.stable_stream_len()? as i64 + v) as u64,
|
SeekFrom::End(_) => {
|
||||||
SeekFrom::Current(v) => (self.offset as i64 + v) as u64,
|
return Err(io::Error::new(
|
||||||
|
io::ErrorKind::Unsupported,
|
||||||
|
"NFSReadStream: SeekFrom::End is not supported",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
SeekFrom::Current(v) => self.pos.saturating_add_signed(v),
|
||||||
};
|
};
|
||||||
self.set_logical_addr(self.offset).map_err(|v| match v {
|
Ok(self.pos)
|
||||||
Error::Io(_, v) => v,
|
|
||||||
_ => io::Error::from(io::ErrorKind::Other),
|
|
||||||
})?;
|
|
||||||
Ok(self.offset)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn stream_position(&mut self) -> io::Result<u64> { Ok(self.offset) }
|
fn stream_position(&mut self) -> io::Result<u64> { Ok(self.pos) }
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a> ReadStream for NFSReadStream<'a> {
|
impl ReadStream for NFSReadStream {
|
||||||
fn stable_stream_len(&mut self) -> io::Result<u64> { todo!() }
|
fn stable_stream_len(&mut self) -> io::Result<u64> { Ok(self.disc_size) }
|
||||||
|
|
||||||
fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
|
fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
|
||||||
}
|
}
|
||||||
|
|
||||||
impl DiscIO for DiscIONFS {
|
impl DiscIO for DiscIONFS {
|
||||||
fn begin_read_stream(&mut self, offset: u64) -> io::Result<Box<dyn ReadStream + '_>> {
|
fn open(&self) -> Result<Box<dyn ReadStream>> {
|
||||||
Ok(Box::from(NFSReadStream {
|
let mut stream = NFSReadStream {
|
||||||
disc_io: self,
|
inner: self.inner.clone(),
|
||||||
file: None,
|
header: self.header.clone(),
|
||||||
crypto: self.key,
|
disc_header: None,
|
||||||
phys_addr: Fbo::default(),
|
disc_size: self.disc_size,
|
||||||
offset,
|
pos: 0,
|
||||||
cur_file: u32::MAX,
|
sector: u32::MAX,
|
||||||
cur_block: u32::MAX,
|
|
||||||
buf: [0; SECTOR_SIZE],
|
buf: [0; SECTOR_SIZE],
|
||||||
}))
|
key: self.key,
|
||||||
|
part_info: vec![],
|
||||||
|
};
|
||||||
|
let mut disc_header: DiscHeader = read_from(&mut stream).context("Reading disc header")?;
|
||||||
|
if !self.encrypt {
|
||||||
|
// If we're not re-encrypting, disable partition encryption in disc header
|
||||||
|
disc_header.no_partition_encryption = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read partition info so we can re-encrypt
|
||||||
|
if self.encrypt && disc_header.is_wii() {
|
||||||
|
for part in read_partition_info(&mut stream)? {
|
||||||
|
let start = part.offset + part.header.data_off();
|
||||||
|
let end = start + part.header.data_size();
|
||||||
|
if start % SECTOR_SIZE as u64 != 0 || end % SECTOR_SIZE as u64 != 0 {
|
||||||
|
return Err(Error::DiscFormat(format!(
|
||||||
|
"Partition start / end not aligned to sector size: {} / {}",
|
||||||
|
start, end
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
stream.part_info.push(PartitionInfo {
|
||||||
|
start_sector: (start / SECTOR_SIZE as u64) as u32,
|
||||||
|
end_sector: (end / SECTOR_SIZE as u64) as u32,
|
||||||
|
key: part.header.ticket.title_key,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
stream.disc_header = Some(disc_header);
|
||||||
|
// Reset stream position
|
||||||
|
stream.pos = 0;
|
||||||
|
stream.sector = u32::MAX;
|
||||||
|
Ok(Box::new(stream))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn has_wii_crypto(&self) -> bool { false }
|
fn disc_size(&self) -> Option<u64> { None }
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_path<P>(directory: &Path, path: P) -> PathBuf
where P: AsRef<Path> {
    let mut buf = directory.to_path_buf();
    for component in path.as_ref().components() {
        match component {
            Component::ParentDir => {
                buf.pop();
            }
            _ => buf.push(component),
        }
    }
    buf
}

fn get_nfs(directory: &Path, num: u32) -> Result<PathBuf> {
    let path = get_path(directory, format!("hif_{:06}.nfs", num));
    if path.exists() {
        Ok(path)
    } else {
        Err(Error::DiscFormat(format!("Failed to locate {}", path.display())))
    }
}
impl DiscIONFS {
    pub fn load_files(&mut self, directory: &Path) -> Result<()> {
        {
            // Load key file
            let primary_key_path =
                get_path(directory, ["..", "code", "htk.bin"].iter().collect::<PathBuf>());
            let secondary_key_path = get_path(directory, "htk.bin");
            let mut key_path = primary_key_path.canonicalize();
            if key_path.is_err() {
                key_path = secondary_key_path.canonicalize();
@@ -355,19 +319,47 @@ impl DiscIONFS
                .read(&mut self.key)
                .map_err(|v| Error::Io(format!("Failed to read {}", resolved_path.display()), v))?;
        }

        {
            // Load header from first file
            let path = get_nfs(directory, 0)?;
            self.inner.add(&path)?;

            let mut file = BufReader::new(
                File::open(&path).with_context(|| format!("Opening file {}", path.display()))?,
            );
            let header: NFSHeader = read_from(&mut file)
                .with_context(|| format!("Reading NFS header from file {}", path.display()))?;
            header.validate()?;
            // log::debug!("{:?}", header);

            // Ensure remaining files exist
            for i in 1..header.calculate_num_files() {
                self.inner.add(&get_nfs(directory, i)?)?;
            }

            // Calculate sizes
            let num_sectors =
                header.lba_ranges().iter().map(|range| range.num_sectors.get()).sum::<u32>();
            let max_sector = header
                .lba_ranges()
                .iter()
                .map(|range| range.start_sector.get() + range.num_sectors.get())
                .max()
                .unwrap();
            let raw_size = size_of::<NFSHeader>() + (num_sectors as usize * SECTOR_SIZE);
            let data_size = max_sector as usize * SECTOR_SIZE;
            if raw_size > self.inner.len() as usize {
                return Err(Error::DiscFormat(format!(
                    "NFS raw size mismatch: expected at least {}, got {}",
                    raw_size,
                    self.inner.len()
                )));
            }

            self.header = header;
            self.raw_size = raw_size as u64;
            self.disc_size = data_size as u64;
        }
        Ok(())
    }
@@ -0,0 +1,146 @@
use std::{
    io,
    io::{Read, Seek, SeekFrom},
};

use crate::{
    io::MagicBytes,
    util::reader::{read_from, read_u16_be, read_u32_be, read_u64_be, read_vec},
    DiscMeta,
};

#[allow(unused)]
#[repr(u16)]
enum NKitHeaderFlags {
    Size = 0x1,
    Crc32 = 0x2,
    Md5 = 0x4,
    Sha1 = 0x8,
    Xxhash64 = 0x10,
    Key = 0x20,
    Encrypted = 0x40,
    ExtraData = 0x80,
    IndexFile = 0x100,
}

const NKIT_HEADER_V1_FLAGS: u16 = NKitHeaderFlags::Crc32 as u16
    | NKitHeaderFlags::Md5 as u16
    | NKitHeaderFlags::Sha1 as u16
    | NKitHeaderFlags::Xxhash64 as u16;

const fn calc_header_size(version: u8, flags: u16, key_len: u32) -> usize {
    let mut size = 8;
    if version >= 2 {
        // header size + flags
        size += 4;
    }
    if flags & NKitHeaderFlags::Size as u16 != 0 {
        size += 8;
    }
    if flags & NKitHeaderFlags::Crc32 as u16 != 0 {
        size += 4;
    }
    if flags & NKitHeaderFlags::Md5 as u16 != 0 {
        size += 16;
    }
    if flags & NKitHeaderFlags::Sha1 as u16 != 0 {
        size += 20;
    }
    if flags & NKitHeaderFlags::Xxhash64 as u16 != 0 {
        size += 8;
    }
    if flags & NKitHeaderFlags::Key as u16 != 0 {
        size += key_len as usize + 2;
    }
    size
}

#[allow(unused)]
#[derive(Debug, Clone)]
pub struct NKitHeader {
    pub version: u8,
    pub flags: u16,
    pub size: Option<u64>,
    pub crc32: Option<u32>,
    pub md5: Option<[u8; 16]>,
    pub sha1: Option<[u8; 20]>,
    pub xxhash64: Option<u64>,
}

const VERSION_PREFIX: [u8; 7] = *b"NKIT v";

impl NKitHeader {
    pub fn try_read_from<R>(reader: &mut R) -> Option<Self>
    where R: Read + Seek + ?Sized {
        let magic: MagicBytes = read_from(reader).ok()?;
        if magic == *b"NKIT" {
            reader.seek(SeekFrom::Current(-4)).ok()?;
            match NKitHeader::read_from(reader) {
                Ok(header) => Some(header),
                Err(e) => {
                    log::warn!("Failed to read NKit header: {}", e);
                    None
                }
            }
        } else {
            None
        }
    }

    pub fn read_from<R>(reader: &mut R) -> io::Result<Self>
    where R: Read + ?Sized {
        let version_string: [u8; 8] = read_from(reader)?;
        if version_string[0..7] != VERSION_PREFIX
            || version_string[7] < b'1'
            || version_string[7] > b'9'
        {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "Invalid NKit header version string",
            ));
        }
        let version = version_string[7] - b'0';
        let header_size = match version {
            1 => calc_header_size(version, NKIT_HEADER_V1_FLAGS, 0) as u16,
            2 => read_u16_be(reader)?,
            _ => {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidData,
                    format!("Unsupported NKit header version: {}", version),
                ));
            }
        };

        let mut remaining_header_size = header_size as usize - 8;
        if version >= 2 {
            // We read the header size already
            remaining_header_size -= 2;
        }
        let header_bytes = read_vec(reader, remaining_header_size)?;
        let mut reader = &header_bytes[..];

        let flags = if version == 1 { NKIT_HEADER_V1_FLAGS } else { read_u16_be(&mut reader)? };
        let size = (flags & NKitHeaderFlags::Size as u16 != 0)
            .then(|| read_u64_be(&mut reader))
            .transpose()?;
        let crc32 = (flags & NKitHeaderFlags::Crc32 as u16 != 0)
            .then(|| read_u32_be(&mut reader))
            .transpose()?;
        let md5 = (flags & NKitHeaderFlags::Md5 as u16 != 0)
            .then(|| read_from::<[u8; 16], _>(&mut reader))
            .transpose()?;
        let sha1 = (flags & NKitHeaderFlags::Sha1 as u16 != 0)
            .then(|| read_from::<[u8; 20], _>(&mut reader))
            .transpose()?;
        let xxhash64 = (flags & NKitHeaderFlags::Xxhash64 as u16 != 0)
            .then(|| read_u64_be(&mut reader))
            .transpose()?;
        Ok(Self { version, flags, size, crc32, md5, sha1, xxhash64 })
    }
}

impl From<&NKitHeader> for DiscMeta {
    fn from(value: &NKitHeader) -> Self {
        Self { crc32: value.crc32, md5: value.md5, sha1: value.sha1, xxhash64: value.xxhash64 }
    }
}
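// Illustrative only (not part of the commit): a minimal sketch showing how the
// NKit header defined above could be probed from an arbitrary `Read + Seek`
// source. The 0x10000 offset mirrors where the WBFS reader below looks for it;
// the file name is hypothetical.
fn probe_nkit_example() -> std::io::Result<()> {
    use std::{fs::File, io::{BufReader, Seek, SeekFrom}};
    let mut file = BufReader::new(File::open("game.wbfs")?);
    file.seek(SeekFrom::Start(0x10000))?;
    // `try_read_from` returns None if the "NKIT" magic is absent or the header is invalid.
    if let Some(header) = NKitHeader::try_read_from(&mut file) {
        let meta = DiscMeta::from(&header);
        println!("CRC32 from NKit header: {:?}", meta.crc32);
    }
    Ok(())
}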
@@ -0,0 +1,165 @@
use std::{
    fs::File,
    io,
    io::{Read, Seek, SeekFrom},
    path::{Path, PathBuf},
};

use crate::{ErrorContext, ReadStream, Result, ResultContext};

#[derive(Debug)]
pub struct SplitFileReader {
    files: Vec<Split<PathBuf>>,
    open_file: Option<Split<File>>,
    pos: u64,
}

#[derive(Debug, Clone)]
struct Split<T> {
    inner: T,
    begin: u64,
    size: u64,
}

impl<T> Split<T> {
    fn contains(&self, pos: u64) -> bool { self.begin <= pos && pos < self.begin + self.size }
}

// .iso.1, .iso.2, etc.
fn split_path_1(input: &Path, index: u32) -> PathBuf {
    let input_str = input.to_str().unwrap_or("[INVALID]");
    let mut out = input_str.to_string();
    out.push('.');
    out.push(char::from_digit(index, 10).unwrap());
    PathBuf::from(out)
}

// .part1.iso, .part2.iso, etc.
fn split_path_2(input: &Path, index: u32) -> PathBuf {
    let extension = input.extension().and_then(|s| s.to_str()).unwrap_or("iso");
    let input_without_ext = input.with_extension("");
    let input_str = input_without_ext.to_str().unwrap_or("[INVALID]");
    let mut out = input_str.to_string();
    out.push_str(".part");
    out.push(char::from_digit(index, 10).unwrap());
    out.push('.');
    out.push_str(extension);
    PathBuf::from(out)
}

// .wbf1, .wbf2, etc.
fn split_path_3(input: &Path, index: u32) -> PathBuf {
    let input_str = input.to_str().unwrap_or("[INVALID]");
    let mut chars = input_str.chars();
    chars.next_back();
    let mut out = chars.as_str().to_string();
    out.push(char::from_digit(index, 10).unwrap());
    PathBuf::from(out)
}

impl SplitFileReader {
    pub fn empty() -> Self { Self { files: Vec::new(), open_file: None, pos: 0 } }

    pub fn new(path: &Path) -> Result<Self> {
        let mut files = vec![];
        let mut begin = 0;
        match path.metadata() {
            Ok(metadata) => {
                files.push(Split { inner: path.to_path_buf(), begin, size: metadata.len() });
                begin += metadata.len();
            }
            Err(e) => {
                return Err(e.context(format!("Failed to stat file {}", path.display())));
            }
        }
        for path_fn in [split_path_1, split_path_2, split_path_3] {
            let mut index = 1;
            loop {
                let path = path_fn(path, index);
                if let Ok(metadata) = path.metadata() {
                    files.push(Split { inner: path, begin, size: metadata.len() });
                    begin += metadata.len();
                    index += 1;
                } else {
                    break;
                }
            }
            if index > 1 {
                break;
            }
        }
        Ok(Self { files, open_file: None, pos: 0 })
    }

    pub fn add(&mut self, path: &Path) -> Result<()> {
        let begin = self.len();
        let metadata =
            path.metadata().context(format!("Failed to stat file {}", path.display()))?;
        self.files.push(Split { inner: path.to_path_buf(), begin, size: metadata.len() });
        Ok(())
    }

    pub fn reset(&mut self) {
        self.open_file = None;
        self.pos = 0;
    }

    pub fn len(&self) -> u64 { self.files.last().map_or(0, |f| f.begin + f.size) }
}

impl Read for SplitFileReader {
    fn read(&mut self, mut buf: &mut [u8]) -> io::Result<usize> {
        let mut total = 0;
        while !buf.is_empty() {
            if let Some(split) = &mut self.open_file {
                let n = buf.len().min((split.begin + split.size - self.pos) as usize);
                if n == 0 {
                    self.open_file = None;
                    continue;
                }
                split.inner.read_exact(&mut buf[..n])?;
                total += n;
                self.pos += n as u64;
                buf = &mut buf[n..];
            } else if let Some(split) = self.files.iter().find(|f| f.contains(self.pos)) {
                let mut file = File::open(&split.inner)?;
                if self.pos > split.begin {
                    file.seek(SeekFrom::Start(self.pos - split.begin))?;
                }
                self.open_file = Some(Split { inner: file, begin: split.begin, size: split.size });
            } else {
                break;
            }
        }
        Ok(total)
    }
}

impl Seek for SplitFileReader {
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        self.pos = match pos {
            SeekFrom::Start(pos) => pos,
            SeekFrom::Current(offset) => self.pos.saturating_add_signed(offset),
            SeekFrom::End(offset) => self.len().saturating_add_signed(offset),
        };
        if let Some(split) = &mut self.open_file {
            if split.contains(self.pos) {
                // Seek within the open file
                split.inner.seek(SeekFrom::Start(self.pos - split.begin))?;
            } else {
                self.open_file = None;
            }
        }
        Ok(self.pos)
    }
}

impl ReadStream for SplitFileReader {
    fn stable_stream_len(&mut self) -> io::Result<u64> { Ok(self.len()) }

    fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
}

impl Clone for SplitFileReader {
    fn clone(&self) -> Self { Self { files: self.files.clone(), open_file: None, pos: self.pos } }
}
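// Illustrative only (not part of the commit): `SplitFileReader` presents
// `disc.iso`, `disc.iso.1`, `disc.iso.2`, ... (or the .partN.iso / .wbfN
// conventions above) as one contiguous stream. File names are hypothetical.
fn read_split_example() -> Result<()> {
    use std::io::Read;
    let mut reader = SplitFileReader::new(std::path::Path::new("disc.iso"))?;
    let total_len = reader.len(); // sum of all split part sizes
    let mut magic = [0u8; 4];
    reader.read_exact(&mut magic).context("Reading magic")?;
    println!("{} bytes across splits, magic {:02X?}", total_len, magic);
    Ok(())
}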
@@ -0,0 +1,203 @@
use std::{
    cmp::min,
    io,
    io::{BufReader, Read, Seek, SeekFrom},
    mem::size_of,
    path::Path,
};

use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes};

use crate::{
    disc::SECTOR_SIZE,
    io::{nkit::NKitHeader, split::SplitFileReader, DiscIO, DiscMeta, MagicBytes},
    util::reader::{read_from, read_vec},
    Error, ReadStream, Result, ResultContext,
};

pub const WBFS_MAGIC: MagicBytes = *b"WBFS";

#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
struct WBFSHeader {
    magic: MagicBytes,
    num_sectors: U32,
    sector_size_shift: u8,
    wbfs_sector_size_shift: u8,
    _pad: [u8; 2],
}

impl WBFSHeader {
    fn sector_size(&self) -> u32 { 1 << self.sector_size_shift }

    fn wbfs_sector_size(&self) -> u32 { 1 << self.wbfs_sector_size_shift }

    // fn align_lba(&self, x: u32) -> u32 { (x + self.sector_size() - 1) & !(self.sector_size() - 1) }
    //
    // fn num_wii_sectors(&self) -> u32 {
    //     (self.num_sectors.get() / SECTOR_SIZE as u32) * self.sector_size()
    // }
    //
    // fn max_wii_sectors(&self) -> u32 { NUM_WII_SECTORS }
    //
    // fn num_wbfs_sectors(&self) -> u32 {
    //     self.num_wii_sectors() >> (self.wbfs_sector_size_shift - 15)
    // }

    fn max_wbfs_sectors(&self) -> u32 { NUM_WII_SECTORS >> (self.wbfs_sector_size_shift - 15) }
}

const DISC_HEADER_SIZE: usize = 0x100;
const NUM_WII_SECTORS: u32 = 143432 * 2; // Double layer discs

pub struct DiscIOWBFS {
    pub inner: SplitFileReader,
    /// WBFS header
    header: WBFSHeader,
    /// Map of Wii LBAs to WBFS LBAs
    wlba_table: Vec<U16>,
    /// Optional NKit header
    nkit_header: Option<NKitHeader>,
}

impl DiscIOWBFS {
    pub fn new(filename: &Path) -> Result<Self> {
        let mut inner = BufReader::new(SplitFileReader::new(filename)?);

        let header: WBFSHeader = read_from(&mut inner).context("Reading WBFS header")?;
        if header.magic != WBFS_MAGIC {
            return Err(Error::DiscFormat("Invalid WBFS magic".to_string()));
        }
        // log::debug!("{:?}", header);
        // log::debug!("sector_size: {}", header.sector_size());
        // log::debug!("wbfs_sector_size: {}", header.wbfs_sector_size());
        let file_len = inner.stable_stream_len().context("Getting WBFS file size")?;
        let expected_file_len = header.num_sectors.get() as u64 * header.sector_size() as u64;
        if file_len != expected_file_len {
            return Err(Error::DiscFormat(format!(
                "Invalid WBFS file size: {}, expected {}",
                file_len, expected_file_len
            )));
        }

        let disc_table: Vec<u8> =
            read_vec(&mut inner, header.sector_size() as usize - size_of::<WBFSHeader>())
                .context("Reading WBFS disc table")?;
        if disc_table[0] != 1 {
            return Err(Error::DiscFormat("WBFS doesn't contain a disc".to_string()));
        }
        if disc_table[1../*max_disc as usize*/].iter().any(|&x| x != 0) {
            return Err(Error::DiscFormat("Only single WBFS discs are supported".to_string()));
        }

        // Read WBFS LBA table
        inner
            .seek(SeekFrom::Start(header.sector_size() as u64 + DISC_HEADER_SIZE as u64))
            .context("Seeking to WBFS LBA table")?; // Skip header
        let wlba_table: Vec<U16> = read_vec(&mut inner, header.max_wbfs_sectors() as usize)
            .context("Reading WBFS LBA table")?;

        // Read NKit header if present (always at 0x10000)
        inner.seek(SeekFrom::Start(0x10000)).context("Seeking to NKit header")?;
        let nkit_header = NKitHeader::try_read_from(&mut inner);

        // Reset reader
        let mut inner = inner.into_inner();
        inner.reset();
        Ok(Self { inner, header, wlba_table, nkit_header })
    }
}

impl DiscIO for DiscIOWBFS {
    fn open(&self) -> Result<Box<dyn ReadStream>> {
        Ok(Box::new(WBFSReadStream {
            inner: BufReader::new(self.inner.clone()),
            header: self.header.clone(),
            wlba_table: self.wlba_table.clone(),
            wlba: u32::MAX,
            pos: 0,
            disc_size: self.nkit_header.as_ref().and_then(|h| h.size),
        }))
    }

    fn meta(&self) -> Result<DiscMeta> {
        Ok(self.nkit_header.as_ref().map(DiscMeta::from).unwrap_or_default())
    }

    fn disc_size(&self) -> Option<u64> { self.nkit_header.as_ref().and_then(|h| h.size) }
}

struct WBFSReadStream {
    /// File reader
    inner: BufReader<SplitFileReader>,
    /// WBFS header
    header: WBFSHeader,
    /// Map of Wii LBAs to WBFS LBAs
    wlba_table: Vec<U16>,
    /// Current WBFS LBA
    wlba: u32,
    /// Current stream offset
    pos: u64,
    /// Optional known size
    disc_size: Option<u64>,
}

impl WBFSReadStream {
    fn disc_size(&self) -> u64 {
        self.disc_size.unwrap_or(NUM_WII_SECTORS as u64 * SECTOR_SIZE as u64)
    }
}

impl Read for WBFSReadStream {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let wlba = (self.pos >> self.header.wbfs_sector_size_shift) as u32;
        let wlba_size = self.header.wbfs_sector_size() as u64;
        let wlba_offset = self.pos & (wlba_size - 1);
        if wlba != self.wlba {
            if self.pos >= self.disc_size() || wlba >= self.header.max_wbfs_sectors() {
                return Ok(0);
            }
            let wlba_start = wlba_size * self.wlba_table[wlba as usize].get() as u64;
            self.inner.seek(SeekFrom::Start(wlba_start + wlba_offset))?;
            self.wlba = wlba;
        }

        let to_read = min(buf.len(), (wlba_size - wlba_offset) as usize);
        let read = self.inner.read(&mut buf[..to_read])?;
        self.pos += read as u64;
        Ok(read)
    }
}

impl Seek for WBFSReadStream {
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        let new_pos = match pos {
            SeekFrom::Start(v) => v,
            SeekFrom::End(_) => {
                return Err(io::Error::new(
                    io::ErrorKind::Unsupported,
                    "WBFSReadStream: SeekFrom::End is not supported",
                ));
            }
            SeekFrom::Current(v) => self.pos.saturating_add_signed(v),
        };

        let new_wlba = (self.pos >> self.header.wbfs_sector_size_shift) as u32;
        if new_wlba == self.wlba {
            // Seek within the same WBFS LBA
            self.inner.seek(SeekFrom::Current(new_pos as i64 - self.pos as i64))?;
        } else {
            // Seek to a different WBFS LBA, handled by next read
            self.wlba = u32::MAX;
        }

        self.pos = new_pos;
        Ok(new_pos)
    }
}

impl ReadStream for WBFSReadStream {
    fn stable_stream_len(&mut self) -> io::Result<u64> { Ok(self.disc_size()) }

    fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
}
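// Illustrative only (not part of the commit): the read path above maps a disc
// offset to a WBFS sector in two steps. Assuming a wbfs_sector_size_shift of 21
// (2 MiB WBFS sectors), an offset of 0x0050_0000 falls in Wii-side WBFS LBA 2;
// wlba_table[2] then gives the physical WBFS LBA inside the .wbfs file.
#[cfg(test)]
mod wlba_example {
    #[test]
    fn offset_to_wlba() {
        let wbfs_sector_size_shift = 21u8;
        let wlba_size = 1u64 << wbfs_sector_size_shift;
        let pos = 0x0050_0000u64;
        let wlba = pos >> wbfs_sector_size_shift; // which WBFS-sized chunk of the disc
        let wlba_offset = pos & (wlba_size - 1); // offset within that chunk
        assert_eq!(wlba, 2);
        assert_eq!(wlba_offset, 0x0010_0000);
    }
}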
src/io/wia.rs (1229 lines changed): diff suppressed because it is too large.
src/lib.rs (132 lines changed):
@@ -1,4 +1,4 @@
// #![warn(missing_docs, rustdoc::missing_doc_code_examples)]
//! Library for traversing & reading GameCube and Wii disc images.
//!
//! Based on the C++ library [nod](https://github.com/AxioDL/nod),
@@ -16,22 +16,17 @@
//! ```no_run
//! use std::io::Read;
//!
//! use nod::{Disc, PartitionKind};
//!
//! fn main() -> nod::Result<()> {
//!     let disc = Disc::new("path/to/file.iso")?;
//!     let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
//!     let meta = partition.meta()?;
//!     let fst = meta.fst()?;
//!     if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
//!         let mut s = String::new();
//!         partition
//!             .open_file(node)
//!             .expect("Failed to open file stream")
//!             .read_to_string(&mut s)
//!             .expect("Failed to read file");
@@ -40,11 +35,24 @@
//!     Ok(())
//! }
//! ```

use std::path::Path;

use disc::DiscBase;
pub use disc::{
    AppLoaderHeader, DiscHeader, DolHeader, PartitionBase, PartitionHeader, PartitionInfo,
    PartitionKind, PartitionMeta, BI2_SIZE, BOOT_SIZE,
};
pub use fst::{Fst, Node, NodeKind};
use io::DiscIO;
pub use io::DiscMeta;
pub use streams::ReadStream;

mod disc;
mod fst;
mod io;
mod streams;
mod util;

/// Error types for nod.
#[derive(thiserror::Error, Debug)]
@@ -55,19 +63,22 @@ pub enum Error {
    /// A general I/O error.
    #[error("I/O error: {0}")]
    Io(String, #[source] std::io::Error),
    /// An unknown error.
    #[error("error: {0}")]
    Other(String),
}

impl From<&str> for Error {
    fn from(s: &str) -> Error { Error::Other(s.to_string()) }
}

impl From<String> for Error {
    fn from(s: String) -> Error { Error::Other(s) }
}

/// Helper result type for [`Error`].
pub type Result<T, E = Error> = core::result::Result<T, E>;

pub trait ErrorContext {
    fn context(self, context: impl Into<String>) -> Error;
}
@@ -95,3 +106,72 @@ where E: ErrorContext
        self.map_err(|e| e.context(f()))
    }
}

#[derive(Default, Debug, Clone)]
pub struct OpenOptions {
    /// Wii: Validate partition data hashes while reading the disc image if present.
    pub validate_hashes: bool,
    /// Wii: Rebuild partition data hashes for the disc image if the underlying format
    /// does not store them. (e.g. WIA/RVZ)
    pub rebuild_hashes: bool,
    /// Wii: Rebuild partition data encryption if the underlying format stores data decrypted.
    /// (e.g. WIA/RVZ, NFS)
    ///
    /// Unnecessary if only opening a disc partition stream, which will already provide a decrypted
    /// stream. In this case, this will cause unnecessary processing.
    ///
    /// Only valid in combination with `rebuild_hashes`, as the data encryption is derived from the
    /// partition data hashes.
    pub rebuild_encryption: bool,
}

pub struct Disc {
    io: Box<dyn DiscIO>,
    base: Box<dyn DiscBase>,
    options: OpenOptions,
}

impl Disc {
    /// Opens a disc image from a file path.
    pub fn new<P: AsRef<Path>>(path: P) -> Result<Disc> {
        Disc::new_with_options(path, &OpenOptions::default())
    }

    /// Opens a disc image from a file path with custom options.
    pub fn new_with_options<P: AsRef<Path>>(path: P, options: &OpenOptions) -> Result<Disc> {
        let mut io = io::open(path.as_ref(), options)?;
        let base = disc::new(io.as_mut())?;
        Ok(Disc { io, base, options: options.clone() })
    }

    /// The disc's header.
    pub fn header(&self) -> &DiscHeader { self.base.header() }

    /// Returns extra metadata included in the disc file format, if any.
    pub fn meta(&self) -> Result<DiscMeta> { self.io.meta() }

    /// The disc's size in bytes or an estimate if not stored by the format.
    pub fn disc_size(&self) -> u64 { self.base.disc_size() }

    /// A list of partitions on the disc.
    ///
    /// For GameCube discs, this will return a single data partition spanning the entire disc.
    pub fn partitions(&self) -> Vec<PartitionInfo> { self.base.partitions() }

    /// Opens a new read stream for the base disc image.
    ///
    /// Generally does _not_ need to be used directly. Opening a partition will provide a
    /// decrypted stream instead.
    pub fn open(&self) -> Result<Box<dyn ReadStream + '_>> { self.io.open() }

    /// Opens a new, decrypted partition read stream for the specified partition index.
    pub fn open_partition(&self, index: usize) -> Result<Box<dyn PartitionBase + '_>> {
        self.base.open_partition(self.io.as_ref(), index, &self.options)
    }

    /// Opens a new partition read stream for the first partition matching
    /// the specified type.
    pub fn open_partition_kind(&self, kind: PartitionKind) -> Result<Box<dyn PartitionBase + '_>> {
        self.base.open_partition_kind(self.io.as_ref(), kind, &self.options)
    }
}
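// Illustrative only (not part of the commit): opening a disc with the new
// `OpenOptions`, asking the library to verify Wii partition hashes while
// reading. The path is hypothetical.
fn open_with_options_example() -> Result<()> {
    let options = OpenOptions { validate_hashes: true, ..Default::default() };
    let disc = Disc::new_with_options("path/to/file.rvz", &options)?;
    println!("Disc size: {} bytes", disc.disc_size());
    Ok(())
}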
@@ -3,8 +3,7 @@
use std::{
    fs::File,
    io,
    io::{BufReader, Read, Seek, SeekFrom},
};

/// Creates a fixed-size array reference from a slice.
@@ -31,6 +30,14 @@ macro_rules! array_ref_mut {
    }};
}

/// Compile-time assertion.
#[macro_export]
macro_rules! static_assert {
    ($condition:expr) => {
        const _: () = core::assert!($condition);
    };
}

/// A helper trait for seekable read streams.
pub trait ReadStream: Read + Seek {
    /// Replace with [`Read.stream_len`] when stabilized.
@@ -65,12 +72,20 @@ impl ReadStream for File {
    fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
}

impl<T> ReadStream for BufReader<T>
where T: ReadStream
{
    fn stable_stream_len(&mut self) -> io::Result<u64> { self.get_mut().stable_stream_len() }

    fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
}

trait WindowedReadStream: ReadStream {
    fn base_stream(&mut self) -> &mut dyn ReadStream;
    fn window(&self) -> (u64, u64);
}

/// A window into an existing [`ReadStream`], with ownership of the underlying stream.
pub struct OwningWindowedReadStream<'a> {
    /// The base stream.
    pub base: Box<dyn ReadStream + 'a>,
@@ -111,7 +126,7 @@ impl<'a> SharedWindowedReadStream<'a> {
}

#[inline(always)]
fn windowed_read(stream: &mut impl WindowedReadStream, buf: &mut [u8]) -> io::Result<usize> {
    let pos = stream.stream_position()?;
    let size = stream.stable_stream_len()?;
    if pos == size {
@@ -125,7 +140,7 @@ fn windowed_read(stream: &mut dyn WindowedReadStream, buf: &mut [u8]) -> io::Res
}

#[inline(always)]
fn windowed_seek(stream: &mut impl WindowedReadStream, pos: SeekFrom) -> io::Result<u64> {
    let (begin, end) = stream.window();
    let result = stream.base_stream().seek(match pos {
        SeekFrom::Start(p) => SeekFrom::Start(begin + p),
@@ -158,7 +173,7 @@ impl<'a> ReadStream for OwningWindowedReadStream<'a> {
}

impl<'a> WindowedReadStream for OwningWindowedReadStream<'a> {
    fn base_stream(&mut self) -> &mut dyn ReadStream { self.base.as_dyn() }

    fn window(&self) -> (u64, u64) { (self.begin, self.end) }
}
@@ -219,8 +234,8 @@ impl Seek for ByteReadStream<'_> {
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        let new_pos = match pos {
            SeekFrom::Start(v) => v,
            SeekFrom::End(v) => (self.bytes.len() as u64).saturating_add_signed(v),
            SeekFrom::Current(v) => self.position.saturating_add_signed(v),
        };
        if new_pos > self.bytes.len() as u64 {
            Err(io::Error::from(io::ErrorKind::UnexpectedEof))
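// Illustrative only (not part of the commit): the `static_assert!` macro added
// above evaluates its condition in a `const` context, so a violated invariant
// fails at compile time rather than at runtime.
static_assert!(std::mem::size_of::<u64>() == 8);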
@@ -0,0 +1,82 @@
use std::{io, io::Read};

use crate::{Error, Result};

/// Decodes the LZMA Properties byte (lc/lp/pb).
/// See `lzma_lzma_lclppb_decode` in `liblzma/lzma/lzma_decoder.c`.
#[cfg(feature = "compress-lzma")]
pub fn lzma_lclppb_decode(options: &mut liblzma::stream::LzmaOptions, byte: u8) -> Result<()> {
    let mut d = byte as u32;
    if d >= (9 * 5 * 5) {
        return Err(Error::DiscFormat(format!("Invalid LZMA props byte: {}", d)));
    }
    options.literal_context_bits(d % 9);
    d /= 9;
    options.position_bits(d / 5);
    options.literal_position_bits(d % 5);
    Ok(())
}

/// Decodes LZMA properties.
/// See `lzma_lzma_props_decode` in `liblzma/lzma/lzma_decoder.c`.
#[cfg(feature = "compress-lzma")]
pub fn lzma_props_decode(props: &[u8]) -> Result<liblzma::stream::LzmaOptions> {
    use crate::array_ref;
    if props.len() != 5 {
        return Err(Error::DiscFormat(format!("Invalid LZMA props length: {}", props.len())));
    }
    let mut options = liblzma::stream::LzmaOptions::new();
    lzma_lclppb_decode(&mut options, props[0])?;
    options.dict_size(u32::from_le_bytes(*array_ref!(props, 1, 4)));
    Ok(options)
}

/// Decodes LZMA2 properties.
/// See `lzma_lzma2_props_decode` in `liblzma/lzma/lzma2_decoder.c`.
#[cfg(feature = "compress-lzma")]
pub fn lzma2_props_decode(props: &[u8]) -> Result<liblzma::stream::LzmaOptions> {
    use std::cmp::Ordering;
    if props.len() != 1 {
        return Err(Error::DiscFormat(format!("Invalid LZMA2 props length: {}", props.len())));
    }
    let d = props[0] as u32;
    let mut options = liblzma::stream::LzmaOptions::new();
    options.dict_size(match d.cmp(&40) {
        Ordering::Greater => {
            return Err(Error::DiscFormat(format!("Invalid LZMA2 props byte: {}", d)));
        }
        Ordering::Equal => u32::MAX,
        Ordering::Less => (2 | (d & 1)) << (d / 2 + 11),
    });
    Ok(options)
}

/// Creates a new raw LZMA decoder with the given options.
#[cfg(feature = "compress-lzma")]
pub fn new_lzma_decoder<R>(
    reader: R,
    options: &liblzma::stream::LzmaOptions,
) -> io::Result<liblzma::read::XzDecoder<R>>
where
    R: Read,
{
    let mut filters = liblzma::stream::Filters::new();
    filters.lzma1(options);
    let stream = liblzma::stream::Stream::new_raw_decoder(&filters).map_err(io::Error::from)?;
    Ok(liblzma::read::XzDecoder::new_stream(reader, stream))
}

/// Creates a new raw LZMA2 decoder with the given options.
#[cfg(feature = "compress-lzma")]
pub fn new_lzma2_decoder<R>(
    reader: R,
    options: &liblzma::stream::LzmaOptions,
) -> io::Result<liblzma::read::XzDecoder<R>>
where
    R: Read,
{
    let mut filters = liblzma::stream::Filters::new();
    filters.lzma2(options);
    let stream = liblzma::stream::Stream::new_raw_decoder(&filters).map_err(io::Error::from)?;
    Ok(liblzma::read::XzDecoder::new_stream(reader, stream))
}
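// Illustrative only (not part of the commit): a sketch of how the helpers above
// fit together when decompressing an LZMA-compressed chunk, e.g. from a WIA
// container. `props` (5 bytes) and `compressed` are assumed to come from the
// container being read.
#[cfg(feature = "compress-lzma")]
fn decompress_lzma_example(props: &[u8], compressed: &[u8]) -> Result<Vec<u8>> {
    use std::io::Read;
    use crate::ResultContext;
    let options = lzma_props_decode(props)?;
    let mut decoder =
        new_lzma_decoder(compressed, &options).context("Creating LZMA decoder")?;
    let mut out = Vec::new();
    decoder.read_to_end(&mut out).context("Decompressing LZMA data")?;
    Ok(out)
}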
@@ -1,12 +1,19 @@
use std::{cmp::min, io, io::Read};

use zerocopy::{transmute_ref, AsBytes};

use crate::disc::SECTOR_SIZE;

pub const LFG_K: usize = 521;
pub const LFG_J: usize = 32;
pub const SEED_SIZE: usize = 17;

/// Lagged Fibonacci generator for Wii partition junk data.
///
/// References (license CC0-1.0):
/// https://github.com/dolphin-emu/dolphin/blob/a0f555648c27ec0c928f6b1e1fcad5e2d7c4d0c4/docs/WiaAndRvz.md
/// https://github.com/dolphin-emu/dolphin/blob/a0f555648c27ec0c928f6b1e1fcad5e2d7c4d0c4/Source/Core/DiscIO/LaggedFibonacciGenerator.cpp
pub struct LaggedFibonacci {
    buffer: [u32; LFG_K],
    position: usize,
}
@@ -21,6 +28,8 @@ impl LaggedFibonacci {
            self.buffer[i] =
                (self.buffer[i - 17] << 23) ^ (self.buffer[i - 16] >> 9) ^ self.buffer[i - 1];
        }
        // Instead of doing the "shift by 18 instead of 16" oddity when actually outputting the data,
        // we can do the shifting (and byteswapping) at this point to make the output code simpler.
        for x in self.buffer.iter_mut() {
            *x = ((*x & 0xFF00FFFF) | (*x >> 2 & 0x00FF0000)).swap_bytes();
        }
@@ -29,9 +38,32 @@
        }
    }

    pub fn init_with_seed(&mut self, init: [u8; 4], disc_num: u8, partition_offset: u64) {
        let seed = u32::from_be_bytes([
            init[2],
            init[1],
            init[3].wrapping_add(init[2]),
            init[0].wrapping_add(init[1]),
        ]) ^ disc_num as u32;
        let sector = (partition_offset / SECTOR_SIZE as u64) as u32;
        let mut n = seed.wrapping_mul(0x260BCD5) ^ sector.wrapping_mul(0x1EF29123);
        for i in 0..SEED_SIZE {
            let mut v = 0u32;
            for _ in 0..LFG_J {
                n = n.wrapping_mul(0x5D588B65).wrapping_add(1);
                v = (v >> 1) | (n & 0x80000000);
            }
            self.buffer[i] = v;
        }
        self.buffer[16] ^= self.buffer[0] >> 9 ^ self.buffer[16] << 23;
        self.position = 0;
        self.init();
        self.skip((partition_offset % SECTOR_SIZE as u64) as usize);
    }

    pub fn init_with_reader<R>(&mut self, reader: &mut R) -> io::Result<()>
    where R: Read + ?Sized {
        reader.read_exact(self.buffer[..SEED_SIZE].as_bytes_mut())?;
        for x in self.buffer[..SEED_SIZE].iter_mut() {
            *x = u32::from_be(*x);
        }
@@ -40,7 +72,7 @@ impl LaggedFibonacci {
        Ok(())
    }

    pub fn forward(&mut self) {
        for i in 0..LFG_J {
            self.buffer[i] ^= self.buffer[i + LFG_K - LFG_J];
        }
@@ -49,7 +81,7 @@ impl LaggedFibonacci {
        }
    }

    pub fn skip(&mut self, n: usize) {
        self.position += n;
        while self.position >= LFG_K * 4 {
            self.forward();
@@ -57,15 +89,11 @@
        }
    }

    pub fn fill(&mut self, mut buf: &mut [u8]) {
        while !buf.is_empty() {
            let len = min(buf.len(), LFG_K * 4 - self.position);
            let bytes: &[u8; LFG_K * 4] = transmute_ref!(&self.buffer);
            buf[..len].copy_from_slice(&bytes[self.position..self.position + len]);
            self.position += len;
            buf = &mut buf[len..];
            if self.position == LFG_K * 4 {
@@ -75,3 +103,32 @@
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_init_with_seed_1() {
        let mut lfg = LaggedFibonacci::default();
        lfg.init_with_seed([0x47, 0x41, 0x4c, 0x45], 0, 0x600000);
        let mut buf = [0u8; 16];
        lfg.fill(&mut buf);
        assert_eq!(buf, [
            0xE9, 0x47, 0x67, 0xBD, 0x41, 0x50, 0x4D, 0x5D, 0x61, 0x48, 0xB1, 0x99, 0xA0, 0x12,
            0x0C, 0xBA
        ]);
    }

    #[test]
    fn test_init_with_seed_2() {
        let mut lfg = LaggedFibonacci::default();
        lfg.init_with_seed([0x47, 0x41, 0x4c, 0x45], 0, 0x608000);
        let mut buf = [0u8; 16];
        lfg.fill(&mut buf);
        assert_eq!(buf, [
            0xE2, 0xBB, 0xBD, 0x77, 0xDA, 0xB2, 0x22, 0x42, 0x1C, 0x0C, 0x0B, 0xFC, 0xAC, 0x06,
            0xEA, 0xD0
        ]);
    }
}
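// Illustrative only (not part of the commit): regenerating a few bytes of Wii
// partition junk data for a given disc ID, disc number and partition offset,
// mirroring the tests above. The ID "GALE" and the offset are arbitrary.
fn junk_data_example() -> [u8; 8] {
    let mut lfg = LaggedFibonacci::default();
    lfg.init_with_seed(*b"GALE", 0, 0x600000);
    let mut buf = [0u8; 8];
    lfg.fill(&mut buf);
    buf
}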
@@ -1,5 +1,6 @@
use std::ops::{Div, Rem};

pub(crate) mod compress;
pub(crate) mod lfg;
pub(crate) mod reader;
pub(crate) mod take_seek;
@@ -1,243 +1,60 @@
use std::{io, io::Read};

use zerocopy::{AsBytes, FromBytes, FromZeroes};

#[inline(always)]
pub fn read_from<T, R>(reader: &mut R) -> io::Result<T>
where
    T: FromBytes + FromZeroes + AsBytes,
    R: Read + ?Sized,
{
    let mut ret = <T>::new_zeroed();
    reader.read_exact(ret.as_bytes_mut())?;
    Ok(ret)
}

#[inline(always)]
pub fn read_vec<T, R>(reader: &mut R, count: usize) -> io::Result<Vec<T>>
where
    T: FromBytes + FromZeroes + AsBytes,
    R: Read + ?Sized,
{
    let mut ret = <T>::new_vec_zeroed(count);
    reader.read_exact(ret.as_mut_slice().as_bytes_mut())?;
    Ok(ret)
}

#[inline(always)]
pub fn read_box_slice<T, R>(reader: &mut R, count: usize) -> io::Result<Box<[T]>>
where
    T: FromBytes + FromZeroes + AsBytes,
    R: Read + ?Sized,
{
    let mut ret = <T>::new_box_slice_zeroed(count);
    reader.read_exact(ret.as_mut().as_bytes_mut())?;
    Ok(ret)
}

#[inline(always)]
pub fn read_u16_be<R>(reader: &mut R) -> io::Result<u16>
where R: Read + ?Sized {
    let mut buf = [0u8; 2];
    reader.read_exact(&mut buf)?;
    Ok(u16::from_be_bytes(buf))
}

#[inline(always)]
pub fn read_u32_be<R>(reader: &mut R) -> io::Result<u32>
where R: Read + ?Sized {
    let mut buf = [0u8; 4];
    reader.read_exact(&mut buf)?;
    Ok(u32::from_be_bytes(buf))
}

#[inline(always)]
pub fn read_u64_be<R>(reader: &mut R) -> io::Result<u64>
where R: Read + ?Sized {
    let mut buf = [0u8; 8];
    reader.read_exact(&mut buf)?;
    Ok(u64::from_be_bytes(buf))
}
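// Illustrative only (not part of the commit): the zerocopy-based helpers above
// read any plain-old-data struct with big-endian fields directly from a stream.
// `ExampleHeader` is a hypothetical layout, not a type from this crate.
#[cfg(test)]
mod read_from_example {
    use super::*;
    use zerocopy::{big_endian::U32, AsBytes, FromBytes, FromZeroes};

    #[derive(FromBytes, FromZeroes, AsBytes)]
    #[repr(C)]
    struct ExampleHeader {
        magic: [u8; 4],
        size: U32,
    }

    #[test]
    fn read_example_header() {
        let bytes = [b'T', b'E', b'S', b'T', 0x00, 0x00, 0x01, 0x00];
        let mut reader = &bytes[..];
        let header: ExampleHeader = read_from(&mut reader).unwrap();
        assert_eq!(&header.magic, b"TEST");
        assert_eq!(header.size.get(), 0x100);
    }
}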