Documentation updates & fixes for Wii partition streams

This commit is contained in:
Luke Street 2024-02-24 12:35:41 -07:00
parent 1895b7df3f
commit 8bd52d4075
19 changed files with 296 additions and 199 deletions

Cargo.lock (generated)

@ -411,7 +411,7 @@ dependencies = [
[[package]]
name = "nod"
version = "1.0.0"
version = "1.1.0"
dependencies = [
"adler",
"aes",
@ -434,7 +434,7 @@ dependencies = [
[[package]]
name = "nodtool"
version = "1.0.0"
version = "1.1.0"
dependencies = [
"argp",
"base16ct",


@ -70,26 +70,45 @@ Opening a disc image and reading a file:
```rust
use std::io::Read;
use nod::{Disc, PartitionKind};
// Open a disc image and the first data partition.
let disc = nod::Disc::new("path/to/file.iso")
.expect("Failed to open disc");
let mut partition = disc.open_partition_kind(nod::PartitionKind::Data)
.expect("Failed to open data partition");
fn main() -> nod::Result<()> {
let disc = Disc::new("path/to/file.iso")?;
let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
let meta = partition.meta()?;
let fst = meta.fst()?;
if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
let mut s = String::new();
partition
.open_file(node)
.expect("Failed to open file stream")
.read_to_string(&mut s)
.expect("Failed to read file");
println!("{}", s);
}
Ok(())
// Read partition metadata and the file system table.
let meta = partition.meta()
.expect("Failed to read partition metadata");
let fst = meta.fst()
.expect("File system table is invalid");
// Find a file by path and read it into a string.
if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
let mut s = String::new();
partition
.open_file(node)
.expect("Failed to open file stream")
.read_to_string(&mut s)
.expect("Failed to read file");
println!("{}", s);
}
```
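Listing Wii partitions (a brief sketch using the same API; for GameCube discs the list is empty):
```rust
// Open a disc image and print each Wii partition's index and kind.
let disc = nod::Disc::new("path/to/file.iso")
    .expect("Failed to open disc");
for partition in disc.partitions() {
    println!("Partition {}: {:?}", partition.index, partition.kind);
}
```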
Converting a disc image to raw ISO:
```rust
// Enable `rebuild_encryption` to ensure the output is a valid ISO.
let options = nod::OpenOptions { rebuild_encryption: true, ..Default::default() };
let mut disc = nod::Disc::new_with_options("path/to/file.rvz", &options)
.expect("Failed to open disc");
// Read directly from the open disc and write to the output file.
let mut out = std::fs::File::create("output.iso")
.expect("Failed to create output file");
std::io::copy(&mut disc, &mut out)
.expect("Failed to write data");
```
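Validating Wii partition hashes while reading (a minimal sketch; it assumes partition streams expose `std::io::Read` and that a hash mismatch surfaces as an `InvalidData` read error):
```rust
use std::io::Read;

// Enable `validate_hashes` to verify partition data hashes as sectors are read.
let options = nod::OpenOptions { validate_hashes: true, ..Default::default() };
let disc = nod::Disc::new_with_options("path/to/file.iso", &options)
    .expect("Failed to open disc");
let mut partition = disc.open_partition_kind(nod::PartitionKind::Data)
    .expect("Failed to open data partition");
// Read the partition to the end; any hash mismatch aborts the read.
let mut buf = vec![0u8; partition.ideal_buffer_size()];
loop {
    let n = partition.read(&mut buf).expect("Hash mismatch or read error");
    if n == 0 {
        break;
    }
}
```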
## License
Licensed under either of


@ -1,6 +1,6 @@
[package]
name = "nod"
version = "1.0.0"
version = "1.1.0"
edition = "2021"
rust-version = "1.73.0"
authors = ["Luke Street <luke@street.dev>"]


@ -9,7 +9,7 @@ use zerocopy::{FromBytes, FromZeroes};
use crate::{
disc::{
AppLoaderHeader, DiscHeader, DolHeader, PartitionBase, PartitionHeader, PartitionMeta,
ApploaderHeader, DiscHeader, DolHeader, PartitionBase, PartitionHeader, PartitionMeta,
BI2_SIZE, BOOT_SIZE, SECTOR_SIZE,
},
fst::{Node, NodeKind},
@ -79,7 +79,6 @@ impl Read for PartitionGC {
self.block.copy_raw(
self.sector_buf.as_mut(),
self.block_buf.as_ref(),
block_idx,
sector,
&self.disc_header,
)?;
@ -137,33 +136,34 @@ pub(crate) fn read_part_meta(
// apploader.bin
let mut raw_apploader: Vec<u8> =
read_vec(reader, size_of::<AppLoaderHeader>()).context("Reading apploader header")?;
let apploader_header = AppLoaderHeader::ref_from(raw_apploader.as_slice()).unwrap();
read_vec(reader, size_of::<ApploaderHeader>()).context("Reading apploader header")?;
let apploader_header = ApploaderHeader::ref_from(raw_apploader.as_slice()).unwrap();
raw_apploader.resize(
size_of::<AppLoaderHeader>()
size_of::<ApploaderHeader>()
+ apploader_header.size.get() as usize
+ apploader_header.trailer_size.get() as usize,
0,
);
reader
.read_exact(&mut raw_apploader[size_of::<AppLoaderHeader>()..])
.read_exact(&mut raw_apploader[size_of::<ApploaderHeader>()..])
.context("Reading apploader")?;
// fst.bin
reader
.seek(SeekFrom::Start(partition_header.fst_off(is_wii)))
.seek(SeekFrom::Start(partition_header.fst_offset(is_wii)))
.context("Seeking to FST offset")?;
let raw_fst: Box<[u8]> = read_box_slice(reader, partition_header.fst_sz(is_wii) as usize)
let raw_fst: Box<[u8]> = read_box_slice(reader, partition_header.fst_size(is_wii) as usize)
.with_context(|| {
format!(
"Reading partition FST (offset {}, size {})",
partition_header.fst_off, partition_header.fst_sz
partition_header.fst_offset(is_wii),
partition_header.fst_size(is_wii)
)
})?;
// main.dol
reader
.seek(SeekFrom::Start(partition_header.dol_off(is_wii)))
.seek(SeekFrom::Start(partition_header.dol_offset(is_wii)))
.context("Seeking to DOL offset")?;
let mut raw_dol: Vec<u8> =
read_vec(reader, size_of::<DolHeader>()).context("Reading DOL header")?;


@ -82,10 +82,6 @@ pub fn rebuild_hashes(reader: &mut DiscReader) -> Result<()> {
// Precompute hashes for zeroed sectors.
const ZERO_H0_BYTES: &[u8] = &[0u8; HASHES_SIZE];
let zero_h0_hash = hash_bytes(ZERO_H0_BYTES);
let mut zero_h1_hash = Sha1::new();
for _ in 0..NUM_H0_HASHES {
zero_h1_hash.update(zero_h0_hash);
}
let partitions = reader.partitions();
let mut hash_tables = Vec::with_capacity(partitions.len());
@ -171,6 +167,7 @@ pub fn rebuild_hashes(reader: &mut DiscReader) -> Result<()> {
.context("Seeking to H3 table")?;
let h3_table: Box<[HashBytes]> =
read_box_slice(reader, hash_table.h3_hashes.len()).context("Reading H3 table")?;
let mut mismatches = 0;
for (idx, (expected_hash, h3_hash)) in
h3_table.iter().zip(hash_table.h3_hashes.iter()).enumerate()
{
@ -180,12 +177,16 @@ pub fn rebuild_hashes(reader: &mut DiscReader) -> Result<()> {
let mut expected_bytes = [0u8; 40];
let expected =
base16ct::lower::encode_str(expected_hash, &mut expected_bytes).unwrap();
log::warn!(
log::debug!(
"Partition {} H3 table does not match:\n\tindex {}\n\texpected: {}\n\tgot: {}",
part.index, idx, expected, got
);
mismatches += 1;
}
}
if mismatches > 0 {
log::warn!("Partition {} H3 table has {} hash mismatches", part.index, mismatches);
}
}
for (part, hash_table) in reader.partitions.iter_mut().zip(hash_tables) {


@ -25,9 +25,12 @@ pub(crate) mod hashes;
pub(crate) mod reader;
pub(crate) mod wii;
/// Size in bytes of a disc sector.
pub const SECTOR_SIZE: usize = 0x8000;
/// Shared GameCube & Wii disc header
/// Shared GameCube & Wii disc header.
///
/// This header is always at the start of the disc image and within each Wii partition.
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct DiscHeader {
@ -78,30 +81,34 @@ impl DiscHeader {
pub fn is_wii(&self) -> bool { self.wii_magic.get() == 0x5D1C9EA3 }
}
/// Partition header
/// A header describing the contents of a disc partition.
///
/// **GameCube**: Always follows the disc header.
///
/// **Wii**: Follows the disc header within each partition.
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct PartitionHeader {
/// Debug monitor offset
pub debug_mon_off: U32,
pub debug_mon_offset: U32,
/// Debug monitor load address
pub debug_load_addr: U32,
pub debug_load_address: U32,
/// Padding
_pad1: [u8; 0x18],
/// Offset to main DOL (Wii: >> 2)
pub dol_off: U32,
pub dol_offset: U32,
/// Offset to file system table (Wii: >> 2)
pub fst_off: U32,
pub fst_offset: U32,
/// File system size (Wii: >> 2)
pub fst_sz: U32,
pub fst_size: U32,
/// File system max size (Wii: >> 2)
pub fst_max_sz: U32,
pub fst_max_size: U32,
/// File system table load address
pub fst_memory_address: U32,
/// User position
pub user_position: U32,
/// User size
pub user_sz: U32,
pub user_size: U32,
/// Padding
_pad2: [u8; 4],
}
@ -109,43 +116,47 @@ pub struct PartitionHeader {
static_assert!(size_of::<PartitionHeader>() == 0x40);
impl PartitionHeader {
pub fn dol_off(&self, is_wii: bool) -> u64 {
/// Offset within the partition to the main DOL.
pub fn dol_offset(&self, is_wii: bool) -> u64 {
if is_wii {
self.dol_off.get() as u64 * 4
self.dol_offset.get() as u64 * 4
} else {
self.dol_off.get() as u64
self.dol_offset.get() as u64
}
}
pub fn fst_off(&self, is_wii: bool) -> u64 {
/// Offset within the partition to the file system table (FST).
pub fn fst_offset(&self, is_wii: bool) -> u64 {
if is_wii {
self.fst_off.get() as u64 * 4
self.fst_offset.get() as u64 * 4
} else {
self.fst_off.get() as u64
self.fst_offset.get() as u64
}
}
pub fn fst_sz(&self, is_wii: bool) -> u64 {
/// Size of the file system table (FST).
pub fn fst_size(&self, is_wii: bool) -> u64 {
if is_wii {
self.fst_sz.get() as u64 * 4
self.fst_size.get() as u64 * 4
} else {
self.fst_sz.get() as u64
self.fst_size.get() as u64
}
}
pub fn fst_max_sz(&self, is_wii: bool) -> u64 {
/// Maximum size of the file system table (FST) across all discs of a multi-disc game.
pub fn fst_max_size(&self, is_wii: bool) -> u64 {
if is_wii {
self.fst_max_sz.get() as u64 * 4
self.fst_max_size.get() as u64 * 4
} else {
self.fst_max_sz.get() as u64
self.fst_max_size.get() as u64
}
}
}
/// Apploader header
/// Apploader header.
#[derive(Debug, PartialEq, Clone, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct AppLoaderHeader {
pub struct ApploaderHeader {
/// Apploader build date
pub date: [u8; 16],
/// Entry point
@ -158,19 +169,19 @@ pub struct AppLoaderHeader {
_pad: [u8; 4],
}
impl AppLoaderHeader {
/// Apploader build date as a string
impl ApploaderHeader {
/// Apploader build date as a string.
pub fn date_str(&self) -> Option<&str> {
CStr::from_bytes_until_nul(&self.date).ok().and_then(|c| c.to_str().ok())
}
}
/// Maximum number of text sections in a DOL
/// Maximum number of text sections in a DOL.
pub const DOL_MAX_TEXT_SECTIONS: usize = 7;
/// Maximum number of data sections in a DOL
/// Maximum number of data sections in a DOL.
pub const DOL_MAX_DATA_SECTIONS: usize = 11;
/// DOL header
/// Dolphin executable (DOL) header.
#[derive(Debug, Clone, FromBytes, FromZeroes)]
pub struct DolHeader {
/// Text section offsets
@ -197,12 +208,16 @@ pub struct DolHeader {
static_assert!(size_of::<DolHeader>() == 0x100);
/// Partition type
/// The kind of disc partition.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum PartitionKind {
/// Data partition.
Data,
/// Update partition.
Update,
/// Channel partition.
Channel,
/// Other partition kind.
Other(u32),
}
@ -246,7 +261,7 @@ impl From<u32> for PartitionKind {
}
}
/// An open read stream for a disc partition.
/// An open disc partition.
pub trait PartitionBase: DynClone + ReadStream + Send + Sync {
/// Reads the partition header and file system table.
fn meta(&mut self) -> Result<Box<PartitionMeta>>;
@ -283,7 +298,7 @@ pub trait PartitionBase: DynClone + ReadStream + Send + Sync {
/// The ideal size for buffered reads from this partition.
/// GameCube discs have a data block size of 0x8000,
/// whereas Wii discs have a data block size of 0x7c00.
/// whereas Wii discs have a data block size of 0x7C00.
fn ideal_buffer_size(&self) -> usize;
}
@ -294,7 +309,7 @@ pub const BOOT_SIZE: usize = size_of::<DiscHeader>() + size_of::<PartitionHeader
/// Size of the debug and region information (bi2.bin)
pub const BI2_SIZE: usize = 0x2000;
/// Disc partition metadata
/// Extra disc partition data. (DOL, FST, etc.)
#[derive(Clone, Debug)]
pub struct PartitionMeta {
/// Disc and partition header (boot.bin)
@ -318,31 +333,50 @@ pub struct PartitionMeta {
}
impl PartitionMeta {
/// A view into the disc header.
pub fn header(&self) -> &DiscHeader {
DiscHeader::ref_from(&self.raw_boot[..size_of::<DiscHeader>()]).unwrap()
}
/// A view into the partition header.
pub fn partition_header(&self) -> &PartitionHeader {
PartitionHeader::ref_from(&self.raw_boot[size_of::<DiscHeader>()..]).unwrap()
}
pub fn apploader_header(&self) -> &AppLoaderHeader {
AppLoaderHeader::ref_from_prefix(&self.raw_apploader).unwrap()
/// A view into the apploader header.
pub fn apploader_header(&self) -> &ApploaderHeader {
ApploaderHeader::ref_from_prefix(&self.raw_apploader).unwrap()
}
/// A view into the file system table (FST).
pub fn fst(&self) -> Result<Fst, &'static str> { Fst::new(&self.raw_fst) }
/// A view into the DOL header.
pub fn dol_header(&self) -> &DolHeader { DolHeader::ref_from_prefix(&self.raw_dol).unwrap() }
/// A view into the ticket. (Wii only)
pub fn ticket(&self) -> Option<&Ticket> {
self.raw_ticket.as_ref().and_then(|v| Ticket::ref_from(v))
}
/// A view into the TMD. (Wii only)
pub fn tmd_header(&self) -> Option<&TmdHeader> {
self.raw_tmd.as_ref().and_then(|v| TmdHeader::ref_from_prefix(v))
}
}
/// The size of a single-layer MiniDVD. (1.4 GB)
///
/// GameCube games and some third-party Wii discs (Datel) use this format.
pub const MINI_DVD_SIZE: u64 = 1_459_978_240;
/// The size of a single-layer DVD. (4.7 GB)
///
/// The vast majority of Wii games use this format.
pub const SL_DVD_SIZE: u64 = 4_699_979_776;
/// The size of a dual-layer DVD. (8.5 GB)
///
/// A few larger Wii games use this format.
/// (Super Smash Bros. Brawl, Metroid Prime Trilogy, etc.)
pub const DL_DVD_SIZE: u64 = 8_511_160_320;


@ -83,7 +83,7 @@ impl DiscReader {
if reader.disc_header.is_wii() {
reader.partitions = read_partition_info(&mut reader)?;
// Rebuild hashes if the format requires it
if options.rebuild_encryption && meta.needs_hash_recovery {
if (options.rebuild_encryption || options.validate_hashes) && meta.needs_hash_recovery {
rebuild_hashes(&mut reader)?;
}
}
@ -130,22 +130,22 @@ impl DiscReader {
}
/// Opens a new, decrypted partition read stream for the first partition matching
/// the specified type.
/// the specified kind.
pub fn open_partition_kind(
&self,
part_type: PartitionKind,
kind: PartitionKind,
options: &OpenOptions,
) -> Result<Box<dyn PartitionBase>> {
if self.disc_header.is_gamecube() {
if part_type == PartitionKind::Data {
if kind == PartitionKind::Data {
Ok(PartitionGC::new(self.io.clone(), self.disc_header.clone())?)
} else {
Err(Error::DiscFormat("GameCube discs only have a data partition".to_string()))
}
} else if let Some(part) = self.partitions.iter().find(|v| v.kind == part_type) {
} else if let Some(part) = self.partitions.iter().find(|v| v.kind == kind) {
Ok(PartitionWii::new(self.io.clone(), self.disc_header.clone(), part, options)?)
} else {
Err(Error::DiscFormat(format!("Partition type {part_type} not found")))
Err(Error::DiscFormat(format!("Partition type {kind} not found")))
}
}
}
@ -176,14 +176,12 @@ impl Read for DiscReader {
EncryptionMode::Decrypted => self.block.decrypt(
self.sector_buf.as_mut(),
self.block_buf.as_ref(),
block_idx,
abs_sector,
partition,
)?,
EncryptionMode::Encrypted => self.block.encrypt(
self.sector_buf.as_mut(),
self.block_buf.as_ref(),
block_idx,
abs_sector,
partition,
)?,
@ -192,7 +190,6 @@ impl Read for DiscReader {
self.block.copy_raw(
self.sector_buf.as_mut(),
self.block_buf.as_ref(),
block_idx,
abs_sector,
&self.disc_header,
)?;
@ -225,7 +222,7 @@ impl Seek for DiscReader {
}
}
fn read_partition_info(reader: &mut DiscReader) -> crate::Result<Vec<PartitionInfo>> {
fn read_partition_info(reader: &mut DiscReader) -> Result<Vec<PartitionInfo>> {
reader.seek(SeekFrom::Start(WII_PART_GROUP_OFF)).context("Seeking to partition groups")?;
let part_groups: [WiiPartGroup; 4] = read_from(reader).context("Reading partition groups")?;
let mut part_info = Vec::new();
@ -306,6 +303,7 @@ fn guess_disc_size(part_info: &[PartitionInfo]) -> u64 {
})
.max()
.unwrap_or(0x50000);
// TODO add FST offsets (decrypted partitions)
if max_offset <= MINI_DVD_SIZE && !part_info.iter().any(|v| v.kind == PartitionKind::Data) {
// Datel disc
MINI_DVD_SIZE


@ -27,7 +27,10 @@ use crate::{
DiscHeader, Error, OpenOptions, Result, ResultContext,
};
/// Size in bytes of the hashes block in a Wii disc sector
pub(crate) const HASHES_SIZE: usize = 0x400;
/// Size in bytes of the data block in a Wii disc sector (excluding hashes)
pub(crate) const SECTOR_DATA_SIZE: usize = SECTOR_SIZE - HASHES_SIZE; // 0x7C00
// ppki (Retail)
@ -312,12 +315,13 @@ impl PartitionWii {
impl Read for PartitionWii {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let partition_sector = (self.pos / SECTOR_DATA_SIZE as u64) as u32;
let sector = self.partition.data_start_sector + partition_sector;
if sector >= self.partition.data_end_sector {
let part_sector = (self.pos / SECTOR_DATA_SIZE as u64) as u32;
let abs_sector = self.partition.data_start_sector + part_sector;
if abs_sector >= self.partition.data_end_sector {
return Ok(0);
}
let block_idx = (sector as u64 * SECTOR_SIZE as u64 / self.block_buf.len() as u64) as u32;
let block_idx =
(abs_sector as u64 * SECTOR_SIZE as u64 / self.block_buf.len() as u64) as u32;
// Read new block if necessary
if block_idx != self.block_idx {
@ -327,18 +331,17 @@ impl Read for PartitionWii {
}
// Decrypt sector if necessary
if sector != self.sector {
if abs_sector != self.sector {
self.block.decrypt(
self.sector_buf.as_mut(),
self.block_buf.as_ref(),
block_idx,
sector,
abs_sector,
&self.partition,
)?;
if self.verify {
verify_hashes(&self.sector_buf, sector)?;
verify_hashes(self.sector_buf.as_ref(), part_sector, self.raw_h3_table.as_ref())?;
}
self.sector = sector;
self.sector = abs_sector;
}
let offset = (self.pos % SECTOR_DATA_SIZE as u64) as usize;
@ -369,9 +372,9 @@ impl Seek for PartitionWii {
#[inline(always)]
pub(crate) fn as_digest(slice: &[u8; 20]) -> digest::Output<Sha1> { (*slice).into() }
fn verify_hashes(buf: &[u8; SECTOR_SIZE], sector: u32) -> io::Result<()> {
let (mut group, sub_group) = div_rem(sector as usize, 8);
group %= 8;
fn verify_hashes(buf: &[u8; SECTOR_SIZE], part_sector: u32, h3_table: &[u8]) -> io::Result<()> {
let (cluster, sector) = div_rem(part_sector as usize, 8);
let (group, sub_group) = div_rem(cluster, 8);
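// Hash hierarchy: each sector stores 31 H0 hashes of its own 0x400-byte data blocks,
// each subgroup of 8 sectors shares H1 hashes, each group of 8 subgroups shares H2
// hashes, and the partition-wide H3 table stores one hash per group.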
// H0 hashes
for i in 0..31 {
@ -391,14 +394,14 @@ fn verify_hashes(buf: &[u8; SECTOR_SIZE], sector: u32) -> io::Result<()> {
{
let mut hash = Sha1::new();
hash.update(array_ref![buf, 0, 0x26C]);
let expected = as_digest(array_ref![buf, 0x280 + sub_group * 20, 20]);
let expected = as_digest(array_ref![buf, 0x280 + sector * 20, 20]);
let output = hash.finalize();
if output != expected {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Invalid H1 hash! (subgroup {:?}) {:x}\n\texpected {:x}",
sub_group, output, expected
sector, output, expected
),
));
}
@ -408,19 +411,33 @@ fn verify_hashes(buf: &[u8; SECTOR_SIZE], sector: u32) -> io::Result<()> {
{
let mut hash = Sha1::new();
hash.update(array_ref![buf, 0x280, 0xA0]);
let expected = as_digest(array_ref![buf, 0x340 + group * 20, 20]);
let expected = as_digest(array_ref![buf, 0x340 + sub_group * 20, 20]);
let output = hash.finalize();
if output != expected {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Invalid H2 hash! (group {:?}) {:x}\n\texpected {:x}",
group, output, expected
sub_group, output, expected
),
));
}
}
// TODO H3 hash
// H3 hash
{
let mut hash = Sha1::new();
hash.update(array_ref![buf, 0x340, 0xA0]);
let expected = as_digest(array_ref![h3_table, group * 20, 20]);
let output = hash.finalize();
if output != expected {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("Invalid H3 hash! {:x}\n\texpected {:x}", output, expected),
));
}
}
Ok(())
}


@ -32,7 +32,7 @@ pub struct Node {
static_assert!(size_of::<Node>() == 12);
impl Node {
/// File system node type.
/// File system node kind.
pub fn kind(&self) -> NodeKind {
match self.kind {
0 => NodeKind::File,
@ -71,9 +71,11 @@ impl Node {
pub fn length(&self) -> u64 { self.length.get() as u64 }
}
/// A view into the file system tree (FST).
/// A view into the file system table (FST).
pub struct Fst<'a> {
/// The nodes in the FST.
pub nodes: &'a [Node],
/// The string table containing all file and directory names.
pub string_table: &'a [u8],
}


@ -135,20 +135,32 @@ pub fn open(filename: &Path) -> Result<Box<dyn BlockIO>> {
Ok(io)
}
/// Wii partition information.
#[derive(Debug, Clone)]
pub struct PartitionInfo {
/// The partition index.
pub index: usize,
/// The kind of disc partition.
pub kind: PartitionKind,
/// The start sector of the partition.
pub start_sector: u32,
/// The start sector of the partition's (encrypted) data.
pub data_start_sector: u32,
/// The end sector of the partition's (encrypted) data.
pub data_end_sector: u32,
/// The AES key for the partition, also known as the "title key".
pub key: KeyBytes,
/// The Wii partition header.
pub header: Box<WiiPartitionHeader>,
/// The disc header within the partition.
pub disc_header: Box<DiscHeader>,
/// The partition header within the partition.
pub partition_header: Box<PartitionHeader>,
/// The hash table for the partition, if rebuilt.
pub hash_table: Option<HashTable>,
}
/// The block kind returned by [`BlockIO::read_block`].
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum Block {
/// Raw data or encrypted Wii partition data
@ -171,29 +183,28 @@ impl Block {
self,
out: &mut [u8; SECTOR_SIZE],
data: &[u8],
block_idx: u32,
abs_sector: u32,
partition: &PartitionInfo,
) -> io::Result<()> {
let rel_sector = abs_sector - self.start_sector(block_idx, data.len());
let part_sector = abs_sector - partition.data_start_sector;
match self {
Block::Raw => {
out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, rel_sector)?);
out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
decrypt_sector(out, partition);
}
Block::PartDecrypted { has_hashes } => {
out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, rel_sector)?);
out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
if !has_hashes {
rebuild_hash_block(out, abs_sector, partition);
rebuild_hash_block(out, part_sector, partition);
}
}
Block::Junk => {
generate_junk(out, abs_sector, Some(partition), &partition.disc_header);
rebuild_hash_block(out, abs_sector, partition);
generate_junk(out, part_sector, Some(partition), &partition.disc_header);
rebuild_hash_block(out, part_sector, partition);
}
Block::Zero => {
out.fill(0);
rebuild_hash_block(out, abs_sector, partition);
rebuild_hash_block(out, part_sector, partition);
}
}
Ok(())
@ -204,30 +215,29 @@ impl Block {
self,
out: &mut [u8; SECTOR_SIZE],
data: &[u8],
block_idx: u32,
abs_sector: u32,
partition: &PartitionInfo,
) -> io::Result<()> {
let rel_sector = abs_sector - self.start_sector(block_idx, data.len());
let part_sector = abs_sector - partition.data_start_sector;
match self {
Block::Raw => {
out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, rel_sector)?);
out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
}
Block::PartDecrypted { has_hashes } => {
out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, rel_sector)?);
out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
if !has_hashes {
rebuild_hash_block(out, abs_sector, partition);
rebuild_hash_block(out, part_sector, partition);
}
encrypt_sector(out, partition);
}
Block::Junk => {
generate_junk(out, abs_sector, Some(partition), &partition.disc_header);
rebuild_hash_block(out, abs_sector, partition);
generate_junk(out, part_sector, Some(partition), &partition.disc_header);
rebuild_hash_block(out, part_sector, partition);
encrypt_sector(out, partition);
}
Block::Zero => {
out.fill(0);
rebuild_hash_block(out, abs_sector, partition);
rebuild_hash_block(out, part_sector, partition);
encrypt_sector(out, partition);
}
}
@ -239,16 +249,12 @@ impl Block {
self,
out: &mut [u8; SECTOR_SIZE],
data: &[u8],
block_idx: u32,
abs_sector: u32,
disc_header: &DiscHeader,
) -> io::Result<()> {
match self {
Block::Raw => {
out.copy_from_slice(block_sector::<SECTOR_SIZE>(
data,
abs_sector - self.start_sector(block_idx, data.len()),
)?);
out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
}
Block::PartDecrypted { .. } => {
return Err(io::Error::new(
@ -261,11 +267,6 @@ impl Block {
}
Ok(())
}
/// Returns the start sector of the block.
fn start_sector(&self, index: u32, block_size: usize) -> u32 {
(index as u64 * block_size as u64 / SECTOR_SIZE as u64) as u32
}
}
#[inline(always)]
@ -276,14 +277,15 @@ fn block_sector<const N: usize>(data: &[u8], sector_idx: u32) -> io::Result<&[u8
format!("Expected block size {} to be a multiple of {}", data.len(), N),
));
}
let offset = sector_idx as usize * N;
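// A block may contain multiple sectors; wrap the absolute sector index to an index within this block.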
let rel_sector = sector_idx % (data.len() / N) as u32;
let offset = rel_sector as usize * N;
data.get(offset..offset + N)
.ok_or_else(|| {
io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Sector {} out of range (block size {}, sector size {})",
sector_idx,
rel_sector,
data.len(),
N
),
@ -298,12 +300,11 @@ fn generate_junk(
partition: Option<&PartitionInfo>,
disc_header: &DiscHeader,
) {
let mut pos = if let Some(partition) = partition {
(sector - partition.data_start_sector) as u64 * SECTOR_DATA_SIZE as u64
let (mut pos, mut offset) = if partition.is_some() {
(sector as u64 * SECTOR_DATA_SIZE as u64, HASHES_SIZE)
} else {
sector as u64 * SECTOR_SIZE as u64
(sector as u64 * SECTOR_SIZE as u64, 0)
};
let mut offset = if partition.is_some() { HASHES_SIZE } else { 0 };
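// Within a partition, the first HASHES_SIZE bytes of each sector hold the hash block; it is zeroed here and rebuilt separately.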
out[..offset].fill(0);
while offset < SECTOR_SIZE {
// The LFG spans a single sector of the decrypted data,
@ -318,11 +319,11 @@ fn generate_junk(
}
}
fn rebuild_hash_block(out: &mut [u8; SECTOR_SIZE], sector: u32, partition: &PartitionInfo) {
fn rebuild_hash_block(out: &mut [u8; SECTOR_SIZE], part_sector: u32, partition: &PartitionInfo) {
let Some(hash_table) = partition.hash_table.as_ref() else {
return;
};
let sector_idx = (sector - partition.data_start_sector) as usize;
let sector_idx = part_sector as usize;
let h0_hashes: &[u8; 0x26C] =
transmute_ref!(array_ref![hash_table.h0_hashes, sector_idx * 31, 31]);
out[0..0x26C].copy_from_slice(h0_hashes);


@ -80,8 +80,6 @@ impl DiscIOCISO {
None
};
// Reset reader
inner.reset();
Ok(Box::new(Self { inner, header, block_map, nkit_header }))
}
}


@ -78,9 +78,6 @@ impl DiscIOGCZ {
// header + block_count * (u64 + u32)
let data_offset = size_of::<GCZHeader>() as u64 + block_count as u64 * 12;
// Reset reader
inner.reset();
let block_buf = <u8>::new_box_slice_zeroed(header.block_size.get() as usize);
Ok(Box::new(Self { inner, header, block_map, block_hashes, block_buf, data_offset }))
}


@ -22,12 +22,13 @@ pub(crate) type KeyBytes = [u8; 16];
/// Magic bytes
pub(crate) type MagicBytes = [u8; 4];
/// The disc file format.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum Format {
/// Raw ISO
/// ISO / GCM (GameCube master disc)
#[default]
Iso,
/// CISO
/// CISO (Compact ISO)
Ciso,
/// GCZ
Gcz,
@ -55,6 +56,7 @@ impl fmt::Display for Format {
}
}
/// The disc file format's compression algorithm.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum Compression {
/// No compression


@ -100,11 +100,6 @@ impl SplitFileReader {
Ok(())
}
pub fn reset(&mut self) {
self.open_file = None;
self.pos = 0;
}
pub fn len(&self) -> u64 { self.files.last().map_or(0, |f| f.begin + f.size) }
}


@ -102,8 +102,6 @@ impl DiscIOWBFS {
inner.seek(SeekFrom::Start(0x10000)).context("Seeking to NKit header")?;
let nkit_header = NKitHeader::try_read_from(&mut inner, header.block_size(), true);
// Reset reader
inner.reset();
Ok(Box::new(Self { inner, header, block_map, nkit_header }))
}
}


@ -91,16 +91,16 @@ impl WIAFileHeader {
pub fn is_rvz(&self) -> bool { self.magic == RVZ_MAGIC }
}
/// Disc type
/// Disc kind
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DiscType {
pub enum DiscKind {
/// GameCube disc
GameCube,
/// Wii disc
Wii,
}
impl TryFrom<u32> for DiscType {
impl TryFrom<u32> for DiscKind {
type Error = Error;
fn try_from(value: u32) -> Result<Self> {
@ -225,11 +225,11 @@ static_assert!(size_of::<WIADisc>() == 0xDC);
impl WIADisc {
pub fn validate(&self) -> Result<()> {
DiscType::try_from(self.disc_type.get())?;
DiscKind::try_from(self.disc_type.get())?;
WIACompression::try_from(self.compression.get())?;
if self.partition_type_size.get() != size_of::<WIAPartition>() as u32 {
return Err(Error::DiscFormat(format!(
"WIA partition type size is {}, expected {}",
"WIA/RVZ partition type size is {}, expected {}",
self.partition_type_size.get(),
size_of::<WIAPartition>()
)));
@ -518,12 +518,12 @@ pub struct DiscIOWIA {
impl Clone for DiscIOWIA {
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
header: self.header.clone(),
disc: self.disc.clone(),
partitions: self.partitions.clone(),
raw_data: self.raw_data.clone(),
groups: self.groups.clone(),
inner: self.inner.clone(),
nkit_header: self.nkit_header.clone(),
decompressor: self.decompressor.clone(),
group: u32::MAX,
@ -541,7 +541,7 @@ fn verify_hash(buf: &[u8], expected: &HashBytes) -> Result<()> {
let mut expected_bytes = [0u8; 40];
let expected = base16ct::lower::encode_str(expected, &mut expected_bytes).unwrap(); // Safe: fixed buffer size
return Err(Error::DiscFormat(format!(
"WIA hash mismatch: {}, expected {}",
"WIA/RVZ hash mismatch: {}, expected {}",
got, expected
)));
}
@ -685,18 +685,10 @@ impl BlockIO for DiscIOWIA {
sector: u32,
partition: Option<&PartitionInfo>,
) -> io::Result<Block> {
let mut chunk_size = self.disc.chunk_size.get();
let chunk_size = self.disc.chunk_size.get();
let sectors_per_chunk = chunk_size / SECTOR_SIZE as u32;
let disc_offset = sector as u64 * SECTOR_SIZE as u64;
let mut partition_offset = disc_offset;
if let Some(partition) = partition {
// Within a partition, hashes are excluded from the data size
chunk_size = (chunk_size * SECTOR_DATA_SIZE as u32) / SECTOR_SIZE as u32;
partition_offset =
(sector - partition.data_start_sector) as u64 * SECTOR_DATA_SIZE as u64;
}
let (group_index, group_sector) = if let Some(partition) = partition {
let (group_index, group_sector, partition_offset) = if let Some(partition) = partition {
// Find the partition
let Some(wia_part) = self.partitions.get(partition.index) else {
return Err(io::Error::new(
@ -747,7 +739,12 @@ impl BlockIO for DiscIOWIA {
));
}
(pd.group_index.get() + part_group_index, part_group_sector)
// Calculate the group offset within the partition
let part_group_offset =
(((part_group_index * sectors_per_chunk) + pd.first_sector.get())
- wia_part.partition_data[0].first_sector.get()) as u64
* SECTOR_DATA_SIZE as u64;
(pd.group_index.get() + part_group_index, part_group_sector, part_group_offset)
} else {
let Some(rd) = self.raw_data.iter().find(|d| d.contains(sector)) else {
return Err(io::Error::new(
@ -771,7 +768,9 @@ impl BlockIO for DiscIOWIA {
));
}
(rd.group_index.get() + group_index, group_sector)
// Calculate the group offset
let group_offset = rd.raw_data_offset.get() + (group_index * chunk_size) as u64;
(rd.group_index.get() + group_index, group_sector, group_offset)
};
// Fetch the group
@ -790,7 +789,13 @@ impl BlockIO for DiscIOWIA {
// Read group data if necessary
if group_index != self.group {
self.group_data = Vec::with_capacity(chunk_size as usize);
let group_data_size = if partition.is_some() {
// Within a partition, hashes are excluded from the data size
(sectors_per_chunk * SECTOR_DATA_SIZE as u32) as usize
} else {
chunk_size as usize
};
self.group_data = Vec::with_capacity(group_data_size);
let group_data_start = group.data_offset.get() as u64 * 4;
self.inner.seek(SeekFrom::Start(group_data_start))?;
@ -864,10 +869,10 @@ impl BlockIO for DiscIOWIA {
// Read sector from cached group data
if partition.is_some() {
let sector_data_start = group_sector as usize * SECTOR_DATA_SIZE;
let sector_data =
&self.group_data[sector_data_start..sector_data_start + SECTOR_DATA_SIZE];
out[..HASHES_SIZE].fill(0);
out[HASHES_SIZE..SECTOR_SIZE].copy_from_slice(sector_data);
out[HASHES_SIZE..SECTOR_SIZE].copy_from_slice(
&self.group_data[sector_data_start..sector_data_start + SECTOR_DATA_SIZE],
);
Ok(Block::PartDecrypted { has_hashes: false })
} else {
let sector_data_start = group_sector as usize * SECTOR_SIZE;


@ -1,40 +1,62 @@
// #![warn(missing_docs, rustdoc::missing_doc_code_examples)]
//! Library for traversing & reading GameCube and Wii disc images.
#![warn(missing_docs)]
//! Library for traversing & reading Nintendo Optical Disc (GameCube and Wii) images.
//!
//! Based on the C++ library [nod](https://github.com/AxioDL/nod),
//! Originally based on the C++ library [nod](https://github.com/AxioDL/nod),
//! but does not currently support authoring.
//!
//! Currently supported file formats:
//! - ISO (GCM)
//! - WIA / RVZ
//! - WBFS
//! - NFS (Wii U VC files, e.g. `hif_000000.nfs`)
//! - WBFS (+ NKit 2 lossless)
//! - CISO (+ NKit 2 lossless)
//! - NFS (Wii U VC)
//! - GCZ
//!
//! # Examples
//!
//! Opening a disc image and reading a file:
//!
//! ```no_run
//! use std::io::Read;
//!
//! use nod::{Disc, PartitionKind};
//! // Open a disc image and the first data partition.
//! let disc = nod::Disc::new("path/to/file.iso")
//! .expect("Failed to open disc");
//! let mut partition = disc.open_partition_kind(nod::PartitionKind::Data)
//! .expect("Failed to open data partition");
//!
//! fn main() -> nod::Result<()> {
//! let disc = Disc::new("path/to/file.iso")?;
//! let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
//! let meta = partition.meta()?;
//! let fst = meta.fst()?;
//! if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
//! let mut s = String::new();
//! partition
//! .open_file(node)
//! .expect("Failed to open file stream")
//! .read_to_string(&mut s)
//! .expect("Failed to read file");
//! println!("{}", s);
//! }
//! Ok(())
//! // Read partition metadata and the file system table.
//! let meta = partition.meta()
//! .expect("Failed to read partition metadata");
//! let fst = meta.fst()
//! .expect("File system table is invalid");
//!
//! // Find a file by path and read it into a string.
//! if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
//! let mut s = String::new();
//! partition
//! .open_file(node)
//! .expect("Failed to open file stream")
//! .read_to_string(&mut s)
//! .expect("Failed to read file");
//! println!("{}", s);
//! }
//! ```
//!
//! Converting a disc image to raw ISO:
//!
//! ```no_run
//! // Enable `rebuild_encryption` to ensure the output is a valid ISO.
//! let options = nod::OpenOptions { rebuild_encryption: true, ..Default::default() };
//! let mut disc = nod::Disc::new_with_options("path/to/file.rvz", &options)
//! .expect("Failed to open disc");
//!
//! // Read directly from the open disc and write to the output file.
//! let mut out = std::fs::File::create("output.iso")
//! .expect("Failed to create output file");
//! std::io::copy(&mut disc, &mut out)
//! .expect("Failed to write data");
//! ```
use std::{
io::{Read, Seek},
@ -42,7 +64,7 @@ use std::{
};
pub use disc::{
AppLoaderHeader, DiscHeader, DolHeader, PartitionBase, PartitionHeader, PartitionKind,
ApploaderHeader, DiscHeader, DolHeader, PartitionBase, PartitionHeader, PartitionKind,
PartitionMeta, BI2_SIZE, BOOT_SIZE, SECTOR_SIZE,
};
pub use fst::{Fst, Node, NodeKind};
@ -80,7 +102,9 @@ impl From<String> for Error {
/// Helper result type for [`Error`].
pub type Result<T, E = Error> = core::result::Result<T, E>;
/// Helper trait for adding context to errors.
pub trait ErrorContext {
/// Adds context to an error.
fn context(self, context: impl Into<String>) -> Error;
}
@ -88,9 +112,12 @@ impl ErrorContext for std::io::Error {
fn context(self, context: impl Into<String>) -> Error { Error::Io(context.into(), self) }
}
/// Helper trait for adding context to result errors.
pub trait ResultContext<T> {
/// Adds context to a result error.
fn context(self, context: impl Into<String>) -> Result<T>;
/// Adds context to a result error using a closure.
fn with_context<F>(self, f: F) -> Result<T>
where F: FnOnce() -> String;
}
@ -108,6 +135,7 @@ where E: ErrorContext
}
}
/// Options for opening a disc image.
#[derive(Default, Debug, Clone)]
pub struct OpenOptions {
/// Wii: Rebuild partition data encryption and hashes if the underlying format stores data
@ -117,6 +145,9 @@ pub struct OpenOptions {
pub validate_hashes: bool,
}
/// An open disc image and read stream.
///
/// This is the primary entry point for reading disc images.
pub struct Disc {
reader: disc::reader::DiscReader,
options: OpenOptions,
@ -135,7 +166,7 @@ impl Disc {
Ok(Disc { reader, options: options.clone() })
}
/// The disc's header.
/// The disc's primary header.
pub fn header(&self) -> &DiscHeader { self.reader.header() }
/// Returns extra metadata included in the disc file format, if any.
@ -146,20 +177,20 @@ impl Disc {
/// A list of Wii partitions on the disc.
///
/// For GameCube discs, this will return an empty slice.
/// **GameCube**: This will return an empty slice.
pub fn partitions(&self) -> &[PartitionInfo] { self.reader.partitions() }
/// Opens a new, decrypted partition read stream for the specified partition index.
/// Opens a decrypted partition read stream for the specified partition index.
///
/// For GameCube discs, the index must always be 0.
/// **GameCube**: `index` must always be 0.
pub fn open_partition(&self, index: usize) -> Result<Box<dyn PartitionBase>> {
self.reader.open_partition(index, &self.options)
}
/// Opens a new partition read stream for the first partition matching
/// the specified type.
/// Opens a decrypted partition read stream for the first partition matching
/// the specified kind.
///
/// For GameCube discs, the kind must always be `PartitionKind::Data`.
/// **GameCube**: `kind` must always be [`PartitionKind::Data`].
pub fn open_partition_kind(&self, kind: PartitionKind) -> Result<Box<dyn PartitionBase>> {
self.reader.open_partition_kind(kind, &self.options)
}


@ -1,6 +1,6 @@
[package]
name = "nodtool"
version = "1.0.0"
version = "1.1.0"
edition = "2021"
rust-version = "1.73.0"
authors = ["Luke Street <luke@street.dev>"]


@ -6,4 +6,3 @@ reorder_impl_items = true
use_field_init_shorthand = true
use_small_heuristics = "Max"
where_single_line = true
format_code_in_doc_comments = true