mirror of https://github.com/encounter/nod-rs.git
Documentation updates & fixes for Wii partition streams
parent 1895b7df3f
commit 8bd52d4075
@@ -411,7 +411,7 @@ dependencies = [
 [[package]]
 name = "nod"
-version = "1.0.0"
+version = "1.1.0"
 dependencies = [
  "adler",
  "aes",
@@ -434,7 +434,7 @@ dependencies = [
 [[package]]
 name = "nodtool"
-version = "1.0.0"
+version = "1.1.0"
 dependencies = [
  "argp",
  "base16ct",
README.md (51 changed lines)
@@ -70,26 +70,45 @@ Opening a disc image and reading a file:
 ```rust
 use std::io::Read;
 
-use nod::{Disc, PartitionKind};
+// Open a disc image and the first data partition.
+let disc = nod::Disc::new("path/to/file.iso")
+    .expect("Failed to open disc");
+let mut partition = disc.open_partition_kind(nod::PartitionKind::Data)
+    .expect("Failed to open data partition");
 
-fn main() -> nod::Result<()> {
-    let disc = Disc::new("path/to/file.iso")?;
-    let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
-    let meta = partition.meta()?;
-    let fst = meta.fst()?;
-    if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
-        let mut s = String::new();
-        partition
-            .open_file(node)
-            .expect("Failed to open file stream")
-            .read_to_string(&mut s)
-            .expect("Failed to read file");
-        println!("{}", s);
-    }
-    Ok(())
+// Read partition metadata and the file system table.
+let meta = partition.meta()
+    .expect("Failed to read partition metadata");
+let fst = meta.fst()
+    .expect("File system table is invalid");
+
+// Find a file by path and read it into a string.
+if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
+    let mut s = String::new();
+    partition
+        .open_file(node)
+        .expect("Failed to open file stream")
+        .read_to_string(&mut s)
+        .expect("Failed to read file");
+    println!("{}", s);
 }
 ```
 
+Converting a disc image to raw ISO:
+
+```rust
+// Enable `rebuild_encryption` to ensure the output is a valid ISO.
+let options = nod::OpenOptions { rebuild_encryption: true, ..Default::default() };
+let mut disc = nod::Disc::new_with_options("path/to/file.rvz", &options)
+    .expect("Failed to open disc");
+
+// Read directly from the open disc and write to the output file.
+let mut out = std::fs::File::create("output.iso")
+    .expect("Failed to create output file");
+std::io::copy(&mut disc, &mut out)
+    .expect("Failed to write data");
+```
+
 ## License
 
 Licensed under either of
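The README example reads a text file into a `String`; the same stream works for binary files too. A minimal sketch along those lines, assuming the data partition is opened as above (the FST path and output name are illustrative, not taken from the repository):

```rust
// Open the disc and data partition as in the README example (paths are illustrative).
let disc = nod::Disc::new("path/to/file.iso").expect("Failed to open disc");
let mut partition =
    disc.open_partition_kind(nod::PartitionKind::Data).expect("Failed to open data partition");
let meta = partition.meta().expect("Failed to read partition metadata");
let fst = meta.fst().expect("File system table is invalid");

// Copy a file out of the image without reading it all into memory.
// "/opening.bnr" is a hypothetical path; use any path present in the FST.
if let Some((_, node)) = fst.find("/opening.bnr") {
    let mut file = partition.open_file(node).expect("Failed to open file stream");
    let mut out = std::fs::File::create("opening.bnr").expect("Failed to create output file");
    std::io::copy(&mut file, &mut out).expect("Failed to write file");
}
```

Because the opened file behaves as an ordinary `Read` stream, `std::io::copy` streams it to disk instead of buffering the whole file.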
@@ -1,6 +1,6 @@
 [package]
 name = "nod"
-version = "1.0.0"
+version = "1.1.0"
 edition = "2021"
 rust-version = "1.73.0"
 authors = ["Luke Street <luke@street.dev>"]
@@ -9,7 +9,7 @@ use zerocopy::{FromBytes, FromZeroes};
 
 use crate::{
     disc::{
-        AppLoaderHeader, DiscHeader, DolHeader, PartitionBase, PartitionHeader, PartitionMeta,
+        ApploaderHeader, DiscHeader, DolHeader, PartitionBase, PartitionHeader, PartitionMeta,
         BI2_SIZE, BOOT_SIZE, SECTOR_SIZE,
     },
     fst::{Node, NodeKind},
@@ -79,7 +79,6 @@ impl Read for PartitionGC
         self.block.copy_raw(
             self.sector_buf.as_mut(),
             self.block_buf.as_ref(),
-            block_idx,
             sector,
             &self.disc_header,
         )?;
@@ -137,33 +136,34 @@ pub(crate) fn read_part_meta(
 
     // apploader.bin
     let mut raw_apploader: Vec<u8> =
-        read_vec(reader, size_of::<AppLoaderHeader>()).context("Reading apploader header")?;
-    let apploader_header = AppLoaderHeader::ref_from(raw_apploader.as_slice()).unwrap();
+        read_vec(reader, size_of::<ApploaderHeader>()).context("Reading apploader header")?;
+    let apploader_header = ApploaderHeader::ref_from(raw_apploader.as_slice()).unwrap();
     raw_apploader.resize(
-        size_of::<AppLoaderHeader>()
+        size_of::<ApploaderHeader>()
             + apploader_header.size.get() as usize
             + apploader_header.trailer_size.get() as usize,
         0,
     );
     reader
-        .read_exact(&mut raw_apploader[size_of::<AppLoaderHeader>()..])
+        .read_exact(&mut raw_apploader[size_of::<ApploaderHeader>()..])
         .context("Reading apploader")?;
 
     // fst.bin
     reader
-        .seek(SeekFrom::Start(partition_header.fst_off(is_wii)))
+        .seek(SeekFrom::Start(partition_header.fst_offset(is_wii)))
         .context("Seeking to FST offset")?;
-    let raw_fst: Box<[u8]> = read_box_slice(reader, partition_header.fst_sz(is_wii) as usize)
+    let raw_fst: Box<[u8]> = read_box_slice(reader, partition_header.fst_size(is_wii) as usize)
         .with_context(|| {
             format!(
                 "Reading partition FST (offset {}, size {})",
-                partition_header.fst_off, partition_header.fst_sz
+                partition_header.fst_offset(is_wii),
+                partition_header.fst_size(is_wii)
             )
         })?;
 
     // main.dol
     reader
-        .seek(SeekFrom::Start(partition_header.dol_off(is_wii)))
+        .seek(SeekFrom::Start(partition_header.dol_offset(is_wii)))
         .context("Seeking to DOL offset")?;
     let mut raw_dol: Vec<u8> =
         read_vec(reader, size_of::<DolHeader>()).context("Reading DOL header")?;
@@ -82,10 +82,6 @@ pub fn rebuild_hashes(reader: &mut DiscReader) -> Result<()> {
     // Precompute hashes for zeroed sectors.
     const ZERO_H0_BYTES: &[u8] = &[0u8; HASHES_SIZE];
     let zero_h0_hash = hash_bytes(ZERO_H0_BYTES);
-    let mut zero_h1_hash = Sha1::new();
-    for _ in 0..NUM_H0_HASHES {
-        zero_h1_hash.update(zero_h0_hash);
-    }
 
     let partitions = reader.partitions();
     let mut hash_tables = Vec::with_capacity(partitions.len());
@@ -171,6 +167,7 @@ pub fn rebuild_hashes(reader: &mut DiscReader) -> Result<()> {
             .context("Seeking to H3 table")?;
         let h3_table: Box<[HashBytes]> =
             read_box_slice(reader, hash_table.h3_hashes.len()).context("Reading H3 table")?;
+        let mut mismatches = 0;
         for (idx, (expected_hash, h3_hash)) in
             h3_table.iter().zip(hash_table.h3_hashes.iter()).enumerate()
         {
@@ -180,12 +177,16 @@ pub fn rebuild_hashes(reader: &mut DiscReader) -> Result<()> {
                 let mut expected_bytes = [0u8; 40];
                 let expected =
                     base16ct::lower::encode_str(expected_hash, &mut expected_bytes).unwrap();
-                log::warn!(
+                log::debug!(
                     "Partition {} H3 table does not match:\n\tindex {}\n\texpected: {}\n\tgot: {}",
                     part.index, idx, expected, got
                 );
+                mismatches += 1;
             }
         }
+        if mismatches > 0 {
+            log::warn!("Partition {} H3 table has {} hash mismatches", part.index, mismatches);
+        }
     }
 
     for (part, hash_table) in reader.partitions.iter_mut().zip(hash_tables) {
@@ -25,9 +25,12 @@ pub(crate) mod hashes;
 pub(crate) mod reader;
 pub(crate) mod wii;
 
+/// Size in bytes of a disc sector.
 pub const SECTOR_SIZE: usize = 0x8000;
 
-/// Shared GameCube & Wii disc header
+/// Shared GameCube & Wii disc header.
+///
+/// This header is always at the start of the disc image and within each Wii partition.
 #[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
 #[repr(C, align(4))]
 pub struct DiscHeader {
@@ -78,30 +81,34 @@ impl DiscHeader {
     pub fn is_wii(&self) -> bool { self.wii_magic.get() == 0x5D1C9EA3 }
 }
 
-/// Partition header
+/// A header describing the contents of a disc partition.
+///
+/// **GameCube**: Always follows the disc header.
+///
+/// **Wii**: Follows the disc header within each partition.
 #[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
 #[repr(C, align(4))]
 pub struct PartitionHeader {
     /// Debug monitor offset
-    pub debug_mon_off: U32,
+    pub debug_mon_offset: U32,
     /// Debug monitor load address
-    pub debug_load_addr: U32,
+    pub debug_load_address: U32,
     /// Padding
     _pad1: [u8; 0x18],
     /// Offset to main DOL (Wii: >> 2)
-    pub dol_off: U32,
+    pub dol_offset: U32,
     /// Offset to file system table (Wii: >> 2)
-    pub fst_off: U32,
+    pub fst_offset: U32,
     /// File system size (Wii: >> 2)
-    pub fst_sz: U32,
+    pub fst_size: U32,
     /// File system max size (Wii: >> 2)
-    pub fst_max_sz: U32,
+    pub fst_max_size: U32,
     /// File system table load address
     pub fst_memory_address: U32,
     /// User position
     pub user_position: U32,
     /// User size
-    pub user_sz: U32,
+    pub user_size: U32,
     /// Padding
     _pad2: [u8; 4],
 }
@@ -109,43 +116,47 @@ pub struct PartitionHeader {
 static_assert!(size_of::<PartitionHeader>() == 0x40);
 
 impl PartitionHeader {
-    pub fn dol_off(&self, is_wii: bool) -> u64 {
+    /// Offset within the partition to the main DOL.
+    pub fn dol_offset(&self, is_wii: bool) -> u64 {
         if is_wii {
-            self.dol_off.get() as u64 * 4
+            self.dol_offset.get() as u64 * 4
         } else {
-            self.dol_off.get() as u64
+            self.dol_offset.get() as u64
         }
     }
 
-    pub fn fst_off(&self, is_wii: bool) -> u64 {
+    /// Offset within the partition to the file system table (FST).
+    pub fn fst_offset(&self, is_wii: bool) -> u64 {
         if is_wii {
-            self.fst_off.get() as u64 * 4
+            self.fst_offset.get() as u64 * 4
         } else {
-            self.fst_off.get() as u64
+            self.fst_offset.get() as u64
         }
     }
 
-    pub fn fst_sz(&self, is_wii: bool) -> u64 {
+    /// Size of the file system table (FST).
+    pub fn fst_size(&self, is_wii: bool) -> u64 {
         if is_wii {
-            self.fst_sz.get() as u64 * 4
+            self.fst_size.get() as u64 * 4
         } else {
-            self.fst_sz.get() as u64
+            self.fst_size.get() as u64
         }
     }
 
-    pub fn fst_max_sz(&self, is_wii: bool) -> u64 {
+    /// Maximum size of the file system table (FST) across multi-disc games.
+    pub fn fst_max_size(&self, is_wii: bool) -> u64 {
         if is_wii {
-            self.fst_max_sz.get() as u64 * 4
+            self.fst_max_size.get() as u64 * 4
         } else {
-            self.fst_max_sz.get() as u64
+            self.fst_max_size.get() as u64
         }
     }
 }
 
-/// Apploader header
+/// Apploader header.
 #[derive(Debug, PartialEq, Clone, FromBytes, FromZeroes, AsBytes)]
 #[repr(C, align(4))]
-pub struct AppLoaderHeader {
+pub struct ApploaderHeader {
     /// Apploader build date
     pub date: [u8; 16],
     /// Entry point
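Note on the renamed accessors: the `(Wii: >> 2)` fields above are stored as byte offsets or sizes shifted right by two bits, so the accessors multiply the raw header value by 4 on Wii discs and pass it through unchanged on GameCube. A standalone sketch of that conversion (the raw value below is an arbitrary example, not taken from a real disc):

```rust
// Convert a raw partition-header field to a byte value, mirroring the accessors above.
fn header_value(raw: u32, is_wii: bool) -> u64 {
    // Wii headers store these fields shifted right by 2, so multiply by 4 when reading.
    if is_wii { raw as u64 * 4 } else { raw as u64 }
}

fn main() {
    let raw = 0x0001_2000u32; // hypothetical fst_offset field value
    assert_eq!(header_value(raw, true), 0x0004_8000); // Wii: byte offset is raw * 4
    assert_eq!(header_value(raw, false), 0x0001_2000); // GameCube: already a byte offset
}
```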
@@ -158,19 +169,19 @@ pub struct AppLoaderHeader {
     _pad: [u8; 4],
 }
 
-impl AppLoaderHeader {
-    /// Apploader build date as a string
+impl ApploaderHeader {
+    /// Apploader build date as a string.
     pub fn date_str(&self) -> Option<&str> {
         CStr::from_bytes_until_nul(&self.date).ok().and_then(|c| c.to_str().ok())
     }
 }
 
-/// Maximum number of text sections in a DOL
+/// Maximum number of text sections in a DOL.
 pub const DOL_MAX_TEXT_SECTIONS: usize = 7;
-/// Maximum number of data sections in a DOL
+/// Maximum number of data sections in a DOL.
 pub const DOL_MAX_DATA_SECTIONS: usize = 11;
 
-/// DOL header
+/// Dolphin executable (DOL) header.
 #[derive(Debug, Clone, FromBytes, FromZeroes)]
 pub struct DolHeader {
     /// Text section offsets
|
||||||
|
|
||||||
static_assert!(size_of::<DolHeader>() == 0x100);
|
static_assert!(size_of::<DolHeader>() == 0x100);
|
||||||
|
|
||||||
/// Partition type
|
/// The kind of disc partition.
|
||||||
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
|
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
|
||||||
pub enum PartitionKind {
|
pub enum PartitionKind {
|
||||||
|
/// Data partition.
|
||||||
Data,
|
Data,
|
||||||
|
/// Update partition.
|
||||||
Update,
|
Update,
|
||||||
|
/// Channel partition.
|
||||||
Channel,
|
Channel,
|
||||||
|
/// Other partition kind.
|
||||||
Other(u32),
|
Other(u32),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -246,7 +261,7 @@ impl From<u32> for PartitionKind {
     }
 }
 
-/// An open read stream for a disc partition.
+/// An open disc partition.
 pub trait PartitionBase: DynClone + ReadStream + Send + Sync {
     /// Reads the partition header and file system table.
     fn meta(&mut self) -> Result<Box<PartitionMeta>>;
@@ -283,7 +298,7 @@ pub trait PartitionBase: DynClone + ReadStream + Send + Sync {
 
     /// The ideal size for buffered reads from this partition.
     /// GameCube discs have a data block size of 0x8000,
-    /// whereas Wii discs have a data block size of 0x7c00.
+    /// whereas Wii discs have a data block size of 0x7C00.
     fn ideal_buffer_size(&self) -> usize;
 }
 
@@ -294,7 +309,7 @@ pub const BOOT_SIZE: usize = size_of::<DiscHeader>() + size_of::<PartitionHeader
 /// Size of the debug and region information (bi2.bin)
 pub const BI2_SIZE: usize = 0x2000;
 
-/// Disc partition metadata
+/// Extra disc partition data. (DOL, FST, etc.)
 #[derive(Clone, Debug)]
 pub struct PartitionMeta {
     /// Disc and partition header (boot.bin)
@@ -318,31 +333,50 @@ pub struct PartitionMeta {
 }
 
 impl PartitionMeta {
+    /// A view into the disc header.
     pub fn header(&self) -> &DiscHeader {
         DiscHeader::ref_from(&self.raw_boot[..size_of::<DiscHeader>()]).unwrap()
     }
 
+    /// A view into the partition header.
     pub fn partition_header(&self) -> &PartitionHeader {
         PartitionHeader::ref_from(&self.raw_boot[size_of::<DiscHeader>()..]).unwrap()
     }
 
-    pub fn apploader_header(&self) -> &AppLoaderHeader {
-        AppLoaderHeader::ref_from_prefix(&self.raw_apploader).unwrap()
+    /// A view into the apploader header.
+    pub fn apploader_header(&self) -> &ApploaderHeader {
+        ApploaderHeader::ref_from_prefix(&self.raw_apploader).unwrap()
     }
 
+    /// A view into the file system table (FST).
     pub fn fst(&self) -> Result<Fst, &'static str> { Fst::new(&self.raw_fst) }
 
+    /// A view into the DOL header.
     pub fn dol_header(&self) -> &DolHeader { DolHeader::ref_from_prefix(&self.raw_dol).unwrap() }
 
+    /// A view into the ticket. (Wii only)
     pub fn ticket(&self) -> Option<&Ticket> {
         self.raw_ticket.as_ref().and_then(|v| Ticket::ref_from(v))
     }
 
+    /// A view into the TMD. (Wii only)
     pub fn tmd_header(&self) -> Option<&TmdHeader> {
         self.raw_tmd.as_ref().and_then(|v| TmdHeader::ref_from_prefix(v))
     }
 }
 
+/// The size of a single-layer MiniDVD. (1.4 GB)
+///
+/// GameCube games and some third-party Wii discs (Datel) use this format.
 pub const MINI_DVD_SIZE: u64 = 1_459_978_240;
 
+/// The size of a single-layer DVD. (4.7 GB)
+///
+/// The vast majority of Wii games use this format.
 pub const SL_DVD_SIZE: u64 = 4_699_979_776;
 
+/// The size of a dual-layer DVD. (8.5 GB)
+///
+/// A few larger Wii games use this format.
+/// (Super Smash Bros. Brawl, Metroid Prime Trilogy, etc.)
 pub const DL_DVD_SIZE: u64 = 8_511_160_320;
@@ -83,7 +83,7 @@ impl DiscReader {
         if reader.disc_header.is_wii() {
             reader.partitions = read_partition_info(&mut reader)?;
             // Rebuild hashes if the format requires it
-            if options.rebuild_encryption && meta.needs_hash_recovery {
+            if (options.rebuild_encryption || options.validate_hashes) && meta.needs_hash_recovery {
                 rebuild_hashes(&mut reader)?;
             }
         }
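With this condition, enabling `validate_hashes` on a format that strips hash data (e.g. RVZ) also rebuilds the hash tables, so verification has something to check against. A rough usage sketch, under the assumption that a hash mismatch surfaces as an `InvalidData` I/O error while reading the partition stream (paths are illustrative):

```rust
// Open an RVZ with hash validation enabled.
let options = nod::OpenOptions { validate_hashes: true, ..Default::default() };
let disc = nod::Disc::new_with_options("path/to/file.rvz", &options)
    .expect("Failed to open disc");
let mut partition = disc
    .open_partition_kind(nod::PartitionKind::Data)
    .expect("Failed to open data partition");

// Reading the stream to the end exercises the per-sector hash checks;
// a mismatch is reported as an I/O error of kind InvalidData.
std::io::copy(&mut partition, &mut std::io::sink()).expect("Hash verification failed");
```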
@@ -130,22 +130,22 @@ impl DiscReader {
     }
 
     /// Opens a new, decrypted partition read stream for the first partition matching
-    /// the specified type.
+    /// the specified kind.
     pub fn open_partition_kind(
         &self,
-        part_type: PartitionKind,
+        kind: PartitionKind,
         options: &OpenOptions,
     ) -> Result<Box<dyn PartitionBase>> {
         if self.disc_header.is_gamecube() {
-            if part_type == PartitionKind::Data {
+            if kind == PartitionKind::Data {
                 Ok(PartitionGC::new(self.io.clone(), self.disc_header.clone())?)
             } else {
                 Err(Error::DiscFormat("GameCube discs only have a data partition".to_string()))
             }
-        } else if let Some(part) = self.partitions.iter().find(|v| v.kind == part_type) {
+        } else if let Some(part) = self.partitions.iter().find(|v| v.kind == kind) {
             Ok(PartitionWii::new(self.io.clone(), self.disc_header.clone(), part, options)?)
         } else {
-            Err(Error::DiscFormat(format!("Partition type {part_type} not found")))
+            Err(Error::DiscFormat(format!("Partition type {kind} not found")))
         }
     }
 }
@@ -176,14 +176,12 @@ impl Read for DiscReader {
             EncryptionMode::Decrypted => self.block.decrypt(
                 self.sector_buf.as_mut(),
                 self.block_buf.as_ref(),
-                block_idx,
                 abs_sector,
                 partition,
             )?,
             EncryptionMode::Encrypted => self.block.encrypt(
                 self.sector_buf.as_mut(),
                 self.block_buf.as_ref(),
-                block_idx,
                 abs_sector,
                 partition,
             )?,
@@ -192,7 +190,6 @@
                 self.block.copy_raw(
                     self.sector_buf.as_mut(),
                     self.block_buf.as_ref(),
-                    block_idx,
                     abs_sector,
                     &self.disc_header,
                 )?;
@@ -225,7 +222,7 @@ impl Seek for DiscReader {
     }
 }
 
-fn read_partition_info(reader: &mut DiscReader) -> crate::Result<Vec<PartitionInfo>> {
+fn read_partition_info(reader: &mut DiscReader) -> Result<Vec<PartitionInfo>> {
     reader.seek(SeekFrom::Start(WII_PART_GROUP_OFF)).context("Seeking to partition groups")?;
     let part_groups: [WiiPartGroup; 4] = read_from(reader).context("Reading partition groups")?;
     let mut part_info = Vec::new();
@@ -306,6 +303,7 @@ fn guess_disc_size(part_info: &[PartitionInfo]) -> u64 {
         })
         .max()
         .unwrap_or(0x50000);
+    // TODO add FST offsets (decrypted partitions)
     if max_offset <= MINI_DVD_SIZE && !part_info.iter().any(|v| v.kind == PartitionKind::Data) {
         // Datel disc
         MINI_DVD_SIZE
@@ -27,7 +27,10 @@ use crate::{
     DiscHeader, Error, OpenOptions, Result, ResultContext,
 };
 
+/// Size in bytes of the hashes block in a Wii disc sector
 pub(crate) const HASHES_SIZE: usize = 0x400;
+
+/// Size in bytes of the data block in a Wii disc sector (excluding hashes)
 pub(crate) const SECTOR_DATA_SIZE: usize = SECTOR_SIZE - HASHES_SIZE; // 0x7C00
 
 // ppki (Retail)
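For reference, a Wii sector is 0x8000 bytes: a 0x400-byte hash block followed by 0x7C00 bytes of user data. A small sketch of how a position in the decrypted partition data maps to a partition sector and an offset inside that sector's data area, mirroring the arithmetic used in `PartitionWii::read` below:

```rust
const SECTOR_SIZE: usize = 0x8000;
const HASHES_SIZE: usize = 0x400;
const SECTOR_DATA_SIZE: usize = SECTOR_SIZE - HASHES_SIZE; // 0x7C00

/// Map a decrypted-data position to (partition sector, offset within that sector's data).
fn locate(pos: u64) -> (u32, usize) {
    ((pos / SECTOR_DATA_SIZE as u64) as u32, (pos % SECTOR_DATA_SIZE as u64) as usize)
}

fn main() {
    assert_eq!(locate(0), (0, 0));
    assert_eq!(locate(0x7C00), (1, 0)); // first byte of the second sector's data area
    assert_eq!(locate(0x10000), (2, 0x800)); // 0x10000 = 2 * 0x7C00 + 0x800
}
```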
@@ -312,12 +315,13 @@ impl PartitionWii {
 
 impl Read for PartitionWii {
     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
-        let partition_sector = (self.pos / SECTOR_DATA_SIZE as u64) as u32;
-        let sector = self.partition.data_start_sector + partition_sector;
-        if sector >= self.partition.data_end_sector {
+        let part_sector = (self.pos / SECTOR_DATA_SIZE as u64) as u32;
+        let abs_sector = self.partition.data_start_sector + part_sector;
+        if abs_sector >= self.partition.data_end_sector {
             return Ok(0);
         }
-        let block_idx = (sector as u64 * SECTOR_SIZE as u64 / self.block_buf.len() as u64) as u32;
+        let block_idx =
+            (abs_sector as u64 * SECTOR_SIZE as u64 / self.block_buf.len() as u64) as u32;
 
         // Read new block if necessary
         if block_idx != self.block_idx {
@@ -327,18 +331,17 @@ impl Read for PartitionWii {
         }
 
         // Decrypt sector if necessary
-        if sector != self.sector {
+        if abs_sector != self.sector {
             self.block.decrypt(
                 self.sector_buf.as_mut(),
                 self.block_buf.as_ref(),
-                block_idx,
-                sector,
+                abs_sector,
                 &self.partition,
             )?;
             if self.verify {
-                verify_hashes(&self.sector_buf, sector)?;
+                verify_hashes(self.sector_buf.as_ref(), part_sector, self.raw_h3_table.as_ref())?;
             }
-            self.sector = sector;
+            self.sector = abs_sector;
         }
 
         let offset = (self.pos % SECTOR_DATA_SIZE as u64) as usize;
@@ -369,9 +372,9 @@ impl Seek for PartitionWii {
 #[inline(always)]
 pub(crate) fn as_digest(slice: &[u8; 20]) -> digest::Output<Sha1> { (*slice).into() }
 
-fn verify_hashes(buf: &[u8; SECTOR_SIZE], sector: u32) -> io::Result<()> {
-    let (mut group, sub_group) = div_rem(sector as usize, 8);
-    group %= 8;
+fn verify_hashes(buf: &[u8; SECTOR_SIZE], part_sector: u32, h3_table: &[u8]) -> io::Result<()> {
+    let (cluster, sector) = div_rem(part_sector as usize, 8);
+    let (group, sub_group) = div_rem(cluster, 8);
 
     // H0 hashes
     for i in 0..31 {
@@ -391,14 +394,14 @@ fn verify_hashes(buf: &[u8; SECTOR_SIZE], sector: u32) -> io::Result<()> {
     {
         let mut hash = Sha1::new();
         hash.update(array_ref![buf, 0, 0x26C]);
-        let expected = as_digest(array_ref![buf, 0x280 + sub_group * 20, 20]);
+        let expected = as_digest(array_ref![buf, 0x280 + sector * 20, 20]);
         let output = hash.finalize();
         if output != expected {
             return Err(io::Error::new(
                 io::ErrorKind::InvalidData,
                 format!(
                     "Invalid H1 hash! (subgroup {:?}) {:x}\n\texpected {:x}",
-                    sub_group, output, expected
+                    sector, output, expected
                 ),
             ));
         }
@@ -408,19 +411,33 @@ fn verify_hashes(buf: &[u8; SECTOR_SIZE], sector: u32) -> io::Result<()> {
     {
         let mut hash = Sha1::new();
         hash.update(array_ref![buf, 0x280, 0xA0]);
-        let expected = as_digest(array_ref![buf, 0x340 + group * 20, 20]);
+        let expected = as_digest(array_ref![buf, 0x340 + sub_group * 20, 20]);
         let output = hash.finalize();
         if output != expected {
             return Err(io::Error::new(
                 io::ErrorKind::InvalidData,
                 format!(
                     "Invalid H2 hash! (group {:?}) {:x}\n\texpected {:x}",
-                    group, output, expected
+                    sub_group, output, expected
                 ),
             ));
         }
     }
-    // TODO H3 hash
+
+    // H3 hash
+    {
+        let mut hash = Sha1::new();
+        hash.update(array_ref![buf, 0x340, 0xA0]);
+        let expected = as_digest(array_ref![h3_table, group * 20, 20]);
+        let output = hash.finalize();
+        if output != expected {
+            return Err(io::Error::new(
+                io::ErrorKind::InvalidData,
+                format!("Invalid H3 hash! {:x}\n\texpected {:x}", output, expected),
+            ));
+        }
+    }
 
     Ok(())
 }
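The index math above follows the Wii hash hierarchy: 8 sectors form a subgroup (covered by the H1 hashes at 0x280), 8 subgroups form a group (covered by the H2 hashes at 0x340), and each group has one entry in the partition's H3 table. A standalone sketch of the same decomposition:

```rust
/// Split a partition-relative sector index into (group, sub_group, sector-in-subgroup),
/// mirroring the div_rem calls in `verify_hashes`.
fn hash_indices(part_sector: u32) -> (usize, usize, usize) {
    let cluster = part_sector as usize / 8;
    let sector = part_sector as usize % 8; // selects the H1 hash at 0x280 + sector * 20
    let group = cluster / 8;               // selects the H3 table entry at group * 20
    let sub_group = cluster % 8;           // selects the H2 hash at 0x340 + sub_group * 20
    (group, sub_group, sector)
}

fn main() {
    assert_eq!(hash_indices(0), (0, 0, 0));
    assert_eq!(hash_indices(9), (0, 1, 1));  // 9 = 1 * 8 + 1
    assert_eq!(hash_indices(70), (1, 0, 6)); // 70 = 8 * 8 + 6
}
```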
@@ -32,7 +32,7 @@ pub struct Node {
 static_assert!(size_of::<Node>() == 12);
 
 impl Node {
-    /// File system node type.
+    /// File system node kind.
     pub fn kind(&self) -> NodeKind {
         match self.kind {
             0 => NodeKind::File,
@@ -71,9 +71,11 @@ impl Node {
     pub fn length(&self) -> u64 { self.length.get() as u64 }
 }
 
-/// A view into the file system tree (FST).
+/// A view into the file system table (FST).
 pub struct Fst<'a> {
+    /// The nodes in the FST.
     pub nodes: &'a [Node],
+    /// The string table containing all file and directory names.
     pub string_table: &'a [u8],
 }
@@ -135,20 +135,32 @@ pub fn open(filename: &Path) -> Result<Box<dyn BlockIO>> {
     Ok(io)
 }
 
+/// Wii partition information.
 #[derive(Debug, Clone)]
 pub struct PartitionInfo {
+    /// The partition index.
     pub index: usize,
+    /// The kind of disc partition.
     pub kind: PartitionKind,
+    /// The start sector of the partition.
     pub start_sector: u32,
+    /// The start sector of the partition's (encrypted) data.
     pub data_start_sector: u32,
+    /// The end sector of the partition's (encrypted) data.
     pub data_end_sector: u32,
+    /// The AES key for the partition, also known as the "title key".
    pub key: KeyBytes,
+    /// The Wii partition header.
     pub header: Box<WiiPartitionHeader>,
+    /// The disc header within the partition.
     pub disc_header: Box<DiscHeader>,
+    /// The partition header within the partition.
     pub partition_header: Box<PartitionHeader>,
+    /// The hash table for the partition, if rebuilt.
     pub hash_table: Option<HashTable>,
 }
 
+/// The block kind returned by [`BlockIO::read_block`].
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
 pub enum Block {
     /// Raw data or encrypted Wii partition data
@@ -171,29 +183,28 @@ impl Block {
         self,
         out: &mut [u8; SECTOR_SIZE],
         data: &[u8],
-        block_idx: u32,
         abs_sector: u32,
         partition: &PartitionInfo,
     ) -> io::Result<()> {
-        let rel_sector = abs_sector - self.start_sector(block_idx, data.len());
+        let part_sector = abs_sector - partition.data_start_sector;
         match self {
             Block::Raw => {
-                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, rel_sector)?);
+                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
                 decrypt_sector(out, partition);
             }
             Block::PartDecrypted { has_hashes } => {
-                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, rel_sector)?);
+                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
                 if !has_hashes {
-                    rebuild_hash_block(out, abs_sector, partition);
+                    rebuild_hash_block(out, part_sector, partition);
                 }
             }
             Block::Junk => {
-                generate_junk(out, abs_sector, Some(partition), &partition.disc_header);
-                rebuild_hash_block(out, abs_sector, partition);
+                generate_junk(out, part_sector, Some(partition), &partition.disc_header);
+                rebuild_hash_block(out, part_sector, partition);
             }
             Block::Zero => {
                 out.fill(0);
-                rebuild_hash_block(out, abs_sector, partition);
+                rebuild_hash_block(out, part_sector, partition);
             }
         }
         Ok(())
@@ -204,30 +215,29 @@ impl Block {
         self,
         out: &mut [u8; SECTOR_SIZE],
         data: &[u8],
-        block_idx: u32,
         abs_sector: u32,
         partition: &PartitionInfo,
     ) -> io::Result<()> {
-        let rel_sector = abs_sector - self.start_sector(block_idx, data.len());
+        let part_sector = abs_sector - partition.data_start_sector;
         match self {
             Block::Raw => {
-                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, rel_sector)?);
+                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
             }
             Block::PartDecrypted { has_hashes } => {
-                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, rel_sector)?);
+                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
                 if !has_hashes {
-                    rebuild_hash_block(out, abs_sector, partition);
+                    rebuild_hash_block(out, part_sector, partition);
                 }
                 encrypt_sector(out, partition);
             }
             Block::Junk => {
-                generate_junk(out, abs_sector, Some(partition), &partition.disc_header);
-                rebuild_hash_block(out, abs_sector, partition);
+                generate_junk(out, part_sector, Some(partition), &partition.disc_header);
+                rebuild_hash_block(out, part_sector, partition);
                 encrypt_sector(out, partition);
             }
             Block::Zero => {
                 out.fill(0);
-                rebuild_hash_block(out, abs_sector, partition);
+                rebuild_hash_block(out, part_sector, partition);
                 encrypt_sector(out, partition);
             }
         }
@@ -239,16 +249,12 @@ impl Block {
         self,
         out: &mut [u8; SECTOR_SIZE],
         data: &[u8],
-        block_idx: u32,
         abs_sector: u32,
         disc_header: &DiscHeader,
     ) -> io::Result<()> {
         match self {
             Block::Raw => {
-                out.copy_from_slice(block_sector::<SECTOR_SIZE>(
-                    data,
-                    abs_sector - self.start_sector(block_idx, data.len()),
-                )?);
+                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
             }
             Block::PartDecrypted { .. } => {
                 return Err(io::Error::new(
@@ -261,11 +267,6 @@ impl Block {
         }
         Ok(())
     }
-
-    /// Returns the start sector of the block.
-    fn start_sector(&self, index: u32, block_size: usize) -> u32 {
-        (index as u64 * block_size as u64 / SECTOR_SIZE as u64) as u32
-    }
 }
 
 #[inline(always)]
@@ -276,14 +277,15 @@ fn block_sector<const N: usize>(data: &[u8], sector_idx: u32) -> io::Result<&[u8
             format!("Expected block size {} to be a multiple of {}", data.len(), N),
         ));
     }
-    let offset = sector_idx as usize * N;
+    let rel_sector = sector_idx % (data.len() / N) as u32;
+    let offset = rel_sector as usize * N;
     data.get(offset..offset + N)
         .ok_or_else(|| {
             io::Error::new(
                 io::ErrorKind::InvalidData,
                 format!(
                     "Sector {} out of range (block size {}, sector size {})",
-                    sector_idx,
+                    rel_sector,
                     data.len(),
                     N
                 ),
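Since callers now pass absolute sector numbers, `block_sector` reduces them into the cached block with a modulo before computing the byte range. A minimal standalone sketch of that mapping with example numbers:

```rust
/// Compute the byte range of a sector within a cached block, given the absolute sector
/// index and the block size in bytes (a sketch of the logic in `block_sector`).
fn sector_range(data_len: usize, sector_size: usize, abs_sector: u32) -> std::ops::Range<usize> {
    let sectors_per_block = (data_len / sector_size) as u32;
    let rel_sector = (abs_sector % sectors_per_block) as usize;
    let offset = rel_sector * sector_size;
    offset..offset + sector_size
}

fn main() {
    // A 0x40000-byte block holds 8 sectors of 0x8000 bytes; absolute sector 10 is the
    // third sector of the second block, i.e. bytes 0x10000..0x18000 within that block.
    assert_eq!(sector_range(0x40000, 0x8000, 10), 0x10000..0x18000);
}
```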
@@ -298,12 +300,11 @@ fn generate_junk(
     partition: Option<&PartitionInfo>,
     disc_header: &DiscHeader,
 ) {
-    let mut pos = if let Some(partition) = partition {
-        (sector - partition.data_start_sector) as u64 * SECTOR_DATA_SIZE as u64
+    let (mut pos, mut offset) = if partition.is_some() {
+        (sector as u64 * SECTOR_DATA_SIZE as u64, HASHES_SIZE)
     } else {
-        sector as u64 * SECTOR_SIZE as u64
+        (sector as u64 * SECTOR_SIZE as u64, 0)
     };
-    let mut offset = if partition.is_some() { HASHES_SIZE } else { 0 };
     out[..offset].fill(0);
     while offset < SECTOR_SIZE {
         // The LFG spans a single sector of the decrypted data,
@@ -318,11 +319,11 @@ fn generate_junk(
     }
 }
 
-fn rebuild_hash_block(out: &mut [u8; SECTOR_SIZE], sector: u32, partition: &PartitionInfo) {
+fn rebuild_hash_block(out: &mut [u8; SECTOR_SIZE], part_sector: u32, partition: &PartitionInfo) {
     let Some(hash_table) = partition.hash_table.as_ref() else {
         return;
     };
-    let sector_idx = (sector - partition.data_start_sector) as usize;
+    let sector_idx = part_sector as usize;
     let h0_hashes: &[u8; 0x26C] =
         transmute_ref!(array_ref![hash_table.h0_hashes, sector_idx * 31, 31]);
     out[0..0x26C].copy_from_slice(h0_hashes);
@@ -80,8 +80,6 @@ impl DiscIOCISO {
             None
         };
 
-        // Reset reader
-        inner.reset();
         Ok(Box::new(Self { inner, header, block_map, nkit_header }))
     }
 }
@@ -78,9 +78,6 @@ impl DiscIOGCZ {
 
         // header + block_count * (u64 + u32)
         let data_offset = size_of::<GCZHeader>() as u64 + block_count as u64 * 12;
-
-        // Reset reader
-        inner.reset();
         let block_buf = <u8>::new_box_slice_zeroed(header.block_size.get() as usize);
         Ok(Box::new(Self { inner, header, block_map, block_hashes, block_buf, data_offset }))
     }
@@ -22,12 +22,13 @@ pub(crate) type KeyBytes = [u8; 16];
 /// Magic bytes
 pub(crate) type MagicBytes = [u8; 4];
 
+/// The disc file format.
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
 pub enum Format {
-    /// Raw ISO
+    /// ISO / GCM (GameCube master disc)
     #[default]
     Iso,
-    /// CISO
+    /// CISO (Compact ISO)
     Ciso,
     /// GCZ
     Gcz,
@@ -55,6 +56,7 @@ impl fmt::Display for Format {
     }
 }
 
+/// The disc file format's compression algorithm.
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
 pub enum Compression {
     /// No compression
@@ -100,11 +100,6 @@ impl SplitFileReader {
         Ok(())
     }
 
-    pub fn reset(&mut self) {
-        self.open_file = None;
-        self.pos = 0;
-    }
-
     pub fn len(&self) -> u64 { self.files.last().map_or(0, |f| f.begin + f.size) }
 }
 
@@ -102,8 +102,6 @@ impl DiscIOWBFS {
         inner.seek(SeekFrom::Start(0x10000)).context("Seeking to NKit header")?;
         let nkit_header = NKitHeader::try_read_from(&mut inner, header.block_size(), true);
 
-        // Reset reader
-        inner.reset();
         Ok(Box::new(Self { inner, header, block_map, nkit_header }))
     }
 }
@@ -91,16 +91,16 @@ impl WIAFileHeader {
     pub fn is_rvz(&self) -> bool { self.magic == RVZ_MAGIC }
 }
 
-/// Disc type
+/// Disc kind
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub enum DiscType {
+pub enum DiscKind {
     /// GameCube disc
     GameCube,
     /// Wii disc
     Wii,
 }
 
-impl TryFrom<u32> for DiscType {
+impl TryFrom<u32> for DiscKind {
     type Error = Error;
 
     fn try_from(value: u32) -> Result<Self> {
@@ -225,11 +225,11 @@ static_assert!(size_of::<WIADisc>() == 0xDC);
 
 impl WIADisc {
     pub fn validate(&self) -> Result<()> {
-        DiscType::try_from(self.disc_type.get())?;
+        DiscKind::try_from(self.disc_type.get())?;
         WIACompression::try_from(self.compression.get())?;
         if self.partition_type_size.get() != size_of::<WIAPartition>() as u32 {
             return Err(Error::DiscFormat(format!(
-                "WIA partition type size is {}, expected {}",
+                "WIA/RVZ partition type size is {}, expected {}",
                 self.partition_type_size.get(),
                 size_of::<WIAPartition>()
             )));
@@ -518,12 +518,12 @@ pub struct DiscIOWIA {
 impl Clone for DiscIOWIA {
     fn clone(&self) -> Self {
         Self {
+            inner: self.inner.clone(),
             header: self.header.clone(),
             disc: self.disc.clone(),
             partitions: self.partitions.clone(),
             raw_data: self.raw_data.clone(),
             groups: self.groups.clone(),
-            inner: self.inner.clone(),
             nkit_header: self.nkit_header.clone(),
             decompressor: self.decompressor.clone(),
             group: u32::MAX,
@@ -541,7 +541,7 @@ fn verify_hash(buf: &[u8], expected: &HashBytes) -> Result<()> {
         let mut expected_bytes = [0u8; 40];
         let expected = base16ct::lower::encode_str(expected, &mut expected_bytes).unwrap(); // Safe: fixed buffer size
         return Err(Error::DiscFormat(format!(
-            "WIA hash mismatch: {}, expected {}",
+            "WIA/RVZ hash mismatch: {}, expected {}",
             got, expected
         )));
     }
@@ -685,18 +685,10 @@ impl BlockIO for DiscIOWIA {
         sector: u32,
         partition: Option<&PartitionInfo>,
     ) -> io::Result<Block> {
-        let mut chunk_size = self.disc.chunk_size.get();
+        let chunk_size = self.disc.chunk_size.get();
         let sectors_per_chunk = chunk_size / SECTOR_SIZE as u32;
-        let disc_offset = sector as u64 * SECTOR_SIZE as u64;
-        let mut partition_offset = disc_offset;
-        if let Some(partition) = partition {
-            // Within a partition, hashes are excluded from the data size
-            chunk_size = (chunk_size * SECTOR_DATA_SIZE as u32) / SECTOR_SIZE as u32;
-            partition_offset =
-                (sector - partition.data_start_sector) as u64 * SECTOR_DATA_SIZE as u64;
-        }
 
-        let (group_index, group_sector) = if let Some(partition) = partition {
+        let (group_index, group_sector, partition_offset) = if let Some(partition) = partition {
             // Find the partition
             let Some(wia_part) = self.partitions.get(partition.index) else {
                 return Err(io::Error::new(
@@ -747,7 +739,12 @@ impl BlockIO for DiscIOWIA {
                 ));
             }
 
-            (pd.group_index.get() + part_group_index, part_group_sector)
+            // Calculate the group offset within the partition
+            let part_group_offset =
+                (((part_group_index * sectors_per_chunk) + pd.first_sector.get())
+                    - wia_part.partition_data[0].first_sector.get()) as u64
+                    * SECTOR_DATA_SIZE as u64;
+            (pd.group_index.get() + part_group_index, part_group_sector, part_group_offset)
         } else {
             let Some(rd) = self.raw_data.iter().find(|d| d.contains(sector)) else {
                 return Err(io::Error::new(
@@ -771,7 +768,9 @@ impl BlockIO for DiscIOWIA {
                 ));
             }
 
-            (rd.group_index.get() + group_index, group_sector)
+            // Calculate the group offset
+            let group_offset = rd.raw_data_offset.get() + (group_index * chunk_size) as u64;
+            (rd.group_index.get() + group_index, group_sector, group_offset)
         };
 
         // Fetch the group
@@ -790,7 +789,13 @@ impl BlockIO for DiscIOWIA {
 
         // Read group data if necessary
         if group_index != self.group {
-            self.group_data = Vec::with_capacity(chunk_size as usize);
+            let group_data_size = if partition.is_some() {
+                // Within a partition, hashes are excluded from the data size
+                (sectors_per_chunk * SECTOR_DATA_SIZE as u32) as usize
+            } else {
+                chunk_size as usize
+            };
+            self.group_data = Vec::with_capacity(group_data_size);
             let group_data_start = group.data_offset.get() as u64 * 4;
             self.inner.seek(SeekFrom::Start(group_data_start))?;
 
@@ -864,10 +869,10 @@ impl BlockIO for DiscIOWIA {
         // Read sector from cached group data
         if partition.is_some() {
             let sector_data_start = group_sector as usize * SECTOR_DATA_SIZE;
-            let sector_data =
-                &self.group_data[sector_data_start..sector_data_start + SECTOR_DATA_SIZE];
             out[..HASHES_SIZE].fill(0);
-            out[HASHES_SIZE..SECTOR_SIZE].copy_from_slice(sector_data);
+            out[HASHES_SIZE..SECTOR_SIZE].copy_from_slice(
+                &self.group_data[sector_data_start..sector_data_start + SECTOR_DATA_SIZE],
+            );
             Ok(Block::PartDecrypted { has_hashes: false })
         } else {
             let sector_data_start = group_sector as usize * SECTOR_SIZE;
@@ -1,40 +1,62 @@
-// #![warn(missing_docs, rustdoc::missing_doc_code_examples)]
-//! Library for traversing & reading GameCube and Wii disc images.
+#![warn(missing_docs)]
+//! Library for traversing & reading Nintendo Optical Disc (GameCube and Wii) images.
 //!
-//! Based on the C++ library [nod](https://github.com/AxioDL/nod),
+//! Originally based on the C++ library [nod](https://github.com/AxioDL/nod),
 //! but does not currently support authoring.
 //!
 //! Currently supported file formats:
 //! - ISO (GCM)
 //! - WIA / RVZ
-//! - WBFS
-//! - NFS (Wii U VC files, e.g. `hif_000000.nfs`)
+//! - WBFS (+ NKit 2 lossless)
+//! - CISO (+ NKit 2 lossless)
+//! - NFS (Wii U VC)
+//! - GCZ
 //!
 //! # Examples
 //!
 //! Opening a disc image and reading a file:
+//!
 //! ```no_run
 //! use std::io::Read;
 //!
-//! use nod::{Disc, PartitionKind};
+//! // Open a disc image and the first data partition.
+//! let disc = nod::Disc::new("path/to/file.iso")
+//!     .expect("Failed to open disc");
+//! let mut partition = disc.open_partition_kind(nod::PartitionKind::Data)
+//!     .expect("Failed to open data partition");
 //!
-//! fn main() -> nod::Result<()> {
-//!     let disc = Disc::new("path/to/file.iso")?;
-//!     let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
-//!     let meta = partition.meta()?;
-//!     let fst = meta.fst()?;
-//!     if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
-//!         let mut s = String::new();
-//!         partition
-//!             .open_file(node)
-//!             .expect("Failed to open file stream")
-//!             .read_to_string(&mut s)
-//!             .expect("Failed to read file");
-//!         println!("{}", s);
-//!     }
-//!     Ok(())
+//! // Read partition metadata and the file system table.
+//! let meta = partition.meta()
+//!     .expect("Failed to read partition metadata");
+//! let fst = meta.fst()
+//!     .expect("File system table is invalid");
+//!
+//! // Find a file by path and read it into a string.
+//! if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
+//!     let mut s = String::new();
+//!     partition
+//!         .open_file(node)
+//!         .expect("Failed to open file stream")
+//!         .read_to_string(&mut s)
+//!         .expect("Failed to read file");
+//!     println!("{}", s);
 //! }
 //! ```
+//!
+//! Converting a disc image to raw ISO:
+//!
+//! ```no_run
+//! // Enable `rebuild_encryption` to ensure the output is a valid ISO.
+//! let options = nod::OpenOptions { rebuild_encryption: true, ..Default::default() };
+//! let mut disc = nod::Disc::new_with_options("path/to/file.rvz", &options)
+//!     .expect("Failed to open disc");
+//!
+//! // Read directly from the open disc and write to the output file.
+//! let mut out = std::fs::File::create("output.iso")
+//!     .expect("Failed to create output file");
+//! std::io::copy(&mut disc, &mut out)
+//!     .expect("Failed to write data");
+//! ```
 
 use std::{
     io::{Read, Seek},
@@ -42,7 +64,7 @@ use std::{
 };
 
 pub use disc::{
-    AppLoaderHeader, DiscHeader, DolHeader, PartitionBase, PartitionHeader, PartitionKind,
+    ApploaderHeader, DiscHeader, DolHeader, PartitionBase, PartitionHeader, PartitionKind,
     PartitionMeta, BI2_SIZE, BOOT_SIZE, SECTOR_SIZE,
 };
 pub use fst::{Fst, Node, NodeKind};
@@ -80,7 +102,9 @@ impl From<String> for Error {
 /// Helper result type for [`Error`].
 pub type Result<T, E = Error> = core::result::Result<T, E>;
 
+/// Helper trait for adding context to errors.
 pub trait ErrorContext {
+    /// Adds context to an error.
     fn context(self, context: impl Into<String>) -> Error;
 }
 
@@ -88,9 +112,12 @@ impl ErrorContext for std::io::Error {
     fn context(self, context: impl Into<String>) -> Error { Error::Io(context.into(), self) }
 }
 
+/// Helper trait for adding context to result errors.
 pub trait ResultContext<T> {
+    /// Adds context to a result error.
     fn context(self, context: impl Into<String>) -> Result<T>;
 
+    /// Adds context to a result error using a closure.
     fn with_context<F>(self, f: F) -> Result<T>
     where F: FnOnce() -> String;
 }
@@ -108,6 +135,7 @@ where E: ErrorContext
     }
 }
 
+/// Options for opening a disc image.
 #[derive(Default, Debug, Clone)]
 pub struct OpenOptions {
     /// Wii: Rebuild partition data encryption and hashes if the underlying format stores data
@@ -117,6 +145,9 @@ pub struct OpenOptions {
     pub validate_hashes: bool,
 }
 
+/// An open disc image and read stream.
+///
+/// This is the primary entry point for reading disc images.
 pub struct Disc {
     reader: disc::reader::DiscReader,
     options: OpenOptions,
@@ -135,7 +166,7 @@ impl Disc {
         Ok(Disc { reader, options: options.clone() })
     }
 
-    /// The disc's header.
+    /// The disc's primary header.
     pub fn header(&self) -> &DiscHeader { self.reader.header() }
 
     /// Returns extra metadata included in the disc file format, if any.
@@ -146,20 +177,20 @@ impl Disc {
 
     /// A list of Wii partitions on the disc.
     ///
-    /// For GameCube discs, this will return an empty slice.
+    /// **GameCube**: This will return an empty slice.
     pub fn partitions(&self) -> &[PartitionInfo] { self.reader.partitions() }
 
-    /// Opens a new, decrypted partition read stream for the specified partition index.
+    /// Opens a decrypted partition read stream for the specified partition index.
     ///
-    /// For GameCube discs, the index must always be 0.
+    /// **GameCube**: `index` must always be 0.
     pub fn open_partition(&self, index: usize) -> Result<Box<dyn PartitionBase>> {
         self.reader.open_partition(index, &self.options)
     }
 
-    /// Opens a new partition read stream for the first partition matching
-    /// the specified type.
+    /// Opens a decrypted partition read stream for the first partition matching
+    /// the specified kind.
     ///
-    /// For GameCube discs, the kind must always be `PartitionKind::Data`.
+    /// **GameCube**: `kind` must always be [`PartitionKind::Data`].
     pub fn open_partition_kind(&self, kind: PartitionKind) -> Result<Box<dyn PartitionBase>> {
         self.reader.open_partition_kind(kind, &self.options)
     }
@@ -1,6 +1,6 @@
 [package]
 name = "nodtool"
-version = "1.0.0"
+version = "1.1.0"
 edition = "2021"
 rust-version = "1.73.0"
 authors = ["Luke Street <luke@street.dev>"]
@@ -6,4 +6,3 @@ reorder_impl_items = true
 use_field_init_shorthand = true
 use_small_heuristics = "Max"
 where_single_line = true
-format_code_in_doc_comments = true