Compare commits


4 Commits
v1.0.0 ... main

30 changed files with 946 additions and 819 deletions

Cargo.lock (generated)

@ -411,7 +411,7 @@ dependencies = [
[[package]]
name = "nod"
version = "1.0.0"
version = "1.2.0"
dependencies = [
"adler",
"aes",
@ -434,7 +434,7 @@ dependencies = [
[[package]]
name = "nodtool"
version = "1.0.0"
version = "1.2.0"
dependencies = [
"argp",
"base16ct",


@ -70,26 +70,45 @@ Opening a disc image and reading a file:
```rust
use std::io::Read;
-use nod::{Disc, PartitionKind};
-
-fn main() -> nod::Result<()> {
-    let disc = Disc::new("path/to/file.iso")?;
-    let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
-    let meta = partition.meta()?;
-    let fst = meta.fst()?;
-    if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
-        let mut s = String::new();
-        partition
-            .open_file(node)
-            .expect("Failed to open file stream")
-            .read_to_string(&mut s)
-            .expect("Failed to read file");
-        println!("{}", s);
-    }
-    Ok(())
-}
+// Open a disc image and the first data partition.
+let disc = nod::Disc::new("path/to/file.iso")
+    .expect("Failed to open disc");
+let mut partition = disc.open_partition_kind(nod::PartitionKind::Data)
+    .expect("Failed to open data partition");
+
+// Read partition metadata and the file system table.
+let meta = partition.meta()
+    .expect("Failed to read partition metadata");
+let fst = meta.fst()
+    .expect("File system table is invalid");
+
+// Find a file by path and read it into a string.
+if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
+    let mut s = String::new();
+    partition
+        .open_file(node)
+        .expect("Failed to open file stream")
+        .read_to_string(&mut s)
+        .expect("Failed to read file");
+    println!("{}", s);
+}
```
+Converting a disc image to raw ISO:
+
+```rust
+// Enable `rebuild_encryption` to ensure the output is a valid ISO.
+let options = nod::OpenOptions { rebuild_encryption: true, ..Default::default() };
+let mut disc = nod::Disc::new_with_options("path/to/file.rvz", &options)
+    .expect("Failed to open disc");
+
+// Read directly from the open disc and write to the output file.
+let mut out = std::fs::File::create("output.iso")
+    .expect("Failed to create output file");
+std::io::copy(&mut disc, &mut out)
+    .expect("Failed to write data");
+```
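The same building blocks compose for extracting a single file to disk rather than into memory. A minimal sketch against the API shown above; the banner path and output file name are hypothetical:

```rust
use std::io;

// Minimal sketch using the nod 1.2.0 API shown above; the banner
// path and the output file name are hypothetical.
let disc = nod::Disc::new("path/to/file.iso")
    .expect("Failed to open disc");
let mut partition = disc.open_partition_kind(nod::PartitionKind::Data)
    .expect("Failed to open data partition");
let meta = partition.meta().expect("Failed to read partition metadata");
let fst = meta.fst().expect("File system table is invalid");

if let Some((_, node)) = fst.find("/opening.bnr") {
    // Buffer writes using the partition's preferred read size.
    let file = std::fs::File::create("opening.bnr").expect("Failed to create output file");
    let mut out = io::BufWriter::with_capacity(partition.ideal_buffer_size(), file);
    let mut stream = partition.open_file(node).expect("Failed to open file stream");
    io::copy(&mut stream, &mut out).expect("Failed to extract file");
}
```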
## License
Licensed under either of


@ -1,6 +1,6 @@
[package]
name = "nod"
version = "1.0.0"
version = "1.2.0"
edition = "2021"
rust-version = "1.73.0"
authors = ["Luke Street <luke@street.dev>"]


@ -9,7 +9,7 @@ use zerocopy::{FromBytes, FromZeroes};
use crate::{
disc::{
-AppLoaderHeader, DiscHeader, DolHeader, PartitionBase, PartitionHeader, PartitionMeta,
+ApploaderHeader, DiscHeader, DolHeader, PartitionBase, PartitionHeader, PartitionMeta,
BI2_SIZE, BOOT_SIZE, SECTOR_SIZE,
},
fst::{Node, NodeKind},
@ -79,7 +79,6 @@ impl Read for PartitionGC {
self.block.copy_raw(
self.sector_buf.as_mut(),
self.block_buf.as_ref(),
-block_idx,
sector,
&self.disc_header,
)?;
@ -137,33 +136,34 @@ pub(crate) fn read_part_meta(
// apploader.bin
let mut raw_apploader: Vec<u8> =
-read_vec(reader, size_of::<AppLoaderHeader>()).context("Reading apploader header")?;
-let apploader_header = AppLoaderHeader::ref_from(raw_apploader.as_slice()).unwrap();
+read_vec(reader, size_of::<ApploaderHeader>()).context("Reading apploader header")?;
+let apploader_header = ApploaderHeader::ref_from(raw_apploader.as_slice()).unwrap();
raw_apploader.resize(
-size_of::<AppLoaderHeader>()
+size_of::<ApploaderHeader>()
+ apploader_header.size.get() as usize
+ apploader_header.trailer_size.get() as usize,
0,
);
reader
-.read_exact(&mut raw_apploader[size_of::<AppLoaderHeader>()..])
+.read_exact(&mut raw_apploader[size_of::<ApploaderHeader>()..])
.context("Reading apploader")?;
// fst.bin
reader
-.seek(SeekFrom::Start(partition_header.fst_off(is_wii)))
+.seek(SeekFrom::Start(partition_header.fst_offset(is_wii)))
.context("Seeking to FST offset")?;
-let raw_fst: Box<[u8]> = read_box_slice(reader, partition_header.fst_sz(is_wii) as usize)
+let raw_fst: Box<[u8]> = read_box_slice(reader, partition_header.fst_size(is_wii) as usize)
.with_context(|| {
format!(
"Reading partition FST (offset {}, size {})",
-partition_header.fst_off, partition_header.fst_sz
+partition_header.fst_offset(is_wii),
+partition_header.fst_size(is_wii)
)
})?;
// main.dol
reader
-.seek(SeekFrom::Start(partition_header.dol_off(is_wii)))
+.seek(SeekFrom::Start(partition_header.dol_offset(is_wii)))
.context("Seeking to DOL offset")?;
let mut raw_dol: Vec<u8> =
read_vec(reader, size_of::<DolHeader>()).context("Reading DOL header")?;


@ -82,10 +82,6 @@ pub fn rebuild_hashes(reader: &mut DiscReader) -> Result<()> {
// Precompute hashes for zeroed sectors.
const ZERO_H0_BYTES: &[u8] = &[0u8; HASHES_SIZE];
let zero_h0_hash = hash_bytes(ZERO_H0_BYTES);
-let mut zero_h1_hash = Sha1::new();
-for _ in 0..NUM_H0_HASHES {
-    zero_h1_hash.update(zero_h0_hash);
-}
let partitions = reader.partitions();
let mut hash_tables = Vec::with_capacity(partitions.len());
@ -171,6 +167,7 @@ pub fn rebuild_hashes(reader: &mut DiscReader) -> Result<()> {
.context("Seeking to H3 table")?;
let h3_table: Box<[HashBytes]> =
read_box_slice(reader, hash_table.h3_hashes.len()).context("Reading H3 table")?;
let mut mismatches = 0;
for (idx, (expected_hash, h3_hash)) in
h3_table.iter().zip(hash_table.h3_hashes.iter()).enumerate()
{
@ -180,12 +177,16 @@ pub fn rebuild_hashes(reader: &mut DiscReader) -> Result<()> {
let mut expected_bytes = [0u8; 40];
let expected =
base16ct::lower::encode_str(expected_hash, &mut expected_bytes).unwrap();
-log::warn!(
+log::debug!(
"Partition {} H3 table does not match:\n\tindex {}\n\texpected: {}\n\tgot: {}",
part.index, idx, expected, got
);
mismatches += 1;
}
}
if mismatches > 0 {
log::warn!("Partition {} H3 table has {} hash mismatches", part.index, mismatches);
}
}
for (part, hash_table) in reader.partitions.iter_mut().zip(hash_tables) {


@ -25,9 +25,12 @@ pub(crate) mod hashes;
pub(crate) mod reader;
pub(crate) mod wii;
/// Size in bytes of a disc sector.
pub const SECTOR_SIZE: usize = 0x8000;
-/// Shared GameCube & Wii disc header
+/// Shared GameCube & Wii disc header.
+///
+/// This header is always at the start of the disc image and within each Wii partition.
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct DiscHeader {
@ -78,30 +81,34 @@ impl DiscHeader {
pub fn is_wii(&self) -> bool { self.wii_magic.get() == 0x5D1C9EA3 }
}
-/// Partition header
+/// A header describing the contents of a disc partition.
+///
+/// **GameCube**: Always follows the disc header.
+///
+/// **Wii**: Follows the disc header within each partition.
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct PartitionHeader {
/// Debug monitor offset
-pub debug_mon_off: U32,
+pub debug_mon_offset: U32,
/// Debug monitor load address
-pub debug_load_addr: U32,
+pub debug_load_address: U32,
/// Padding
_pad1: [u8; 0x18],
/// Offset to main DOL (Wii: >> 2)
-pub dol_off: U32,
+pub dol_offset: U32,
/// Offset to file system table (Wii: >> 2)
-pub fst_off: U32,
+pub fst_offset: U32,
/// File system size (Wii: >> 2)
-pub fst_sz: U32,
+pub fst_size: U32,
/// File system max size (Wii: >> 2)
-pub fst_max_sz: U32,
+pub fst_max_size: U32,
/// File system table load address
pub fst_memory_address: U32,
/// User position
pub user_position: U32,
/// User size
-pub user_sz: U32,
+pub user_size: U32,
/// Padding
_pad2: [u8; 4],
}
@ -109,43 +116,47 @@ pub struct PartitionHeader {
static_assert!(size_of::<PartitionHeader>() == 0x40);
impl PartitionHeader {
-pub fn dol_off(&self, is_wii: bool) -> u64 {
+/// Offset within the partition to the main DOL.
+pub fn dol_offset(&self, is_wii: bool) -> u64 {
if is_wii {
-self.dol_off.get() as u64 * 4
+self.dol_offset.get() as u64 * 4
} else {
-self.dol_off.get() as u64
+self.dol_offset.get() as u64
}
}
-pub fn fst_off(&self, is_wii: bool) -> u64 {
+/// Offset within the partition to the file system table (FST).
+pub fn fst_offset(&self, is_wii: bool) -> u64 {
if is_wii {
-self.fst_off.get() as u64 * 4
+self.fst_offset.get() as u64 * 4
} else {
-self.fst_off.get() as u64
+self.fst_offset.get() as u64
}
}
-pub fn fst_sz(&self, is_wii: bool) -> u64 {
+/// Size of the file system table (FST).
+pub fn fst_size(&self, is_wii: bool) -> u64 {
if is_wii {
-self.fst_sz.get() as u64 * 4
+self.fst_size.get() as u64 * 4
} else {
-self.fst_sz.get() as u64
+self.fst_size.get() as u64
}
}
-pub fn fst_max_sz(&self, is_wii: bool) -> u64 {
+/// Maximum size of the file system table (FST) across multi-disc games.
+pub fn fst_max_size(&self, is_wii: bool) -> u64 {
if is_wii {
-self.fst_max_sz.get() as u64 * 4
+self.fst_max_size.get() as u64 * 4
} else {
-self.fst_max_sz.get() as u64
+self.fst_max_size.get() as u64
}
}
}
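These accessors undo the storage convention noted in the field docs: Wii headers store offsets and sizes divided by 4 (the "(Wii: >> 2)" in the comments), while GameCube stores plain byte values. A small standalone illustration, with an arbitrary stored value:

```rust
// Illustration of the "(Wii: >> 2)" convention the accessors above undo:
// Wii headers store these offsets and sizes divided by 4, GameCube stores
// plain byte values. The stored value here is arbitrary.
fn shifted(stored: u32, is_wii: bool) -> u64 {
    if is_wii { stored as u64 * 4 } else { stored as u64 }
}

fn main() {
    assert_eq!(shifted(0x0001_2000, false), 0x0001_2000); // GameCube: byte offset as-is
    assert_eq!(shifted(0x0001_2000, true), 0x0004_8000); // Wii: multiplied back by 4
}
```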
-/// Apploader header
+/// Apploader header.
#[derive(Debug, PartialEq, Clone, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
-pub struct AppLoaderHeader {
+pub struct ApploaderHeader {
/// Apploader build date
pub date: [u8; 16],
/// Entry point
@ -158,19 +169,19 @@ pub struct AppLoaderHeader {
_pad: [u8; 4],
}
-impl AppLoaderHeader {
-    /// Apploader build date as a string
+impl ApploaderHeader {
+    /// Apploader build date as a string.
pub fn date_str(&self) -> Option<&str> {
CStr::from_bytes_until_nul(&self.date).ok().and_then(|c| c.to_str().ok())
}
}
-/// Maximum number of text sections in a DOL
+/// Maximum number of text sections in a DOL.
pub const DOL_MAX_TEXT_SECTIONS: usize = 7;
-/// Maximum number of data sections in a DOL
+/// Maximum number of data sections in a DOL.
pub const DOL_MAX_DATA_SECTIONS: usize = 11;
-/// DOL header
+/// Dolphin executable (DOL) header.
#[derive(Debug, Clone, FromBytes, FromZeroes)]
pub struct DolHeader {
/// Text section offsets
@ -197,12 +208,16 @@ pub struct DolHeader {
static_assert!(size_of::<DolHeader>() == 0x100);
-/// Partition type
+/// The kind of disc partition.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum PartitionKind {
/// Data partition.
Data,
/// Update partition.
Update,
/// Channel partition.
Channel,
/// Other partition kind.
Other(u32),
}
@ -246,7 +261,7 @@ impl From<u32> for PartitionKind {
}
}
-/// An open read stream for a disc partition.
+/// An open disc partition.
pub trait PartitionBase: DynClone + ReadStream + Send + Sync {
/// Reads the partition header and file system table.
fn meta(&mut self) -> Result<Box<PartitionMeta>>;
@ -283,7 +298,7 @@ pub trait PartitionBase: DynClone + ReadStream + Send + Sync {
/// The ideal size for buffered reads from this partition.
/// GameCube discs have a data block size of 0x8000,
-/// whereas Wii discs have a data block size of 0x7c00.
+/// whereas Wii discs have a data block size of 0x7C00.
fn ideal_buffer_size(&self) -> usize;
}
@ -294,7 +309,7 @@ pub const BOOT_SIZE: usize = size_of::<DiscHeader>() + size_of::<PartitionHeader
/// Size of the debug and region information (bi2.bin)
pub const BI2_SIZE: usize = 0x2000;
-/// Disc partition metadata
+/// Extra disc partition data. (DOL, FST, etc.)
#[derive(Clone, Debug)]
pub struct PartitionMeta {
/// Disc and partition header (boot.bin)
@ -318,31 +333,50 @@ pub struct PartitionMeta {
}
impl PartitionMeta {
/// A view into the disc header.
pub fn header(&self) -> &DiscHeader {
DiscHeader::ref_from(&self.raw_boot[..size_of::<DiscHeader>()]).unwrap()
}
/// A view into the partition header.
pub fn partition_header(&self) -> &PartitionHeader {
PartitionHeader::ref_from(&self.raw_boot[size_of::<DiscHeader>()..]).unwrap()
}
-pub fn apploader_header(&self) -> &AppLoaderHeader {
-    AppLoaderHeader::ref_from_prefix(&self.raw_apploader).unwrap()
+/// A view into the apploader header.
+pub fn apploader_header(&self) -> &ApploaderHeader {
+    ApploaderHeader::ref_from_prefix(&self.raw_apploader).unwrap()
}
/// A view into the file system table (FST).
pub fn fst(&self) -> Result<Fst, &'static str> { Fst::new(&self.raw_fst) }
/// A view into the DOL header.
pub fn dol_header(&self) -> &DolHeader { DolHeader::ref_from_prefix(&self.raw_dol).unwrap() }
/// A view into the ticket. (Wii only)
pub fn ticket(&self) -> Option<&Ticket> {
self.raw_ticket.as_ref().and_then(|v| Ticket::ref_from(v))
}
/// A view into the TMD. (Wii only)
pub fn tmd_header(&self) -> Option<&TmdHeader> {
self.raw_tmd.as_ref().and_then(|v| TmdHeader::ref_from_prefix(v))
}
}
/// The size of a single-layer MiniDVD. (1.4 GB)
///
/// GameCube games and some third-party Wii discs (Datel) use this format.
pub const MINI_DVD_SIZE: u64 = 1_459_978_240;
/// The size of a single-layer DVD. (4.7 GB)
///
/// The vast majority of Wii games use this format.
pub const SL_DVD_SIZE: u64 = 4_699_979_776;
/// The size of a dual-layer DVD. (8.5 GB)
///
/// A few larger Wii games use this format.
/// (Super Smash Bros. Brawl, Metroid Prime Trilogy, etc.)
pub const DL_DVD_SIZE: u64 = 8_511_160_320;
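A hypothetical helper showing one way these constants might be used to label an image; this is illustrative only and not part of nod's API:

```rust
// Hypothetical helper, not part of nod's API; the constants mirror the
// MINI_DVD_SIZE / SL_DVD_SIZE / DL_DVD_SIZE values documented above.
const MINI_DVD_SIZE: u64 = 1_459_978_240;
const SL_DVD_SIZE: u64 = 4_699_979_776;
const DL_DVD_SIZE: u64 = 8_511_160_320;

fn classify(disc_size: u64) -> &'static str {
    match disc_size {
        MINI_DVD_SIZE => "single-layer MiniDVD (GameCube / Datel)",
        SL_DVD_SIZE => "single-layer DVD (most Wii titles)",
        DL_DVD_SIZE => "dual-layer DVD (e.g. Super Smash Bros. Brawl)",
        _ => "non-standard image size",
    }
}
```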


@ -83,7 +83,7 @@ impl DiscReader {
if reader.disc_header.is_wii() {
reader.partitions = read_partition_info(&mut reader)?;
// Rebuild hashes if the format requires it
-if options.rebuild_encryption && meta.needs_hash_recovery {
+if (options.rebuild_encryption || options.validate_hashes) && meta.needs_hash_recovery {
rebuild_hashes(&mut reader)?;
}
}
@ -130,22 +130,22 @@ impl DiscReader {
}
/// Opens a new, decrypted partition read stream for the first partition matching
-/// the specified type.
+/// the specified kind.
pub fn open_partition_kind(
&self,
-part_type: PartitionKind,
+kind: PartitionKind,
options: &OpenOptions,
) -> Result<Box<dyn PartitionBase>> {
if self.disc_header.is_gamecube() {
-if part_type == PartitionKind::Data {
+if kind == PartitionKind::Data {
Ok(PartitionGC::new(self.io.clone(), self.disc_header.clone())?)
} else {
Err(Error::DiscFormat("GameCube discs only have a data partition".to_string()))
}
-} else if let Some(part) = self.partitions.iter().find(|v| v.kind == part_type) {
+} else if let Some(part) = self.partitions.iter().find(|v| v.kind == kind) {
Ok(PartitionWii::new(self.io.clone(), self.disc_header.clone(), part, options)?)
} else {
Err(Error::DiscFormat(format!("Partition type {part_type} not found")))
Err(Error::DiscFormat(format!("Partition type {kind} not found")))
}
}
}
@ -176,14 +176,12 @@ impl Read for DiscReader {
EncryptionMode::Decrypted => self.block.decrypt(
self.sector_buf.as_mut(),
self.block_buf.as_ref(),
-block_idx,
abs_sector,
partition,
)?,
EncryptionMode::Encrypted => self.block.encrypt(
self.sector_buf.as_mut(),
self.block_buf.as_ref(),
-block_idx,
abs_sector,
partition,
)?,
@ -192,7 +190,6 @@ impl Read for DiscReader {
self.block.copy_raw(
self.sector_buf.as_mut(),
self.block_buf.as_ref(),
-block_idx,
abs_sector,
&self.disc_header,
)?;
@ -225,7 +222,7 @@ impl Seek for DiscReader {
}
}
-fn read_partition_info(reader: &mut DiscReader) -> crate::Result<Vec<PartitionInfo>> {
+fn read_partition_info(reader: &mut DiscReader) -> Result<Vec<PartitionInfo>> {
reader.seek(SeekFrom::Start(WII_PART_GROUP_OFF)).context("Seeking to partition groups")?;
let part_groups: [WiiPartGroup; 4] = read_from(reader).context("Reading partition groups")?;
let mut part_info = Vec::new();
@ -306,6 +303,7 @@ fn guess_disc_size(part_info: &[PartitionInfo]) -> u64 {
})
.max()
.unwrap_or(0x50000);
// TODO add FST offsets (decrypted partitions)
if max_offset <= MINI_DVD_SIZE && !part_info.iter().any(|v| v.kind == PartitionKind::Data) {
// Datel disc
MINI_DVD_SIZE


@ -13,7 +13,7 @@ use crate::{
array_ref,
disc::{
gcn::{read_part_meta, PartitionGC},
-PartitionBase, PartitionKind, PartitionMeta, SECTOR_SIZE,
+PartitionBase, PartitionMeta, SECTOR_SIZE,
},
fst::{Node, NodeKind},
io::{
@ -27,7 +27,10 @@ use crate::{
DiscHeader, Error, OpenOptions, Result, ResultContext,
};
/// Size in bytes of the hashes block in a Wii disc sector
pub(crate) const HASHES_SIZE: usize = 0x400;
/// Size in bytes of the data block in a Wii disc sector (excluding hashes)
pub(crate) const SECTOR_DATA_SIZE: usize = SECTOR_SIZE - HASHES_SIZE; // 0x7C00
// ppki (Retail)
@ -67,18 +70,6 @@ impl WiiPartEntry {
pub(crate) fn offset(&self) -> u64 { (self.offset.get() as u64) << 2 }
}
-#[derive(Debug, PartialEq)]
-pub(crate) struct WiiPartInfo {
-    pub(crate) group_idx: u32,
-    pub(crate) part_idx: u32,
-    pub(crate) offset: u64,
-    pub(crate) kind: PartitionKind,
-    pub(crate) header: WiiPartitionHeader,
-    pub(crate) junk_id: [u8; 4],
-    pub(crate) junk_start: u64,
-    pub(crate) title_key: KeyBytes,
-}
pub(crate) const WII_PART_GROUP_OFF: u64 = 0x40000;
#[derive(Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
@ -312,12 +303,13 @@ impl PartitionWii {
impl Read for PartitionWii {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
-let partition_sector = (self.pos / SECTOR_DATA_SIZE as u64) as u32;
-let sector = self.partition.data_start_sector + partition_sector;
-if sector >= self.partition.data_end_sector {
+let part_sector = (self.pos / SECTOR_DATA_SIZE as u64) as u32;
+let abs_sector = self.partition.data_start_sector + part_sector;
+if abs_sector >= self.partition.data_end_sector {
return Ok(0);
}
-let block_idx = (sector as u64 * SECTOR_SIZE as u64 / self.block_buf.len() as u64) as u32;
+let block_idx =
+    (abs_sector as u64 * SECTOR_SIZE as u64 / self.block_buf.len() as u64) as u32;
// Read new block if necessary
if block_idx != self.block_idx {
@ -327,18 +319,17 @@ impl Read for PartitionWii {
}
// Decrypt sector if necessary
-if sector != self.sector {
+if abs_sector != self.sector {
self.block.decrypt(
self.sector_buf.as_mut(),
self.block_buf.as_ref(),
-block_idx,
-sector,
+abs_sector,
&self.partition,
)?;
if self.verify {
-verify_hashes(&self.sector_buf, sector)?;
+verify_hashes(self.sector_buf.as_ref(), part_sector, self.raw_h3_table.as_ref())?;
}
-self.sector = sector;
+self.sector = abs_sector;
}
let offset = (self.pos % SECTOR_DATA_SIZE as u64) as usize;
@ -369,9 +360,9 @@ impl Seek for PartitionWii {
#[inline(always)]
pub(crate) fn as_digest(slice: &[u8; 20]) -> digest::Output<Sha1> { (*slice).into() }
-fn verify_hashes(buf: &[u8; SECTOR_SIZE], sector: u32) -> io::Result<()> {
-    let (mut group, sub_group) = div_rem(sector as usize, 8);
-    group %= 8;
+fn verify_hashes(buf: &[u8; SECTOR_SIZE], part_sector: u32, h3_table: &[u8]) -> io::Result<()> {
+    let (cluster, sector) = div_rem(part_sector as usize, 8);
+    let (group, sub_group) = div_rem(cluster, 8);
// H0 hashes
for i in 0..31 {
@ -391,14 +382,14 @@ fn verify_hashes(buf: &[u8; SECTOR_SIZE], sector: u32) -> io::Result<()> {
{
let mut hash = Sha1::new();
hash.update(array_ref![buf, 0, 0x26C]);
-let expected = as_digest(array_ref![buf, 0x280 + sub_group * 20, 20]);
+let expected = as_digest(array_ref![buf, 0x280 + sector * 20, 20]);
let output = hash.finalize();
if output != expected {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Invalid H1 hash! (subgroup {:?}) {:x}\n\texpected {:x}",
-sub_group, output, expected
+sector, output, expected
),
));
}
@ -408,19 +399,33 @@ fn verify_hashes(buf: &[u8; SECTOR_SIZE], sector: u32) -> io::Result<()> {
{
let mut hash = Sha1::new();
hash.update(array_ref![buf, 0x280, 0xA0]);
-let expected = as_digest(array_ref![buf, 0x340 + group * 20, 20]);
+let expected = as_digest(array_ref![buf, 0x340 + sub_group * 20, 20]);
let output = hash.finalize();
if output != expected {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Invalid H2 hash! (group {:?}) {:x}\n\texpected {:x}",
-group, output, expected
+sub_group, output, expected
),
));
}
}
-// TODO H3 hash
+// H3 hash
+{
+    let mut hash = Sha1::new();
+    hash.update(array_ref![buf, 0x340, 0xA0]);
+    let expected = as_digest(array_ref![h3_table, group * 20, 20]);
+    let output = hash.finalize();
+    if output != expected {
+        return Err(io::Error::new(
+            io::ErrorKind::InvalidData,
+            format!("Invalid H3 hash! {:x}\n\texpected {:x}", output, expected),
+        ));
+    }
+}
Ok(())
}
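The rewritten verify_hashes addresses the Wii hash hierarchy by successive division by 8: eight sectors share one H1 subgroup, eight subgroups share one H2 group, and each group has a single H3 entry in the external per-partition table. A standalone sketch of the addressing, assuming the offsets used above:

```rust
// Standalone sketch of the hash-hierarchy addressing (offsets assumed
// from verify_hashes above): every 0x8000-byte sector begins with a
// 0x400-byte hash block; 8 sectors form a subgroup, 8 subgroups a group.
fn main() {
    let part_sector = 150u32; // hypothetical sector index within the partition

    let cluster = part_sector / 8; // subgroup index across the partition
    let sector = part_sector % 8; // sector within its subgroup (selects the H1 entry)
    let group = cluster / 8; // group index (selects the H3 entry)
    let sub_group = cluster % 8; // subgroup within its group (selects the H2 entry)

    // Offsets inside the sector's 0x400-byte hash block:
    let h1_offset = 0x280 + sector as usize * 20; // 8 SHA-1 hashes of H0 tables
    let h2_offset = 0x340 + sub_group as usize * 20; // 8 SHA-1 hashes of H1 tables
    // H3 lives in a separate per-partition table, one hash per group,
    // which is why verify_hashes now takes `h3_table` as an argument.
    let h3_offset = group as usize * 20;
    println!("H1 @ {h1_offset:#x}, H2 @ {h2_offset:#x}, H3 entry @ {h3_offset:#x}");
}
```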


@ -32,7 +32,7 @@ pub struct Node {
static_assert!(size_of::<Node>() == 12);
impl Node {
-/// File system node type.
+/// File system node kind.
pub fn kind(&self) -> NodeKind {
match self.kind {
0 => NodeKind::File,
@ -71,9 +71,11 @@ impl Node {
pub fn length(&self) -> u64 { self.length.get() as u64 }
}
-/// A view into the file system tree (FST).
+/// A view into the file system table (FST).
pub struct Fst<'a> {
/// The nodes in the FST.
pub nodes: &'a [Node],
/// The string table containing all file and directory names.
pub string_table: &'a [u8],
}
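A minimal sketch of walking the table with the iterator that nodtool's extract command uses (each item pairs the node index, the node, and its name resolved from the string table); this assumes `fst: nod::Fst` is already in scope:

```rust
// Minimal sketch, assuming `fst: nod::Fst` is in scope. Each item is
// (node index, node, name resolved from the string table), as used by
// nodtool's extract command.
for (_, node, name) in fst.iter() {
    let Ok(name) = name else { continue }; // skip names with bad string-table offsets
    match node.kind() {
        nod::NodeKind::Directory => println!("dir  {}", name),
        nod::NodeKind::File => println!("file {} ({} bytes)", name, node.length()),
        _ => {}
    }
}
```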


@ -135,20 +135,32 @@ pub fn open(filename: &Path) -> Result<Box<dyn BlockIO>> {
Ok(io)
}
/// Wii partition information.
#[derive(Debug, Clone)]
pub struct PartitionInfo {
/// The partition index.
pub index: usize,
/// The kind of disc partition.
pub kind: PartitionKind,
/// The start sector of the partition.
pub start_sector: u32,
/// The start sector of the partition's (encrypted) data.
pub data_start_sector: u32,
/// The end sector of the partition's (encrypted) data.
pub data_end_sector: u32,
/// The AES key for the partition, also known as the "title key".
pub key: KeyBytes,
/// The Wii partition header.
pub header: Box<WiiPartitionHeader>,
/// The disc header within the partition.
pub disc_header: Box<DiscHeader>,
/// The partition header within the partition.
pub partition_header: Box<PartitionHeader>,
/// The hash table for the partition, if rebuilt.
pub hash_table: Option<HashTable>,
}
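All positions here are stored as 0x8000-byte sector indices; byte offsets and sizes fall out by multiplication, which is exactly what nodtool's info command prints. A sketch with hypothetical sector values:

```rust
// Sketch converting the sector fields above into byte offsets and sizes,
// mirroring nodtool's `info` output. The sector values are hypothetical.
fn main() {
    const SECTOR_SIZE: u64 = 0x8000;
    let (start_sector, data_start_sector, data_end_sector) = (0x10u32, 0x12u32, 0x1_0000u32);

    let partition_offset = start_sector as u64 * SECTOR_SIZE;
    let data_offset = data_start_sector as u64 * SECTOR_SIZE;
    let data_size = (data_end_sector - data_start_sector) as u64 * SECTOR_SIZE;
    println!("partition @ {partition_offset:#x}, data @ {data_offset:#x} ({data_size:#x} bytes)");
}
```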
/// The block kind returned by [`BlockIO::read_block`].
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum Block {
/// Raw data or encrypted Wii partition data
@ -171,29 +183,28 @@ impl Block {
self,
out: &mut [u8; SECTOR_SIZE],
data: &[u8],
block_idx: u32,
abs_sector: u32,
partition: &PartitionInfo,
) -> io::Result<()> {
-let rel_sector = abs_sector - self.start_sector(block_idx, data.len());
+let part_sector = abs_sector - partition.data_start_sector;
match self {
Block::Raw => {
-out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, rel_sector)?);
+out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
decrypt_sector(out, partition);
}
Block::PartDecrypted { has_hashes } => {
-out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, rel_sector)?);
+out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
if !has_hashes {
-rebuild_hash_block(out, abs_sector, partition);
+rebuild_hash_block(out, part_sector, partition);
}
}
Block::Junk => {
-generate_junk(out, abs_sector, Some(partition), &partition.disc_header);
-rebuild_hash_block(out, abs_sector, partition);
+generate_junk(out, part_sector, Some(partition), &partition.disc_header);
+rebuild_hash_block(out, part_sector, partition);
}
Block::Zero => {
out.fill(0);
-rebuild_hash_block(out, abs_sector, partition);
+rebuild_hash_block(out, part_sector, partition);
}
}
Ok(())
@ -204,30 +215,29 @@ impl Block {
self,
out: &mut [u8; SECTOR_SIZE],
data: &[u8],
block_idx: u32,
abs_sector: u32,
partition: &PartitionInfo,
) -> io::Result<()> {
-let rel_sector = abs_sector - self.start_sector(block_idx, data.len());
+let part_sector = abs_sector - partition.data_start_sector;
match self {
Block::Raw => {
-out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, rel_sector)?);
+out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
}
Block::PartDecrypted { has_hashes } => {
-out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, rel_sector)?);
+out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
if !has_hashes {
-rebuild_hash_block(out, abs_sector, partition);
+rebuild_hash_block(out, part_sector, partition);
}
encrypt_sector(out, partition);
}
Block::Junk => {
-generate_junk(out, abs_sector, Some(partition), &partition.disc_header);
-rebuild_hash_block(out, abs_sector, partition);
+generate_junk(out, part_sector, Some(partition), &partition.disc_header);
+rebuild_hash_block(out, part_sector, partition);
encrypt_sector(out, partition);
}
Block::Zero => {
out.fill(0);
-rebuild_hash_block(out, abs_sector, partition);
+rebuild_hash_block(out, part_sector, partition);
encrypt_sector(out, partition);
}
}
@ -239,16 +249,12 @@ impl Block {
self,
out: &mut [u8; SECTOR_SIZE],
data: &[u8],
block_idx: u32,
abs_sector: u32,
disc_header: &DiscHeader,
) -> io::Result<()> {
match self {
Block::Raw => {
-out.copy_from_slice(block_sector::<SECTOR_SIZE>(
-    data,
-    abs_sector - self.start_sector(block_idx, data.len()),
-)?);
+out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
}
Block::PartDecrypted { .. } => {
return Err(io::Error::new(
@ -261,11 +267,6 @@ impl Block {
}
Ok(())
}
-/// Returns the start sector of the block.
-fn start_sector(&self, index: u32, block_size: usize) -> u32 {
-    (index as u64 * block_size as u64 / SECTOR_SIZE as u64) as u32
-}
}
#[inline(always)]
@ -276,14 +277,15 @@ fn block_sector<const N: usize>(data: &[u8], sector_idx: u32) -> io::Result<&[u8
format!("Expected block size {} to be a multiple of {}", data.len(), N),
));
}
-let offset = sector_idx as usize * N;
+let rel_sector = sector_idx % (data.len() / N) as u32;
+let offset = rel_sector as usize * N;
data.get(offset..offset + N)
.ok_or_else(|| {
io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Sector {} out of range (block size {}, sector size {})",
-sector_idx,
+rel_sector,
data.len(),
N
),
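The change above drops the caller-supplied start sector in favor of a modulo: since a block buffer holds a whole number of sectors, an absolute sector index can be reduced directly before indexing into the buffer. A worked example with illustrative values:

```rust
// Worked example of the new rel_sector computation: a block buffer holds
// data.len() / N sectors, so the absolute sector index is reduced modulo
// that count. Values are illustrative.
fn main() {
    const N: usize = 0x8000; // SECTOR_SIZE
    let block_len = 4 * N; // hypothetical 4-sector block
    let sectors_per_block = (block_len / N) as u32;

    let abs_sector = 10u32;
    let rel_sector = abs_sector % sectors_per_block; // 10 % 4 == 2
    let offset = rel_sector as usize * N; // 0x10000
    assert_eq!((rel_sector, offset), (2, 0x10000));
}
```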
@ -298,12 +300,11 @@ fn generate_junk(
partition: Option<&PartitionInfo>,
disc_header: &DiscHeader,
) {
-let mut pos = if let Some(partition) = partition {
-    (sector - partition.data_start_sector) as u64 * SECTOR_DATA_SIZE as u64
+let (mut pos, mut offset) = if partition.is_some() {
+    (sector as u64 * SECTOR_DATA_SIZE as u64, HASHES_SIZE)
} else {
-sector as u64 * SECTOR_SIZE as u64
+(sector as u64 * SECTOR_SIZE as u64, 0)
};
-let mut offset = if partition.is_some() { HASHES_SIZE } else { 0 };
out[..offset].fill(0);
while offset < SECTOR_SIZE {
// The LFG spans a single sector of the decrypted data,
@ -318,11 +319,11 @@ fn generate_junk(
}
}
-fn rebuild_hash_block(out: &mut [u8; SECTOR_SIZE], sector: u32, partition: &PartitionInfo) {
+fn rebuild_hash_block(out: &mut [u8; SECTOR_SIZE], part_sector: u32, partition: &PartitionInfo) {
let Some(hash_table) = partition.hash_table.as_ref() else {
return;
};
-let sector_idx = (sector - partition.data_start_sector) as usize;
+let sector_idx = part_sector as usize;
let h0_hashes: &[u8; 0x26C] =
transmute_ref!(array_ref![hash_table.h0_hashes, sector_idx * 31, 31]);
out[0..0x26C].copy_from_slice(h0_hashes);


@ -80,8 +80,6 @@ impl DiscIOCISO {
None
};
-// Reset reader
-inner.reset();
Ok(Box::new(Self { inner, header, block_map, nkit_header }))
}
}


@ -78,9 +78,6 @@ impl DiscIOGCZ {
// header + block_count * (u64 + u32)
let data_offset = size_of::<GCZHeader>() as u64 + block_count as u64 * 12;
-// Reset reader
-inner.reset();
let block_buf = <u8>::new_box_slice_zeroed(header.block_size.get() as usize);
Ok(Box::new(Self { inner, header, block_map, block_hashes, block_buf, data_offset }))
}


@ -22,12 +22,13 @@ pub(crate) type KeyBytes = [u8; 16];
/// Magic bytes
pub(crate) type MagicBytes = [u8; 4];
/// The disc file format.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum Format {
-/// Raw ISO
+/// ISO / GCM (GameCube master disc)
#[default]
Iso,
-/// CISO
+/// CISO (Compact ISO)
Ciso,
/// GCZ
Gcz,
@ -55,6 +56,7 @@ impl fmt::Display for Format {
}
}
/// The disc file format's compression algorithm.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum Compression {
/// No compression


@ -100,11 +100,6 @@ impl SplitFileReader {
Ok(())
}
-pub fn reset(&mut self) {
-    self.open_file = None;
-    self.pos = 0;
-}
pub fn len(&self) -> u64 { self.files.last().map_or(0, |f| f.begin + f.size) }
}


@ -102,8 +102,6 @@ impl DiscIOWBFS {
inner.seek(SeekFrom::Start(0x10000)).context("Seeking to NKit header")?;
let nkit_header = NKitHeader::try_read_from(&mut inner, header.block_size(), true);
-// Reset reader
-inner.reset();
Ok(Box::new(Self { inner, header, block_map, nkit_header }))
}
}


@ -91,16 +91,16 @@ impl WIAFileHeader {
pub fn is_rvz(&self) -> bool { self.magic == RVZ_MAGIC }
}
-/// Disc type
+/// Disc kind
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub enum DiscType {
+pub enum DiscKind {
/// GameCube disc
GameCube,
/// Wii disc
Wii,
}
-impl TryFrom<u32> for DiscType {
+impl TryFrom<u32> for DiscKind {
type Error = Error;
fn try_from(value: u32) -> Result<Self> {
@ -225,11 +225,11 @@ static_assert!(size_of::<WIADisc>() == 0xDC);
impl WIADisc {
pub fn validate(&self) -> Result<()> {
-DiscType::try_from(self.disc_type.get())?;
+DiscKind::try_from(self.disc_type.get())?;
WIACompression::try_from(self.compression.get())?;
if self.partition_type_size.get() != size_of::<WIAPartition>() as u32 {
return Err(Error::DiscFormat(format!(
"WIA partition type size is {}, expected {}",
"WIA/RVZ partition type size is {}, expected {}",
self.partition_type_size.get(),
size_of::<WIAPartition>()
)));
@ -518,12 +518,12 @@ pub struct DiscIOWIA {
impl Clone for DiscIOWIA {
fn clone(&self) -> Self {
Self {
-inner: self.inner.clone(),
header: self.header.clone(),
disc: self.disc.clone(),
partitions: self.partitions.clone(),
raw_data: self.raw_data.clone(),
groups: self.groups.clone(),
+inner: self.inner.clone(),
nkit_header: self.nkit_header.clone(),
decompressor: self.decompressor.clone(),
group: u32::MAX,
@ -541,7 +541,7 @@ fn verify_hash(buf: &[u8], expected: &HashBytes) -> Result<()> {
let mut expected_bytes = [0u8; 40];
let expected = base16ct::lower::encode_str(expected, &mut expected_bytes).unwrap(); // Safe: fixed buffer size
return Err(Error::DiscFormat(format!(
"WIA hash mismatch: {}, expected {}",
"WIA/RVZ hash mismatch: {}, expected {}",
got, expected
)));
}
@ -685,18 +685,10 @@ impl BlockIO for DiscIOWIA {
sector: u32,
partition: Option<&PartitionInfo>,
) -> io::Result<Block> {
-let mut chunk_size = self.disc.chunk_size.get();
+let chunk_size = self.disc.chunk_size.get();
let sectors_per_chunk = chunk_size / SECTOR_SIZE as u32;
let disc_offset = sector as u64 * SECTOR_SIZE as u64;
-let mut partition_offset = disc_offset;
-if let Some(partition) = partition {
-    // Within a partition, hashes are excluded from the data size
-    chunk_size = (chunk_size * SECTOR_DATA_SIZE as u32) / SECTOR_SIZE as u32;
-    partition_offset =
-        (sector - partition.data_start_sector) as u64 * SECTOR_DATA_SIZE as u64;
-}
-let (group_index, group_sector) = if let Some(partition) = partition {
+let (group_index, group_sector, partition_offset) = if let Some(partition) = partition {
// Find the partition
let Some(wia_part) = self.partitions.get(partition.index) else {
return Err(io::Error::new(
@ -747,7 +739,12 @@ impl BlockIO for DiscIOWIA {
));
}
-(pd.group_index.get() + part_group_index, part_group_sector)
+// Calculate the group offset within the partition
+let part_group_offset =
+    (((part_group_index * sectors_per_chunk) + pd.first_sector.get())
+        - wia_part.partition_data[0].first_sector.get()) as u64
+        * SECTOR_DATA_SIZE as u64;
+(pd.group_index.get() + part_group_index, part_group_sector, part_group_offset)
} else {
let Some(rd) = self.raw_data.iter().find(|d| d.contains(sector)) else {
return Err(io::Error::new(
@ -771,7 +768,7 @@ impl BlockIO for DiscIOWIA {
));
}
-(rd.group_index.get() + group_index, group_sector)
+(rd.group_index.get() + group_index, group_sector, 0)
};
// Fetch the group
@ -790,7 +787,13 @@ impl BlockIO for DiscIOWIA {
// Read group data if necessary
if group_index != self.group {
-self.group_data = Vec::with_capacity(chunk_size as usize);
+let group_data_size = if partition.is_some() {
+    // Within a partition, hashes are excluded from the data size
+    (sectors_per_chunk * SECTOR_DATA_SIZE as u32) as usize
+} else {
+    chunk_size as usize
+};
+self.group_data = Vec::with_capacity(group_data_size);
let group_data_start = group.data_offset.get() as u64 * 4;
self.inner.seek(SeekFrom::Start(group_data_start))?;
@ -864,10 +867,10 @@ impl BlockIO for DiscIOWIA {
// Read sector from cached group data
if partition.is_some() {
let sector_data_start = group_sector as usize * SECTOR_DATA_SIZE;
-let sector_data =
-    &self.group_data[sector_data_start..sector_data_start + SECTOR_DATA_SIZE];
out[..HASHES_SIZE].fill(0);
-out[HASHES_SIZE..SECTOR_SIZE].copy_from_slice(sector_data);
+out[HASHES_SIZE..SECTOR_SIZE].copy_from_slice(
+    &self.group_data[sector_data_start..sector_data_start + SECTOR_DATA_SIZE],
+);
Ok(Block::PartDecrypted { has_hashes: false })
} else {
let sector_data_start = group_sector as usize * SECTOR_SIZE;
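An illustrative sketch of the group addressing these changes rework: a chunk covers a fixed number of sectors, and inside a partition each sector contributes only its 0x7C00 data bytes because the hash block is stripped. Values are illustrative:

```rust
// Illustrative sketch of the group addressing in read_block: a chunk
// covers chunk_size / SECTOR_SIZE sectors, and within a partition each
// sector stores only SECTOR_DATA_SIZE (0x7C00) bytes, hashes excluded.
fn main() {
    const SECTOR_SIZE: u32 = 0x8000;
    const SECTOR_DATA_SIZE: u32 = 0x7C00;
    let chunk_size = 0x20_0000u32; // hypothetical 2 MiB chunk
    let sectors_per_chunk = chunk_size / SECTOR_SIZE; // 64

    let part_sector = 200u32; // sector index relative to the partition data start
    let group_index = part_sector / sectors_per_chunk; // which group to decompress
    let group_sector = part_sector % sectors_per_chunk; // sector within that group

    let group_data_size = sectors_per_chunk * SECTOR_DATA_SIZE; // decompressed size
    let sector_data_start = group_sector * SECTOR_DATA_SIZE; // position inside the group
    println!("group {group_index}: {group_data_size:#x} bytes, sector @ {sector_data_start:#x}");
}
```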


@ -1,40 +1,62 @@
-// #![warn(missing_docs, rustdoc::missing_doc_code_examples)]
-//! Library for traversing & reading GameCube and Wii disc images.
+#![warn(missing_docs)]
+//! Library for traversing & reading Nintendo Optical Disc (GameCube and Wii) images.
//!
-//! Based on the C++ library [nod](https://github.com/AxioDL/nod),
+//! Originally based on the C++ library [nod](https://github.com/AxioDL/nod),
//! but does not currently support authoring.
//!
//! Currently supported file formats:
//! - ISO (GCM)
//! - WIA / RVZ
-//! - WBFS
-//! - NFS (Wii U VC files, e.g. `hif_000000.nfs`)
+//! - WBFS (+ NKit 2 lossless)
+//! - CISO (+ NKit 2 lossless)
+//! - NFS (Wii U VC)
//! - GCZ
//!
//! # Examples
//!
//! Opening a disc image and reading a file:
//!
//! ```no_run
//! use std::io::Read;
//!
-//! use nod::{Disc, PartitionKind};
-//!
-//! fn main() -> nod::Result<()> {
-//!     let disc = Disc::new("path/to/file.iso")?;
-//!     let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
-//!     let meta = partition.meta()?;
-//!     let fst = meta.fst()?;
-//!     if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
-//!         let mut s = String::new();
-//!         partition
-//!             .open_file(node)
-//!             .expect("Failed to open file stream")
-//!             .read_to_string(&mut s)
-//!             .expect("Failed to read file");
-//!         println!("{}", s);
-//!     }
-//!     Ok(())
-//! }
+//! // Open a disc image and the first data partition.
+//! let disc = nod::Disc::new("path/to/file.iso")
+//!     .expect("Failed to open disc");
+//! let mut partition = disc.open_partition_kind(nod::PartitionKind::Data)
+//!     .expect("Failed to open data partition");
+//!
+//! // Read partition metadata and the file system table.
+//! let meta = partition.meta()
+//!     .expect("Failed to read partition metadata");
+//! let fst = meta.fst()
+//!     .expect("File system table is invalid");
+//!
+//! // Find a file by path and read it into a string.
+//! if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
+//!     let mut s = String::new();
+//!     partition
+//!         .open_file(node)
+//!         .expect("Failed to open file stream")
+//!         .read_to_string(&mut s)
+//!         .expect("Failed to read file");
+//!     println!("{}", s);
+//! }
//! ```
+//!
+//! Converting a disc image to raw ISO:
+//!
+//! ```no_run
+//! // Enable `rebuild_encryption` to ensure the output is a valid ISO.
+//! let options = nod::OpenOptions { rebuild_encryption: true, ..Default::default() };
+//! let mut disc = nod::Disc::new_with_options("path/to/file.rvz", &options)
+//!     .expect("Failed to open disc");
+//!
+//! // Read directly from the open disc and write to the output file.
+//! let mut out = std::fs::File::create("output.iso")
+//!     .expect("Failed to create output file");
+//! std::io::copy(&mut disc, &mut out)
+//!     .expect("Failed to write data");
+//! ```
use std::{
io::{Read, Seek},
@ -42,7 +64,7 @@ use std::{
};
pub use disc::{
-AppLoaderHeader, DiscHeader, DolHeader, PartitionBase, PartitionHeader, PartitionKind,
+ApploaderHeader, DiscHeader, DolHeader, PartitionBase, PartitionHeader, PartitionKind,
PartitionMeta, BI2_SIZE, BOOT_SIZE, SECTOR_SIZE,
};
pub use fst::{Fst, Node, NodeKind};
@ -80,7 +102,9 @@ impl From<String> for Error {
/// Helper result type for [`Error`].
pub type Result<T, E = Error> = core::result::Result<T, E>;
/// Helper trait for adding context to errors.
pub trait ErrorContext {
/// Adds context to an error.
fn context(self, context: impl Into<String>) -> Error;
}
@ -88,9 +112,12 @@ impl ErrorContext for std::io::Error {
fn context(self, context: impl Into<String>) -> Error { Error::Io(context.into(), self) }
}
/// Helper trait for adding context to result errors.
pub trait ResultContext<T> {
/// Adds context to a result error.
fn context(self, context: impl Into<String>) -> Result<T>;
/// Adds context to a result error using a closure.
fn with_context<F>(self, f: F) -> Result<T>
where F: FnOnce() -> String;
}
@ -108,6 +135,7 @@ where E: ErrorContext
}
}
/// Options for opening a disc image.
#[derive(Default, Debug, Clone)]
pub struct OpenOptions {
/// Wii: Rebuild partition data encryption and hashes if the underlying format stores data
@ -117,6 +145,9 @@ pub struct OpenOptions {
pub validate_hashes: bool,
}
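A minimal sketch combining the two options, matching how nodtool's `extract --validate` opens a disc (leave partition data decrypted, but verify hashes while reading):

```rust
// Minimal sketch: decrypted reads with hash validation enabled, as
// nodtool's `extract --validate` does. The input path is hypothetical.
let options = nod::OpenOptions { rebuild_encryption: false, validate_hashes: true };
let disc = nod::Disc::new_with_options("path/to/file.rvz", &options)
    .expect("Failed to open disc");
```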
/// An open disc image and read stream.
///
/// This is the primary entry point for reading disc images.
pub struct Disc {
reader: disc::reader::DiscReader,
options: OpenOptions,
@ -135,7 +166,7 @@ impl Disc {
Ok(Disc { reader, options: options.clone() })
}
-/// The disc's header.
+/// The disc's primary header.
pub fn header(&self) -> &DiscHeader { self.reader.header() }
/// Returns extra metadata included in the disc file format, if any.
@ -146,20 +177,20 @@ impl Disc {
/// A list of Wii partitions on the disc.
///
-/// For GameCube discs, this will return an empty slice.
+/// **GameCube**: This will return an empty slice.
pub fn partitions(&self) -> &[PartitionInfo] { self.reader.partitions() }
-/// Opens a new, decrypted partition read stream for the specified partition index.
+/// Opens a decrypted partition read stream for the specified partition index.
///
-/// For GameCube discs, the index must always be 0.
+/// **GameCube**: `index` must always be 0.
pub fn open_partition(&self, index: usize) -> Result<Box<dyn PartitionBase>> {
self.reader.open_partition(index, &self.options)
}
-/// Opens a new partition read stream for the first partition matching
-/// the specified type.
+/// Opens a decrypted partition read stream for the first partition matching
+/// the specified kind.
///
-/// For GameCube discs, the kind must always be `PartitionKind::Data`.
+/// **GameCube**: `kind` must always be [`PartitionKind::Data`].
pub fn open_partition_kind(&self, kind: PartitionKind) -> Result<Box<dyn PartitionBase>> {
self.reader.open_partition_kind(kind, &self.options)
}


@ -1,6 +1,6 @@
[package]
name = "nodtool"
version = "1.0.0"
version = "1.2.0"
edition = "2021"
rust-version = "1.73.0"
authors = ["Luke Street <luke@street.dev>"]


@ -0,0 +1,24 @@
use std::path::PathBuf;
use argp::FromArgs;
use crate::util::shared::convert_and_verify;
#[derive(FromArgs, Debug)]
/// Converts a disc image to ISO.
#[argp(subcommand, name = "convert")]
pub struct ConvertArgs {
#[argp(positional)]
/// path to disc image
file: PathBuf,
#[argp(positional)]
/// output ISO file
out: PathBuf,
#[argp(switch)]
/// enable MD5 hashing (slower)
md5: bool,
}
pub fn convert(args: ConvertArgs) -> nod::Result<()> {
convert_and_verify(&args.file, Some(&args.out), args.md5)
}

nodtool/src/cmd/extract.rs (new file)

@ -0,0 +1,214 @@
use std::{
borrow::Cow,
fs,
fs::File,
io,
io::{BufWriter, Write},
path::{Path, PathBuf},
};
use argp::FromArgs;
use itertools::Itertools;
use nod::{
Disc, DiscHeader, Fst, Node, OpenOptions, PartitionBase, PartitionKind, PartitionMeta,
ResultContext,
};
use size::{Base, Size};
use zerocopy::AsBytes;
use crate::util::{display, has_extension};
#[derive(FromArgs, Debug)]
/// Extract a disc image.
#[argp(subcommand, name = "extract")]
pub struct ExtractArgs {
#[argp(positional)]
/// Path to disc image
file: PathBuf,
#[argp(positional)]
/// Output directory (optional)
out: Option<PathBuf>,
#[argp(switch, short = 'q')]
/// Quiet output
quiet: bool,
#[argp(switch, short = 'h')]
/// Validate data hashes (Wii only)
validate: bool,
#[argp(option, short = 'p')]
/// Partition to extract (default: data)
/// Options: all, data, update, channel, or a partition index
partition: Option<String>,
}
pub fn extract(args: ExtractArgs) -> nod::Result<()> {
let output_dir: PathBuf;
if let Some(dir) = args.out {
output_dir = dir;
} else if has_extension(&args.file, "nfs") {
// Special logic to extract from content/hif_*.nfs to extracted/..
if let Some(parent) = args.file.parent() {
output_dir = parent.with_file_name("extracted");
} else {
output_dir = args.file.with_extension("");
}
} else {
output_dir = args.file.with_extension("");
}
let disc = Disc::new_with_options(&args.file, &OpenOptions {
rebuild_encryption: false,
validate_hashes: args.validate,
})?;
let header = disc.header();
let is_wii = header.is_wii();
if let Some(partition) = args.partition {
if partition.eq_ignore_ascii_case("all") {
for info in disc.partitions() {
let mut out_dir = output_dir.clone();
out_dir.push(info.kind.dir_name().as_ref());
let mut partition = disc.open_partition(info.index)?;
extract_partition(header, partition.as_mut(), &out_dir, is_wii, args.quiet)?;
}
} else if partition.eq_ignore_ascii_case("data") {
let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
extract_partition(header, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
} else if partition.eq_ignore_ascii_case("update") {
let mut partition = disc.open_partition_kind(PartitionKind::Update)?;
extract_partition(header, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
} else if partition.eq_ignore_ascii_case("channel") {
let mut partition = disc.open_partition_kind(PartitionKind::Channel)?;
extract_partition(header, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
} else {
let idx = partition.parse::<usize>().map_err(|_| "Invalid partition index")?;
let mut partition = disc.open_partition(idx)?;
extract_partition(header, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
}
} else {
let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
extract_partition(header, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
}
Ok(())
}
fn extract_partition(
header: &DiscHeader,
partition: &mut dyn PartitionBase,
out_dir: &Path,
is_wii: bool,
quiet: bool,
) -> nod::Result<()> {
let meta = partition.meta()?;
extract_sys_files(header, meta.as_ref(), out_dir, quiet)?;
// Extract FST
let files_dir = out_dir.join("files");
fs::create_dir_all(&files_dir)
.with_context(|| format!("Creating directory {}", display(&files_dir)))?;
let fst = Fst::new(&meta.raw_fst)?;
let mut path_segments = Vec::<(Cow<str>, usize)>::new();
for (idx, node, name) in fst.iter() {
// Remove ended path segments
let mut new_size = 0;
for (_, end) in path_segments.iter() {
if *end == idx {
break;
}
new_size += 1;
}
path_segments.truncate(new_size);
// Add the new path segment
let end = if node.is_dir() { node.length() as usize } else { idx + 1 };
path_segments.push((name?, end));
let path = path_segments.iter().map(|(name, _)| name.as_ref()).join("/");
if node.is_dir() {
fs::create_dir_all(files_dir.join(&path))
.with_context(|| format!("Creating directory {}", path))?;
} else {
extract_node(node, partition, &files_dir, &path, is_wii, quiet)?;
}
}
Ok(())
}
fn extract_sys_files(
header: &DiscHeader,
data: &PartitionMeta,
out_dir: &Path,
quiet: bool,
) -> nod::Result<()> {
let sys_dir = out_dir.join("sys");
fs::create_dir_all(&sys_dir)
.with_context(|| format!("Creating directory {}", display(&sys_dir)))?;
extract_file(data.raw_boot.as_ref(), &sys_dir.join("boot.bin"), quiet)?;
extract_file(data.raw_bi2.as_ref(), &sys_dir.join("bi2.bin"), quiet)?;
extract_file(data.raw_apploader.as_ref(), &sys_dir.join("apploader.img"), quiet)?;
extract_file(data.raw_fst.as_ref(), &sys_dir.join("fst.bin"), quiet)?;
extract_file(data.raw_dol.as_ref(), &sys_dir.join("main.dol"), quiet)?;
// Wii files
if header.is_wii() {
let disc_dir = out_dir.join("disc");
fs::create_dir_all(&disc_dir)
.with_context(|| format!("Creating directory {}", display(&disc_dir)))?;
extract_file(&header.as_bytes()[..0x100], &disc_dir.join("header.bin"), quiet)?;
}
if let Some(ticket) = data.raw_ticket.as_deref() {
extract_file(ticket, &out_dir.join("ticket.bin"), quiet)?;
}
if let Some(tmd) = data.raw_tmd.as_deref() {
extract_file(tmd, &out_dir.join("tmd.bin"), quiet)?;
}
if let Some(cert_chain) = data.raw_cert_chain.as_deref() {
extract_file(cert_chain, &out_dir.join("cert.bin"), quiet)?;
}
if let Some(h3_table) = data.raw_h3_table.as_deref() {
extract_file(h3_table, &out_dir.join("h3.bin"), quiet)?;
}
Ok(())
}
fn extract_file(bytes: &[u8], out_path: &Path, quiet: bool) -> nod::Result<()> {
if !quiet {
println!(
"Extracting {} (size: {})",
display(out_path),
Size::from_bytes(bytes.len()).format().with_base(Base::Base10)
);
}
fs::write(out_path, bytes).with_context(|| format!("Writing file {}", display(out_path)))?;
Ok(())
}
fn extract_node(
node: &Node,
partition: &mut dyn PartitionBase,
base_path: &Path,
name: &str,
is_wii: bool,
quiet: bool,
) -> nod::Result<()> {
let file_path = base_path.join(name);
if !quiet {
println!(
"Extracting {} (size: {})",
display(&file_path),
Size::from_bytes(node.length()).format().with_base(Base::Base10)
);
}
let file = File::create(&file_path)
.with_context(|| format!("Creating file {}", display(&file_path)))?;
let mut w = BufWriter::with_capacity(partition.ideal_buffer_size(), file);
let mut r = partition.open_file(node).with_context(|| {
format!(
"Opening file {} on disc for reading (offset {}, size {})",
name,
node.offset(is_wii),
node.length()
)
})?;
io::copy(&mut r, &mut w).with_context(|| format!("Extracting file {}", display(&file_path)))?;
w.flush().with_context(|| format!("Flushing file {}", display(&file_path)))?;
Ok(())
}

nodtool/src/cmd/info.rs (new file)

@ -0,0 +1,103 @@
use std::path::{Path, PathBuf};
use argp::FromArgs;
use nod::{Disc, OpenOptions, SECTOR_SIZE};
use size::Size;
use crate::util::{display, shared::print_header};
#[derive(FromArgs, Debug)]
/// Displays information about disc images.
#[argp(subcommand, name = "info")]
pub struct InfoArgs {
#[argp(positional)]
/// Path to disc image(s)
file: Vec<PathBuf>,
}
pub fn info(args: InfoArgs) -> nod::Result<()> {
for file in &args.file {
info_file(file)?;
}
Ok(())
}
fn info_file(path: &Path) -> nod::Result<()> {
log::info!("Loading {}", display(path));
let disc = Disc::new_with_options(path, &OpenOptions {
rebuild_encryption: false,
validate_hashes: false,
})?;
let header = disc.header();
let meta = disc.meta();
print_header(header, &meta);
if header.is_wii() {
for (idx, info) in disc.partitions().iter().enumerate() {
println!();
println!("Partition {}", idx);
println!("\tType: {}", info.kind);
let offset = info.start_sector as u64 * SECTOR_SIZE as u64;
println!("\tStart sector: {} (offset {:#X})", info.start_sector, offset);
let data_size =
(info.data_end_sector - info.data_start_sector) as u64 * SECTOR_SIZE as u64;
println!(
"\tData offset / size: {:#X} / {:#X} ({})",
info.data_start_sector as u64 * SECTOR_SIZE as u64,
data_size,
Size::from_bytes(data_size)
);
println!(
"\tTMD offset / size: {:#X} / {:#X}",
offset + info.header.tmd_off(),
info.header.tmd_size()
);
println!(
"\tCert offset / size: {:#X} / {:#X}",
offset + info.header.cert_chain_off(),
info.header.cert_chain_size()
);
println!(
"\tH3 offset / size: {:#X} / {:#X}",
offset + info.header.h3_table_off(),
info.header.h3_table_size()
);
let mut partition = disc.open_partition(idx)?;
let meta = partition.meta()?;
let tmd = meta.tmd_header();
let title_id_str = if let Some(tmd) = tmd {
format!(
"{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}",
tmd.title_id[0],
tmd.title_id[1],
tmd.title_id[2],
tmd.title_id[3],
tmd.title_id[4],
tmd.title_id[5],
tmd.title_id[6],
tmd.title_id[7]
)
} else {
"N/A".to_string()
};
println!("\tTitle: {}", info.disc_header.game_title_str());
println!("\tGame ID: {} ({})", info.disc_header.game_id_str(), title_id_str);
println!(
"\tDisc {}, Revision {}",
info.disc_header.disc_num + 1,
info.disc_header.disc_version
);
}
} else if header.is_gamecube() {
// TODO
} else {
println!(
"Invalid GC/Wii magic: {:#010X}/{:#010X}",
header.gcn_magic.get(),
header.wii_magic.get()
);
}
println!();
Ok(())
}

nodtool/src/cmd/mod.rs (new file)

@ -0,0 +1,4 @@
pub mod convert;
pub mod extract;
pub mod info;
pub mod verify;

nodtool/src/cmd/verify.rs (new file)

@ -0,0 +1,25 @@
use std::path::PathBuf;
use argp::FromArgs;
use crate::util::shared::convert_and_verify;
#[derive(FromArgs, Debug)]
/// Verifies disc images.
#[argp(subcommand, name = "verify")]
pub struct VerifyArgs {
#[argp(positional)]
/// path to disc image(s)
file: Vec<PathBuf>,
#[argp(switch)]
/// enable MD5 hashing (slower)
md5: bool,
}
pub fn verify(args: VerifyArgs) -> nod::Result<()> {
for file in &args.file {
convert_and_verify(file, None, args.md5)?;
println!();
}
Ok(())
}

nodtool/src/lib.rs (new file)

@ -0,0 +1,22 @@
use argp::FromArgs;
pub mod cmd;
pub(crate) mod util;
#[derive(FromArgs, Debug)]
#[argp(subcommand)]
pub enum SubCommand {
Info(cmd::info::InfoArgs),
Extract(cmd::extract::ExtractArgs),
Convert(cmd::convert::ConvertArgs),
Verify(cmd::verify::VerifyArgs),
}
pub fn run(command: SubCommand) -> nod::Result<()> {
match command {
SubCommand::Info(c_args) => cmd::info::info(c_args),
SubCommand::Convert(c_args) => cmd::convert::convert(c_args),
SubCommand::Extract(c_args) => cmd::extract::extract(c_args),
SubCommand::Verify(c_args) => cmd::verify::verify(c_args),
}
}


@ -1,37 +1,13 @@
mod argp_version;
mod digest;
mod redump;
-use std::{
-    borrow::Cow,
-    cmp::min,
-    env,
-    error::Error,
-    ffi::OsStr,
-    fmt, fs,
-    fs::File,
-    io,
-    io::{BufWriter, Read, Write},
-    path::{Path, PathBuf, MAIN_SEPARATOR},
-    str::FromStr,
-    sync::{mpsc::sync_channel, Arc},
-    thread,
-};
+use std::{env, error::Error, ffi::OsStr, fmt, path::PathBuf, str::FromStr};
use argp::{FromArgValue, FromArgs};
-use digest::{digest_thread, DigestResult};
use enable_ansi_support::enable_ansi_support;
-use indicatif::{ProgressBar, ProgressState, ProgressStyle};
-use itertools::Itertools;
-use nod::{
-    Compression, Disc, DiscHeader, DiscMeta, Fst, Node, OpenOptions, PartitionBase, PartitionKind,
-    PartitionMeta, Result, ResultContext, SECTOR_SIZE,
-};
-use size::{Base, Size};
+use nodtool::{run, SubCommand};
use supports_color::Stream;
use tracing::level_filters::LevelFilter;
use tracing_subscriber::EnvFilter;
-use zerocopy::{AsBytes, FromZeroes};
#[derive(FromArgs, Debug)]
/// Tool for reading GameCube and Wii disc images.
@ -54,73 +30,6 @@ struct TopLevel {
no_color: bool,
}
#[derive(FromArgs, Debug)]
#[argp(subcommand)]
enum SubCommand {
Info(InfoArgs),
Extract(ExtractArgs),
Convert(ConvertArgs),
Verify(VerifyArgs),
}
#[derive(FromArgs, Debug)]
/// Displays information about disc images.
#[argp(subcommand, name = "info")]
struct InfoArgs {
#[argp(positional)]
/// Path to disc image(s)
file: Vec<PathBuf>,
}
#[derive(FromArgs, Debug)]
/// Extract a disc image.
#[argp(subcommand, name = "extract")]
struct ExtractArgs {
#[argp(positional)]
/// Path to disc image
file: PathBuf,
#[argp(positional)]
/// Output directory (optional)
out: Option<PathBuf>,
#[argp(switch, short = 'q')]
/// Quiet output
quiet: bool,
#[argp(switch, short = 'h')]
/// Validate data hashes (Wii only)
validate: bool,
#[argp(option, short = 'p')]
/// Partition to extract (default: data)
/// Options: all, data, update, channel, or a partition index
partition: Option<String>,
}
#[derive(FromArgs, Debug)]
/// Converts a disc image to ISO.
#[argp(subcommand, name = "convert")]
struct ConvertArgs {
#[argp(positional)]
/// path to disc image
file: PathBuf,
#[argp(positional)]
/// output ISO file
out: PathBuf,
#[argp(switch)]
/// enable MD5 hashing (slower)
md5: bool,
}
#[derive(FromArgs, Debug)]
/// Verifies disc images.
#[argp(subcommand, name = "verify")]
struct VerifyArgs {
#[argp(positional)]
/// path to disc image(s)
file: Vec<PathBuf>,
#[argp(switch)]
/// enable MD5 hashing (slower)
md5: bool,
}
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
enum LogLevel {
Error,
@ -133,7 +42,7 @@ enum LogLevel {
impl FromStr for LogLevel {
type Err = ();
-fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
+fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(match s {
"error" => Self::Error,
"warn" => Self::Warn,
@ -158,7 +67,7 @@ impl fmt::Display for LogLevel {
}
impl FromArgValue for LogLevel {
-fn from_arg_value(value: &OsStr) -> std::result::Result<Self, String> {
+fn from_arg_value(value: &OsStr) -> Result<Self, String> {
String::from_arg_value(value)
.and_then(|s| Self::from_str(&s).map_err(|_| "Invalid log level".to_string()))
}
@ -210,15 +119,10 @@ fn main() {
let mut result = Ok(());
if let Some(dir) = &args.chdir {
result = env::set_current_dir(dir).map_err(|e| {
nod::Error::Io(format!("Failed to change working directory to '{}'", display(dir)), e)
nod::Error::Io(format!("Failed to change working directory to '{}'", dir.display()), e)
});
}
-result = result.and_then(|_| match args.command {
-    SubCommand::Info(c_args) => info(c_args),
-    SubCommand::Convert(c_args) => convert(c_args),
-    SubCommand::Extract(c_args) => extract(c_args),
-    SubCommand::Verify(c_args) => verify(c_args),
-});
+result = result.and_then(|_| run(args.command));
if let Err(e) = result {
eprintln!("Failed: {}", e);
if let Some(source) = e.source() {
@ -227,499 +131,3 @@ fn main() {
std::process::exit(1);
}
}
fn print_header(header: &DiscHeader, meta: &DiscMeta) {
println!("Format: {}", meta.format);
if meta.compression != Compression::None {
println!("Compression: {}", meta.compression);
}
if let Some(block_size) = meta.block_size {
println!("Block size: {}", Size::from_bytes(block_size));
}
println!("Lossless: {}", meta.lossless);
println!(
"Verification data: {}",
meta.crc32.is_some()
|| meta.md5.is_some()
|| meta.sha1.is_some()
|| meta.xxhash64.is_some()
);
println!();
println!("Title: {}", header.game_title_str());
println!("Game ID: {}", header.game_id_str());
println!("Disc {}, Revision {}", header.disc_num + 1, header.disc_version);
if header.no_partition_hashes != 0 {
println!("[!] Disc has no hashes");
}
if header.no_partition_encryption != 0 {
println!("[!] Disc is not encrypted");
}
}
fn info(args: InfoArgs) -> Result<()> {
for file in &args.file {
info_file(file)?;
}
Ok(())
}
fn info_file(path: &Path) -> Result<()> {
log::info!("Loading {}", display(path));
let disc = Disc::new_with_options(path, &OpenOptions {
rebuild_encryption: false,
validate_hashes: false,
})?;
let header = disc.header();
let meta = disc.meta();
print_header(header, &meta);
if header.is_wii() {
for (idx, info) in disc.partitions().iter().enumerate() {
println!();
println!("Partition {}", idx);
println!("\tType: {}", info.kind);
let offset = info.start_sector as u64 * SECTOR_SIZE as u64;
println!("\tStart sector: {} (offset {:#X})", info.start_sector, offset);
let data_size =
(info.data_end_sector - info.data_start_sector) as u64 * SECTOR_SIZE as u64;
println!(
"\tData offset / size: {:#X} / {:#X} ({})",
info.data_start_sector as u64 * SECTOR_SIZE as u64,
data_size,
Size::from_bytes(data_size)
);
println!(
"\tTMD offset / size: {:#X} / {:#X}",
offset + info.header.tmd_off(),
info.header.tmd_size()
);
println!(
"\tCert offset / size: {:#X} / {:#X}",
offset + info.header.cert_chain_off(),
info.header.cert_chain_size()
);
println!(
"\tH3 offset / size: {:#X} / {:#X}",
offset + info.header.h3_table_off(),
info.header.h3_table_size()
);
let mut partition = disc.open_partition(idx)?;
let meta = partition.meta()?;
let tmd = meta.tmd_header();
let title_id_str = if let Some(tmd) = tmd {
format!(
"{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}",
tmd.title_id[0],
tmd.title_id[1],
tmd.title_id[2],
tmd.title_id[3],
tmd.title_id[4],
tmd.title_id[5],
tmd.title_id[6],
tmd.title_id[7]
)
} else {
"N/A".to_string()
};
println!("\tTitle: {}", info.disc_header.game_title_str());
println!("\tGame ID: {} ({})", info.disc_header.game_id_str(), title_id_str);
println!(
"\tDisc {}, Revision {}",
info.disc_header.disc_num + 1,
info.disc_header.disc_version
);
}
} else if header.is_gamecube() {
// TODO
} else {
println!(
"Invalid GC/Wii magic: {:#010X}/{:#010X}",
header.gcn_magic.get(),
header.wii_magic.get()
);
}
println!();
Ok(())
}
fn convert(args: ConvertArgs) -> Result<()> {
convert_and_verify(&args.file, Some(&args.out), args.md5)
}
fn verify(args: VerifyArgs) -> Result<()> {
for file in &args.file {
convert_and_verify(file, None, args.md5)?;
println!();
}
Ok(())
}
fn convert_and_verify(in_file: &Path, out_file: Option<&Path>, md5: bool) -> Result<()> {
println!("Loading {}", display(in_file));
let mut disc = Disc::new_with_options(in_file, &OpenOptions {
rebuild_encryption: true,
validate_hashes: false,
})?;
let header = disc.header();
let meta = disc.meta();
print_header(header, &meta);
let disc_size = disc.disc_size();
let mut file = if let Some(out_file) = out_file {
Some(
File::create(out_file)
.with_context(|| format!("Creating file {}", display(out_file)))?,
)
} else {
None
};
if out_file.is_some() {
println!("\nConverting...");
} else {
println!("\nVerifying...");
}
let pb = ProgressBar::new(disc_size);
pb.set_style(ProgressStyle::with_template("{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {bytes}/{total_bytes} ({bytes_per_sec}, {eta})")
.unwrap()
.with_key("eta", |state: &ProgressState, w: &mut dyn fmt::Write| {
write!(w, "{:.1}s", state.eta().as_secs_f64()).unwrap()
})
.progress_chars("#>-"));
const BUFFER_SIZE: usize = 1015808; // LCM(0x8000, 0x7C00)
let digest_threads = if md5 {
vec![
digest_thread::<crc32fast::Hasher>(),
digest_thread::<md5::Md5>(),
digest_thread::<sha1::Sha1>(),
digest_thread::<xxhash_rust::xxh64::Xxh64>(),
]
} else {
vec![
digest_thread::<crc32fast::Hasher>(),
digest_thread::<sha1::Sha1>(),
digest_thread::<xxhash_rust::xxh64::Xxh64>(),
]
};
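// The writer gets its own thread behind a bounded channel, so reading,
// hashing, and writing overlap; capacity 1 throttles the reader whenever
// the writer falls behind.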
let (w_tx, w_rx) = sync_channel::<Arc<[u8]>>(1);
let w_thread = thread::spawn(move || {
let mut total_written = 0u64;
while let Ok(data) = w_rx.recv() {
if let Some(file) = &mut file {
file.write_all(data.as_ref())
.with_context(|| {
format!("Writing {} bytes at offset {}", data.len(), total_written)
})
.unwrap();
}
total_written += data.len() as u64;
pb.set_position(total_written);
}
if let Some(mut file) = file {
file.flush().context("Flushing output file").unwrap();
}
pb.finish();
});
let mut total_read = 0u64;
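// zerocopy's FromZeroes allocates the zeroed read buffer directly on the
// heap, avoiding a large stack array.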
let mut buf = <u8>::new_box_slice_zeroed(BUFFER_SIZE);
while total_read < disc_size {
let read = min(BUFFER_SIZE as u64, disc_size - total_read) as usize;
disc.read_exact(&mut buf[..read]).with_context(|| {
format!("Reading {} bytes at disc offset {}", BUFFER_SIZE, total_read)
})?;
let arc = Arc::<[u8]>::from(&buf[..read]);
for (tx, _) in &digest_threads {
tx.send(arc.clone()).map_err(|_| "Sending data to hash thread")?;
}
w_tx.send(arc).map_err(|_| "Sending data to write thread")?;
total_read += read as u64;
}
drop(w_tx); // Close channel
w_thread.join().unwrap();
println!();
if let Some(path) = out_file {
println!("Wrote {} to {}", Size::from_bytes(total_read), display(path));
}
println!();
let mut crc32 = None;
let mut md5 = None;
let mut sha1 = None;
let mut xxh64 = None;
for (tx, handle) in digest_threads {
drop(tx); // Close channel
match handle.join().unwrap() {
DigestResult::Crc32(v) => crc32 = Some(v),
DigestResult::Md5(v) => md5 = Some(v),
DigestResult::Sha1(v) => sha1 = Some(v),
DigestResult::Xxh64(v) => xxh64 = Some(v),
}
}
let redump_entry = crc32.and_then(redump::find_by_crc32);
let expected_crc32 = meta.crc32.or(redump_entry.as_ref().map(|e| e.crc32));
let expected_md5 = meta.md5.or(redump_entry.as_ref().map(|e| e.md5));
let expected_sha1 = meta.sha1.or(redump_entry.as_ref().map(|e| e.sha1));
let expected_xxh64 = meta.xxhash64;
fn print_digest(value: DigestResult, expected: Option<DigestResult>) {
print!("{:<6}: ", value.name());
if let Some(expected) = expected {
if expected != value {
print!("{} ❌ (expected: {})", value, expected);
} else {
print!("{}", value);
}
} else {
print!("{}", value);
}
println!();
}
if let Some(entry) = &redump_entry {
let mut full_match = true;
if let Some(md5) = md5 {
if entry.md5 != md5 {
full_match = false;
}
}
if let Some(sha1) = sha1 {
if entry.sha1 != sha1 {
full_match = false;
}
}
if full_match {
println!("Redump: {}", entry.name);
} else {
println!("Redump: {} ❓ (partial match)", entry.name);
}
} else {
println!("Redump: Not found ❌");
}
if let Some(crc32) = crc32 {
print_digest(DigestResult::Crc32(crc32), expected_crc32.map(DigestResult::Crc32));
}
if let Some(md5) = md5 {
print_digest(DigestResult::Md5(md5), expected_md5.map(DigestResult::Md5));
}
if let Some(sha1) = sha1 {
print_digest(DigestResult::Sha1(sha1), expected_sha1.map(DigestResult::Sha1));
}
if let Some(xxh64) = xxh64 {
print_digest(DigestResult::Xxh64(xxh64), expected_xxh64.map(DigestResult::Xxh64));
}
Ok(())
}
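The loop above fans each disc buffer out to the digest threads and the writer over bounded channels of `Arc<[u8]>`. A minimal std-only sketch of the same fan-out pattern, with a hypothetical byte-counting consumer standing in for the hashers and the output file:

```rust
use std::{
    sync::{mpsc::sync_channel, Arc},
    thread,
};

fn main() {
    // Bounded channel: capacity 1, so a slow consumer blocks the producer
    // instead of letting buffers pile up in memory.
    let (tx, rx) = sync_channel::<Arc<[u8]>>(1);
    let consumer = thread::spawn(move || {
        let mut total = 0u64;
        // recv() returns Err once every sender has been dropped.
        while let Ok(chunk) = rx.recv() {
            total += chunk.len() as u64; // stand-in for hashing or writing
        }
        total
    });
    for _ in 0..4 {
        // Arc<[u8]> lets several consumers share one read buffer, no copies.
        let chunk: Arc<[u8]> = Arc::from(vec![0u8; 0x8000]);
        tx.send(chunk).expect("consumer hung up");
    }
    drop(tx); // close the channel so the consumer's loop terminates
    assert_eq!(consumer.join().unwrap(), 4 * 0x8000u64);
}
```

Dropping the sender is what ends the consumer's loop, which is why the real code drops `w_tx` and each digest `tx` before joining the threads.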
pub fn has_extension(filename: &Path, extension: &str) -> bool {
match filename.extension() {
Some(ext) => ext.eq_ignore_ascii_case(extension),
None => false,
}
}
fn extract(args: ExtractArgs) -> Result<()> {
let output_dir: PathBuf;
if let Some(dir) = args.out {
output_dir = dir;
} else if has_extension(&args.file, "nfs") {
// Special case: for NFS images at content/hif_*.nfs, extract to a sibling extracted/ directory
if let Some(parent) = args.file.parent() {
output_dir = parent.with_file_name("extracted");
} else {
output_dir = args.file.with_extension("");
}
} else {
output_dir = args.file.with_extension("");
}
let disc = Disc::new_with_options(&args.file, &OpenOptions {
rebuild_encryption: false,
validate_hashes: args.validate,
})?;
let header = disc.header();
let is_wii = header.is_wii();
if let Some(partition) = args.partition {
if partition.eq_ignore_ascii_case("all") {
for info in disc.partitions() {
let mut out_dir = output_dir.clone();
out_dir.push(info.kind.dir_name().as_ref());
let mut partition = disc.open_partition(info.index)?;
extract_partition(header, partition.as_mut(), &out_dir, is_wii, args.quiet)?;
}
} else if partition.eq_ignore_ascii_case("data") {
let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
extract_partition(header, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
} else if partition.eq_ignore_ascii_case("update") {
let mut partition = disc.open_partition_kind(PartitionKind::Update)?;
extract_partition(header, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
} else if partition.eq_ignore_ascii_case("channel") {
let mut partition = disc.open_partition_kind(PartitionKind::Channel)?;
extract_partition(header, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
} else {
let idx = partition.parse::<usize>().map_err(|_| "Invalid partition index")?;
let mut partition = disc.open_partition(idx)?;
extract_partition(header, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
}
} else {
let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
extract_partition(header, partition.as_mut(), &output_dir, is_wii, args.quiet)?;
}
Ok(())
}
fn extract_partition(
header: &DiscHeader,
partition: &mut dyn PartitionBase,
out_dir: &Path,
is_wii: bool,
quiet: bool,
) -> Result<()> {
let meta = partition.meta()?;
extract_sys_files(header, meta.as_ref(), out_dir, quiet)?;
// Extract FST
let files_dir = out_dir.join("files");
fs::create_dir_all(&files_dir)
.with_context(|| format!("Creating directory {}", display(&files_dir)))?;
let fst = Fst::new(&meta.raw_fst)?;
let mut path_segments = Vec::<(Cow<str>, usize)>::new();
for (idx, node, name) in fst.iter() {
// Remove ended path segments
let mut new_size = 0;
for (_, end) in path_segments.iter() {
if *end == idx {
break;
}
new_size += 1;
}
path_segments.truncate(new_size);
// Add the new path segment
let end = if node.is_dir() { node.length() as usize } else { idx + 1 };
path_segments.push((name?, end));
let path = path_segments.iter().map(|(name, _)| name.as_ref()).join("/");
if node.is_dir() {
fs::create_dir_all(files_dir.join(&path))
.with_context(|| format!("Creating directory {}", path))?;
} else {
extract_node(node, partition, &files_dir, &path, is_wii, quiet)?;
}
}
Ok(())
}
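The walk above reconstructs paths from the flattened FST with a stack of `(name, end_index)` segments, truncating the stack whenever the current index reaches a segment's end. An equivalent pop-while formulation over a mocked table (hypothetical `Entry` type, simplified from nod's `Node`) illustrates the idea:

```rust
// Flattened, FST-style table: a directory's `end` is one past its last
// descendant; a file's `end` is its own index + 1.
struct Entry {
    name: &'static str,
    is_dir: bool,
    end: usize,
}

fn main() {
    let entries = [
        Entry { name: "root", is_dir: true, end: 4 },
        Entry { name: "a.txt", is_dir: false, end: 2 },
        Entry { name: "sub", is_dir: true, end: 4 },
        Entry { name: "b.txt", is_dir: false, end: 4 },
    ];
    let mut stack: Vec<(&str, usize)> = Vec::new();
    for (idx, e) in entries.iter().enumerate() {
        // Pop every segment whose subtree ended at or before this index.
        while stack.last().map_or(false, |&(_, end)| end <= idx) {
            stack.pop();
        }
        stack.push((e.name, e.end));
        let path = stack.iter().map(|&(n, _)| n).collect::<Vec<_>>().join("/");
        println!("{} {}", if e.is_dir { "dir " } else { "file" }, path);
    }
}
```

A file's `end` isn't stored in the real FST; the code derives it as `idx + 1` for files and `node.length()` for directories, which preserves the same invariant.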
fn extract_sys_files(
header: &DiscHeader,
data: &PartitionMeta,
out_dir: &Path,
quiet: bool,
) -> Result<()> {
let sys_dir = out_dir.join("sys");
fs::create_dir_all(&sys_dir)
.with_context(|| format!("Creating directory {}", display(&sys_dir)))?;
extract_file(data.raw_boot.as_ref(), &sys_dir.join("boot.bin"), quiet)?;
extract_file(data.raw_bi2.as_ref(), &sys_dir.join("bi2.bin"), quiet)?;
extract_file(data.raw_apploader.as_ref(), &sys_dir.join("apploader.img"), quiet)?;
extract_file(data.raw_fst.as_ref(), &sys_dir.join("fst.bin"), quiet)?;
extract_file(data.raw_dol.as_ref(), &sys_dir.join("main.dol"), quiet)?;
// Wii files
if header.is_wii() {
let disc_dir = out_dir.join("disc");
fs::create_dir_all(&disc_dir)
.with_context(|| format!("Creating directory {}", display(&disc_dir)))?;
extract_file(&header.as_bytes()[..0x100], &disc_dir.join("header.bin"), quiet)?;
}
if let Some(ticket) = data.raw_ticket.as_deref() {
extract_file(ticket, &out_dir.join("ticket.bin"), quiet)?;
}
if let Some(tmd) = data.raw_tmd.as_deref() {
extract_file(tmd, &out_dir.join("tmd.bin"), quiet)?;
}
if let Some(cert_chain) = data.raw_cert_chain.as_deref() {
extract_file(cert_chain, &out_dir.join("cert.bin"), quiet)?;
}
if let Some(h3_table) = data.raw_h3_table.as_deref() {
extract_file(h3_table, &out_dir.join("h3.bin"), quiet)?;
}
Ok(())
}
fn extract_file(bytes: &[u8], out_path: &Path, quiet: bool) -> Result<()> {
if !quiet {
println!(
"Extracting {} (size: {})",
display(out_path),
Size::from_bytes(bytes.len()).format().with_base(Base::Base10)
);
}
fs::write(out_path, bytes).with_context(|| format!("Writing file {}", display(out_path)))?;
Ok(())
}
fn extract_node(
node: &Node,
partition: &mut dyn PartitionBase,
base_path: &Path,
name: &str,
is_wii: bool,
quiet: bool,
) -> Result<()> {
let file_path = base_path.join(name);
if !quiet {
println!(
"Extracting {} (size: {})",
display(&file_path),
Size::from_bytes(node.length()).format().with_base(Base::Base10)
);
}
let file = File::create(&file_path)
.with_context(|| format!("Creating file {}", display(&file_path)))?;
let mut w = BufWriter::with_capacity(partition.ideal_buffer_size(), file);
let mut r = partition.open_file(node).with_context(|| {
format!(
"Opening file {} on disc for reading (offset {}, size {})",
name,
node.offset(is_wii),
node.length()
)
})?;
io::copy(&mut r, &mut w).with_context(|| format!("Extracting file {}", display(&file_path)))?;
w.flush().with_context(|| format!("Flushing file {}", display(&file_path)))?;
Ok(())
}
fn display(path: &Path) -> PathDisplay { PathDisplay { path } }
struct PathDisplay<'a> {
path: &'a Path,
}
impl<'a> fmt::Display for PathDisplay<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use fmt::Write;
let mut first = true;
for segment in self.path.iter() {
let segment_str = segment.to_string_lossy();
if segment_str == "." {
continue;
}
if first {
first = false;
} else {
f.write_char(MAIN_SEPARATOR)?;
}
f.write_str(&segment_str)?;
}
Ok(())
}
}

41
nodtool/src/util/mod.rs Normal file
View File

@ -0,0 +1,41 @@
pub mod digest;
pub mod redump;
pub mod shared;
use std::{
fmt,
fmt::Write,
path::{Path, MAIN_SEPARATOR},
};
pub fn display(path: &Path) -> PathDisplay { PathDisplay { path } }
pub struct PathDisplay<'a> {
path: &'a Path,
}
impl<'a> fmt::Display for PathDisplay<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut first = true;
for segment in self.path.iter() {
let segment_str = segment.to_string_lossy();
if segment_str == "." {
continue;
}
if first {
first = false;
} else {
f.write_char(MAIN_SEPARATOR)?;
}
f.write_str(&segment_str)?;
}
Ok(())
}
}
pub fn has_extension(filename: &Path, extension: &str) -> bool {
match filename.extension() {
Some(ext) => ext.eq_ignore_ascii_case(extension),
None => false,
}
}
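For illustration, a std-only sketch of the normalization `PathDisplay` performs (assumed behavior, mirroring the impl above: segments joined with the platform separator, "." components dropped):

```rust
use std::path::{Path, MAIN_SEPARATOR};

// Mirrors PathDisplay: join path segments with the platform separator,
// skipping the "." component a relative path like "./foo" starts with.
fn normalized(path: &Path) -> String {
    let mut out = String::new();
    for segment in path.iter() {
        let s = segment.to_string_lossy();
        if s == "." {
            continue;
        }
        if !out.is_empty() {
            out.push(MAIN_SEPARATOR);
        }
        out.push_str(&s);
    }
    out
}

fn main() {
    // "./foo/bar.bin" renders as "foo/bar.bin" ("foo\bar.bin" on Windows).
    assert_eq!(
        normalized(Path::new("./foo/bar.bin")),
        format!("foo{}bar.bin", MAIN_SEPARATOR)
    );
}
```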

nodtool/src/util/redump.rs
View File

@ -1,6 +1,6 @@
use std::{mem::size_of, str};
use nod::{array_ref, SECTOR_SIZE};
use nod::array_ref;
use zerocopy::{FromBytes, FromZeroes};
#[derive(Clone, Debug)]
@ -9,7 +9,6 @@ pub struct GameResult {
pub crc32: u32,
pub md5: [u8; 16],
pub sha1: [u8; 20],
pub size: u64,
}
pub fn find_by_crc32(crc32: u32) -> Option<GameResult> {
@ -30,13 +29,7 @@ pub fn find_by_crc32(crc32: u32) -> Option<GameResult> {
let offset = entry.string_table_offset as usize;
let name_size = u32::from_ne_bytes(*array_ref![string_table, offset, 4]) as usize;
let name = str::from_utf8(&string_table[offset + 4..offset + 4 + name_size]).unwrap();
Some(GameResult {
name,
crc32: entry.crc32,
md5: entry.md5,
sha1: entry.sha1,
size: entry.sectors as u64 * SECTOR_SIZE as u64,
})
Some(GameResult { name, crc32: entry.crc32, md5: entry.md5, sha1: entry.sha1 })
}
#[repr(C, align(4))]

210
nodtool/src/util/shared.rs Normal file
View File

@ -0,0 +1,210 @@
use std::{
cmp::min,
fmt,
fs::File,
io::{Read, Write},
path::Path,
sync::{mpsc::sync_channel, Arc},
thread,
};
use indicatif::{ProgressBar, ProgressState, ProgressStyle};
use nod::{Compression, Disc, DiscHeader, DiscMeta, OpenOptions, Result, ResultContext};
use size::Size;
use zerocopy::FromZeroes;
use crate::util::{
digest::{digest_thread, DigestResult},
display, redump,
};
pub fn print_header(header: &DiscHeader, meta: &DiscMeta) {
println!("Format: {}", meta.format);
if meta.compression != Compression::None {
println!("Compression: {}", meta.compression);
}
if let Some(block_size) = meta.block_size {
println!("Block size: {}", Size::from_bytes(block_size));
}
println!("Lossless: {}", meta.lossless);
println!(
"Verification data: {}",
meta.crc32.is_some()
|| meta.md5.is_some()
|| meta.sha1.is_some()
|| meta.xxhash64.is_some()
);
println!();
println!("Title: {}", header.game_title_str());
println!("Game ID: {}", header.game_id_str());
println!("Disc {}, Revision {}", header.disc_num + 1, header.disc_version);
if header.no_partition_hashes != 0 {
println!("[!] Disc has no hashes");
}
if header.no_partition_encryption != 0 {
println!("[!] Disc is not encrypted");
}
}
pub fn convert_and_verify(in_file: &Path, out_file: Option<&Path>, md5: bool) -> Result<()> {
println!("Loading {}", display(in_file));
let mut disc = Disc::new_with_options(in_file, &OpenOptions {
rebuild_encryption: true,
validate_hashes: false,
})?;
let header = disc.header();
let meta = disc.meta();
print_header(header, &meta);
let disc_size = disc.disc_size();
let mut file = if let Some(out_file) = out_file {
Some(
File::create(out_file)
.with_context(|| format!("Creating file {}", display(out_file)))?,
)
} else {
None
};
if out_file.is_some() {
println!("\nConverting...");
} else {
println!("\nVerifying...");
}
let pb = ProgressBar::new(disc_size);
pb.set_style(ProgressStyle::with_template("{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {bytes}/{total_bytes} ({bytes_per_sec}, {eta})")
.unwrap()
.with_key("eta", |state: &ProgressState, w: &mut dyn fmt::Write| {
write!(w, "{:.1}s", state.eta().as_secs_f64()).unwrap()
})
.progress_chars("#>-"));
const BUFFER_SIZE: usize = 1015808; // LCM(0x8000, 0x7C00)
let digest_threads = if md5 {
vec![
digest_thread::<crc32fast::Hasher>(),
digest_thread::<md5::Md5>(),
digest_thread::<sha1::Sha1>(),
digest_thread::<xxhash_rust::xxh64::Xxh64>(),
]
} else {
vec![
digest_thread::<crc32fast::Hasher>(),
digest_thread::<sha1::Sha1>(),
digest_thread::<xxhash_rust::xxh64::Xxh64>(),
]
};
let (w_tx, w_rx) = sync_channel::<Arc<[u8]>>(1);
let w_thread = thread::spawn(move || {
let mut total_written = 0u64;
while let Ok(data) = w_rx.recv() {
if let Some(file) = &mut file {
file.write_all(data.as_ref())
.with_context(|| {
format!("Writing {} bytes at offset {}", data.len(), total_written)
})
.unwrap();
}
total_written += data.len() as u64;
pb.set_position(total_written);
}
if let Some(mut file) = file {
file.flush().context("Flushing output file").unwrap();
}
pb.finish();
});
let mut total_read = 0u64;
let mut buf = <u8>::new_box_slice_zeroed(BUFFER_SIZE);
while total_read < disc_size {
let read = min(BUFFER_SIZE as u64, disc_size - total_read) as usize;
disc.read_exact(&mut buf[..read]).with_context(|| {
format!("Reading {} bytes at disc offset {}", BUFFER_SIZE, total_read)
})?;
let arc = Arc::<[u8]>::from(&buf[..read]);
for (tx, _) in &digest_threads {
tx.send(arc.clone()).map_err(|_| "Sending data to hash thread")?;
}
w_tx.send(arc).map_err(|_| "Sending data to write thread")?;
total_read += read as u64;
}
drop(w_tx); // Close channel
w_thread.join().unwrap();
println!();
if let Some(path) = out_file {
println!("Wrote {} to {}", Size::from_bytes(total_read), display(path));
}
println!();
let mut crc32 = None;
let mut md5 = None;
let mut sha1 = None;
let mut xxh64 = None;
for (tx, handle) in digest_threads {
drop(tx); // Close channel
match handle.join().unwrap() {
DigestResult::Crc32(v) => crc32 = Some(v),
DigestResult::Md5(v) => md5 = Some(v),
DigestResult::Sha1(v) => sha1 = Some(v),
DigestResult::Xxh64(v) => xxh64 = Some(v),
}
}
let redump_entry = crc32.and_then(redump::find_by_crc32);
let expected_crc32 = meta.crc32.or(redump_entry.as_ref().map(|e| e.crc32));
let expected_md5 = meta.md5.or(redump_entry.as_ref().map(|e| e.md5));
let expected_sha1 = meta.sha1.or(redump_entry.as_ref().map(|e| e.sha1));
let expected_xxh64 = meta.xxhash64;
fn print_digest(value: DigestResult, expected: Option<DigestResult>) {
print!("{:<6}: ", value.name());
if let Some(expected) = expected {
if expected != value {
print!("{} ❌ (expected: {})", value, expected);
} else {
print!("{}", value);
}
} else {
print!("{}", value);
}
println!();
}
if let Some(entry) = &redump_entry {
let mut full_match = true;
if let Some(md5) = md5 {
if entry.md5 != md5 {
full_match = false;
}
}
if let Some(sha1) = sha1 {
if entry.sha1 != sha1 {
full_match = false;
}
}
if full_match {
println!("Redump: {}", entry.name);
} else {
println!("Redump: {} ❓ (partial match)", entry.name);
}
} else {
println!("Redump: Not found ❌");
}
if let Some(crc32) = crc32 {
print_digest(DigestResult::Crc32(crc32), expected_crc32.map(DigestResult::Crc32));
}
if let Some(md5) = md5 {
print_digest(DigestResult::Md5(md5), expected_md5.map(DigestResult::Md5));
}
if let Some(sha1) = sha1 {
print_digest(DigestResult::Sha1(sha1), expected_sha1.map(DigestResult::Sha1));
}
if let Some(xxh64) = xxh64 {
print_digest(DigestResult::Xxh64(xxh64), expected_xxh64.map(DigestResult::Xxh64));
}
Ok(())
}

rustfmt.toml
View File

@ -6,4 +6,3 @@ reorder_impl_items = true
use_field_init_shorthand = true
use_small_heuristics = "Max"
where_single_line = true
format_code_in_doc_comments = true