mirror of
https://github.com/encounter/nod-rs.git
synced 2025-12-14 07:36:16 +00:00
Restore all functionality, split lib/bin & integrate redump validation
This commit is contained in:
39
nod/Cargo.toml
Normal file
39
nod/Cargo.toml
Normal file
@@ -0,0 +1,39 @@
|
||||
[package]
|
||||
name = "nod"
|
||||
version = "0.2.0"
|
||||
edition = "2021"
|
||||
rust-version = "1.59.0"
|
||||
authors = ["Luke Street <luke@street.dev>"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
repository = "https://github.com/encounter/nod-rs"
|
||||
documentation = "https://docs.rs/nod"
|
||||
readme = "../README.md"
|
||||
description = """
|
||||
Library for reading GameCube and Wii disc images.
|
||||
"""
|
||||
keywords = ["gamecube", "wii", "iso", "wbfs", "rvz"]
|
||||
categories = ["command-line-utilities", "parser-implementations"]
|
||||
|
||||
[features]
|
||||
default = ["compress-bzip2", "compress-lzma", "compress-zstd"]
|
||||
asm = ["sha1/asm"]
|
||||
compress-bzip2 = ["bzip2"]
|
||||
compress-lzma = ["liblzma"]
|
||||
compress-zstd = ["zstd"]
|
||||
|
||||
[dependencies]
|
||||
aes = "0.8.4"
|
||||
base16ct = "0.2.0"
|
||||
bzip2 = { version = "0.4.4", features = ["static"], optional = true }
|
||||
cbc = "0.1.2"
|
||||
digest = "0.10.7"
|
||||
dyn-clone = "1.0.16"
|
||||
encoding_rs = "0.8.33"
|
||||
itertools = "0.12.1"
|
||||
liblzma = { version = "0.2.3", features = ["static"], optional = true }
|
||||
log = "0.4.20"
|
||||
rayon = "1.8.1"
|
||||
sha1 = "0.10.6"
|
||||
thiserror = "1.0.57"
|
||||
zerocopy = { version = "0.7.32", features = ["alloc", "derive"] }
|
||||
zstd = { version = "0.13.0", optional = true }
|
||||
202
nod/src/disc/gcn.rs
Normal file
202
nod/src/disc/gcn.rs
Normal file
@@ -0,0 +1,202 @@
|
||||
use std::{
|
||||
cmp::min,
|
||||
io,
|
||||
io::{Read, Seek, SeekFrom},
|
||||
mem::size_of,
|
||||
};
|
||||
|
||||
use zerocopy::{FromBytes, FromZeroes};
|
||||
|
||||
use crate::{
|
||||
disc::{
|
||||
AppLoaderHeader, DiscHeader, DolHeader, PartitionBase, PartitionHeader, PartitionMeta,
|
||||
BI2_SIZE, BOOT_SIZE, SECTOR_SIZE,
|
||||
},
|
||||
fst::{Node, NodeKind},
|
||||
io::block::{Block, BlockIO},
|
||||
streams::{ReadStream, SharedWindowedReadStream},
|
||||
util::read::{read_box, read_box_slice, read_vec},
|
||||
Result, ResultContext,
|
||||
};
|
||||
|
||||
/// Read stream for a GameCube disc "partition" (the whole disc, since
/// GameCube discs are unpartitioned). Caches one block and one sector
/// at a time on top of a [`BlockIO`].
pub struct PartitionGC {
    // Underlying block-based disc image reader.
    io: Box<dyn BlockIO>,
    // Most recently read block, if any (None until the first read).
    block: Option<Block>,
    // Raw bytes of the current block.
    block_buf: Box<[u8]>,
    // Index of the buffered block; u32::MAX means "no block cached".
    block_idx: u32,
    // Bytes of the current sector, copied out of the block.
    sector_buf: Box<[u8; SECTOR_SIZE]>,
    // Index of the buffered sector; u32::MAX means "no sector cached".
    sector: u32,
    // Current absolute read position in bytes.
    pos: u64,
    // Disc header, passed through to Block::copy_raw.
    disc_header: Box<DiscHeader>,
}
|
||||
|
||||
impl Clone for PartitionGC {
    /// Manual `Clone`: shares the underlying `BlockIO` handle but resets all
    /// cached state (block/sector buffers, cache indices, position), so the
    /// clone starts reading from offset 0 with empty caches.
    fn clone(&self) -> Self {
        Self {
            io: self.io.clone(),
            block: None,
            // Fresh zeroed buffer of the same block size; contents not shared.
            block_buf: <u8>::new_box_slice_zeroed(self.block_buf.len()),
            block_idx: u32::MAX,
            sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed(),
            sector: u32::MAX,
            pos: 0,
            disc_header: self.disc_header.clone(),
        }
    }
}
|
||||
|
||||
impl PartitionGC {
    /// Creates a new GameCube partition reader over `inner`, sizing the
    /// internal block buffer to the reader's block size.
    pub fn new(inner: Box<dyn BlockIO>, disc_header: Box<DiscHeader>) -> Result<Box<Self>> {
        let block_size = inner.block_size();
        Ok(Box::new(Self {
            io: inner,
            block: None,
            block_buf: <u8>::new_box_slice_zeroed(block_size as usize),
            // u32::MAX sentinels force a block/sector read on first access.
            block_idx: u32::MAX,
            sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed(),
            sector: u32::MAX,
            pos: 0,
            disc_header,
        }))
    }

    /// Consumes the reader and returns the underlying `BlockIO`.
    pub fn into_inner(self) -> Box<dyn BlockIO> { self.io }
}
|
||||
|
||||
impl Read for PartitionGC {
    /// Reads up to one sector's worth of data at the current position.
    ///
    /// Blocks and sectors are cached, so sequential reads only hit the
    /// underlying `BlockIO` when crossing a block boundary. Returns `Ok(0)`
    /// (EOF) when `read_block` yields no block for the requested index.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let sector = (self.pos / SECTOR_SIZE as u64) as u32;
        let block_idx = (sector as u64 * SECTOR_SIZE as u64 / self.block_buf.len() as u64) as u32;

        // Read new block if necessary
        if block_idx != self.block_idx {
            self.block = self.io.read_block(self.block_buf.as_mut(), block_idx, None)?;
            self.block_idx = block_idx;
        }

        // Copy sector if necessary
        if sector != self.sector {
            let Some(block) = &self.block else {
                // No block at this index: treat as end of stream.
                return Ok(0);
            };
            block.copy_raw(
                self.sector_buf.as_mut(),
                self.block_buf.as_ref(),
                block_idx,
                sector,
                &self.disc_header,
            )?;
            self.sector = sector;
        }

        // Copy out of the cached sector. A single call never crosses a
        // sector boundary, so it may return fewer bytes than requested.
        let offset = (self.pos % SECTOR_SIZE as u64) as usize;
        let len = min(buf.len(), SECTOR_SIZE - offset);
        buf[..len].copy_from_slice(&self.sector_buf[offset..offset + len]);
        self.pos += len as u64;
        Ok(len)
    }
}
|
||||
|
||||
impl Seek for PartitionGC {
|
||||
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
|
||||
self.pos = match pos {
|
||||
SeekFrom::Start(v) => v,
|
||||
SeekFrom::End(_) => {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::Unsupported,
|
||||
"GCPartitionReader: SeekFrom::End is not supported".to_string(),
|
||||
));
|
||||
}
|
||||
SeekFrom::Current(v) => self.pos.saturating_add_signed(v),
|
||||
};
|
||||
Ok(self.pos)
|
||||
}
|
||||
}
|
||||
|
||||
impl PartitionBase for PartitionGC {
    /// Reads the partition header and file system table from offset 0.
    fn meta(&mut self) -> Result<Box<PartitionMeta>> {
        self.seek(SeekFrom::Start(0)).context("Seeking to partition metadata")?;
        // GameCube discs never use Wii offset shifting.
        read_part_meta(self, false)
    }

    /// Returns a windowed stream over `node`'s data.
    ///
    /// # Panics
    /// Panics if `node` is not a file node.
    fn open_file(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream> {
        assert_eq!(node.kind(), NodeKind::File);
        self.new_window(node.offset(false), node.length(false))
    }

    /// GameCube data is read in whole raw sectors.
    fn ideal_buffer_size(&self) -> usize { SECTOR_SIZE }
}
|
||||
|
||||
pub(crate) fn read_part_meta(
|
||||
reader: &mut dyn ReadStream,
|
||||
is_wii: bool,
|
||||
) -> Result<Box<PartitionMeta>> {
|
||||
// boot.bin
|
||||
let raw_boot: Box<[u8; BOOT_SIZE]> = read_box(reader).context("Reading boot.bin")?;
|
||||
let partition_header = PartitionHeader::ref_from(&raw_boot[size_of::<DiscHeader>()..]).unwrap();
|
||||
|
||||
// bi2.bin
|
||||
let raw_bi2: Box<[u8; BI2_SIZE]> = read_box(reader).context("Reading bi2.bin")?;
|
||||
|
||||
// apploader.bin
|
||||
let mut raw_apploader: Vec<u8> =
|
||||
read_vec(reader, size_of::<AppLoaderHeader>()).context("Reading apploader header")?;
|
||||
let apploader_header = AppLoaderHeader::ref_from(raw_apploader.as_slice()).unwrap();
|
||||
raw_apploader.resize(
|
||||
size_of::<AppLoaderHeader>()
|
||||
+ apploader_header.size.get() as usize
|
||||
+ apploader_header.trailer_size.get() as usize,
|
||||
0,
|
||||
);
|
||||
reader
|
||||
.read_exact(&mut raw_apploader[size_of::<AppLoaderHeader>()..])
|
||||
.context("Reading apploader")?;
|
||||
|
||||
// fst.bin
|
||||
reader
|
||||
.seek(SeekFrom::Start(partition_header.fst_off(is_wii)))
|
||||
.context("Seeking to FST offset")?;
|
||||
let raw_fst: Box<[u8]> = read_box_slice(reader, partition_header.fst_sz(is_wii) as usize)
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Reading partition FST (offset {}, size {})",
|
||||
partition_header.fst_off, partition_header.fst_sz
|
||||
)
|
||||
})?;
|
||||
|
||||
// main.dol
|
||||
reader
|
||||
.seek(SeekFrom::Start(partition_header.dol_off(is_wii)))
|
||||
.context("Seeking to DOL offset")?;
|
||||
let mut raw_dol: Vec<u8> =
|
||||
read_vec(reader, size_of::<DolHeader>()).context("Reading DOL header")?;
|
||||
let dol_header = DolHeader::ref_from(raw_dol.as_slice()).unwrap();
|
||||
let dol_size = dol_header
|
||||
.text_offs
|
||||
.iter()
|
||||
.zip(&dol_header.text_sizes)
|
||||
.map(|(offs, size)| offs.get() + size.get())
|
||||
.chain(
|
||||
dol_header
|
||||
.data_offs
|
||||
.iter()
|
||||
.zip(&dol_header.data_sizes)
|
||||
.map(|(offs, size)| offs.get() + size.get()),
|
||||
)
|
||||
.max()
|
||||
.unwrap_or(size_of::<DolHeader>() as u32);
|
||||
raw_dol.resize(dol_size as usize, 0);
|
||||
reader.read_exact(&mut raw_dol[size_of::<DolHeader>()..]).context("Reading DOL")?;
|
||||
|
||||
Ok(Box::new(PartitionMeta {
|
||||
raw_boot,
|
||||
raw_bi2,
|
||||
raw_apploader: raw_apploader.into_boxed_slice(),
|
||||
raw_fst,
|
||||
raw_dol: raw_dol.into_boxed_slice(),
|
||||
raw_ticket: None,
|
||||
raw_tmd: None,
|
||||
raw_cert_chain: None,
|
||||
raw_h3_table: None,
|
||||
}))
|
||||
}
|
||||
203
nod/src/disc/hashes.rs
Normal file
203
nod/src/disc/hashes.rs
Normal file
@@ -0,0 +1,203 @@
|
||||
use std::{
|
||||
io::{Read, Seek, SeekFrom},
|
||||
sync::{Arc, Mutex},
|
||||
time::Instant,
|
||||
};
|
||||
|
||||
use rayon::iter::{IntoParallelIterator, ParallelIterator};
|
||||
use sha1::{Digest, Sha1};
|
||||
use zerocopy::FromZeroes;
|
||||
|
||||
use crate::{
|
||||
array_ref, array_ref_mut,
|
||||
disc::{
|
||||
reader::DiscReader,
|
||||
wii::{HASHES_SIZE, SECTOR_DATA_SIZE},
|
||||
},
|
||||
io::HashBytes,
|
||||
util::read::read_box_slice,
|
||||
OpenOptions, Result, ResultContext, SECTOR_SIZE,
|
||||
};
|
||||
|
||||
/// In a sector, following the 0x400 byte block of hashes, each 0x400 bytes of decrypted data is
/// hashed, yielding 31 H0 hashes.
/// Then, 8 sectors are aggregated into a subgroup, and the 31 H0 hashes for each sector are hashed,
/// yielding 8 H1 hashes.
/// Then, 8 subgroups are aggregated into a group, and the 8 H1 hashes for each subgroup are hashed,
/// yielding 8 H2 hashes.
/// Finally, the 8 H2 hashes for each group are hashed, yielding 1 H3 hash.
/// The H3 hashes for each group are stored in the partition's H3 table.
///
/// All tables are sized for the sector count rounded up to a whole number of
/// 64-sector groups (see [`HashTable::new`]), so trailing entries may belong
/// to zero-padding sectors.
#[derive(Clone, Debug)]
pub struct HashTable {
    /// SHA-1 hash of each 0x400 byte block of decrypted data.
    pub h0_hashes: Box<[HashBytes]>,
    /// SHA-1 hash of the 31 H0 hashes for each sector.
    pub h1_hashes: Box<[HashBytes]>,
    /// SHA-1 hash of the 8 H1 hashes for each subgroup.
    pub h2_hashes: Box<[HashBytes]>,
    /// SHA-1 hash of the 8 H2 hashes for each group.
    pub h3_hashes: Box<[HashBytes]>,
}
|
||||
|
||||
/// Hash results for a single 64-sector group, produced by one worker in
/// `rebuild_hashes` and merged into the shared [`HashTable`].
#[derive(Clone, FromZeroes)]
struct HashResult {
    // 64 sectors x 31 H0 hashes = 1984 entries per group.
    h0_hashes: [HashBytes; 1984],
    // One H1 hash per sector in the group.
    h1_hashes: [HashBytes; 64],
    // One H2 hash per 8-sector subgroup.
    h2_hashes: [HashBytes; 8],
    // Single H3 hash over the group's 8 H2 hashes.
    h3_hash: HashBytes,
}
|
||||
|
||||
impl HashTable {
    /// Allocates zeroed hash tables for `num_sectors` sectors, rounded up to
    /// a whole number of 64-sector groups.
    fn new(num_sectors: u32) -> Self {
        let num_sectors = num_sectors.next_multiple_of(64) as usize;
        // 31 H0 hashes per sector, 1 H1 per sector, 1 H2 per 8-sector
        // subgroup, 1 H3 per 64-sector group.
        let num_data_hashes = num_sectors * 31;
        let num_subgroups = num_sectors / 8;
        let num_groups = num_subgroups / 8;
        Self {
            h0_hashes: HashBytes::new_box_slice_zeroed(num_data_hashes),
            h1_hashes: HashBytes::new_box_slice_zeroed(num_sectors),
            h2_hashes: HashBytes::new_box_slice_zeroed(num_subgroups),
            h3_hashes: HashBytes::new_box_slice_zeroed(num_groups),
        }
    }

    /// Copies one group's computed hashes into the table at `group_index`.
    fn extend(&mut self, group_index: usize, result: &HashResult) {
        *array_ref_mut![self.h0_hashes, group_index * 1984, 1984] = result.h0_hashes;
        *array_ref_mut![self.h1_hashes, group_index * 64, 64] = result.h1_hashes;
        *array_ref_mut![self.h2_hashes, group_index * 8, 8] = result.h2_hashes;
        self.h3_hashes[group_index] = result.h3_hash;
    }
}
|
||||
|
||||
/// Recomputes the full H0-H3 hash tables for every Wii partition on the
/// disc in parallel, verifies the computed H3 hashes against the
/// partition's stored H3 table (logging a warning on mismatch), and
/// attaches the rebuilt tables to `reader.partitions`.
pub fn rebuild_hashes(reader: &mut DiscReader) -> Result<()> {
    // 31 H0 hashes per sector (sector data size / 0x400 per hash block).
    const NUM_H0_HASHES: usize = SECTOR_DATA_SIZE / HASHES_SIZE;

    log::info!(
        "Rebuilding hashes for Wii partition data (using {} threads)",
        rayon::current_num_threads()
    );

    let start = Instant::now();

    // Precompute hashes for zeroed sectors.
    const ZERO_H0_BYTES: &[u8] = &[0u8; HASHES_SIZE];
    let zero_h0_hash = hash_bytes(ZERO_H0_BYTES);
    // NOTE(review): zero_h1_hash is built up here but never finalized or
    // read below — appears to be dead code; confirm before removing.
    let mut zero_h1_hash = Sha1::new();
    for _ in 0..NUM_H0_HASHES {
        zero_h1_hash.update(zero_h0_hash);
    }

    let partitions = reader.partitions();
    let mut hash_tables = Vec::with_capacity(partitions.len());
    for part in partitions {
        let part_sectors = part.data_end_sector - part.data_start_sector;
        let hash_table = HashTable::new(part_sectors);
        log::debug!(
            "Rebuilding hashes: {} sectors, {} subgroups, {} groups",
            hash_table.h1_hashes.len(),
            hash_table.h2_hashes.len(),
            hash_table.h3_hashes.len()
        );

        // One rayon task per 64-sector group. Each task gets its own
        // decrypted partition stream plus a handle to the shared table.
        let group_count = hash_table.h3_hashes.len();
        let mutex = Arc::new(Mutex::new(hash_table));
        (0..group_count).into_par_iter().try_for_each_with(
            (reader.open_partition(part.index, &OpenOptions::default())?, mutex.clone()),
            |(stream, mutex), h3_index| -> Result<()> {
                let mut result = HashResult::new_box_zeroed();
                let mut data_buf = <u8>::new_box_slice_zeroed(SECTOR_DATA_SIZE);
                let mut h3_hasher = Sha1::new();
                for h2_index in 0..8 {
                    let mut h2_hasher = Sha1::new();
                    for h1_index in 0..8 {
                        let sector = h1_index + h2_index * 8;
                        let part_sector = sector as u32 + h3_index as u32 * 64;
                        let mut h1_hasher = Sha1::new();
                        if part_sector >= part_sectors {
                            // Padding sector past the end of the partition:
                            // use the precomputed all-zero H0 hash.
                            for h0_index in 0..NUM_H0_HASHES {
                                result.h0_hashes[h0_index + sector * 31] = zero_h0_hash;
                                h1_hasher.update(zero_h0_hash);
                            }
                        } else {
                            stream
                                .seek(SeekFrom::Start(part_sector as u64 * SECTOR_DATA_SIZE as u64))
                                .with_context(|| format!("Seeking to sector {}", part_sector))?;
                            stream
                                .read_exact(&mut data_buf)
                                .with_context(|| format!("Reading sector {}", part_sector))?;
                            for h0_index in 0..NUM_H0_HASHES {
                                let h0_hash = hash_bytes(array_ref![
                                    data_buf,
                                    h0_index * HASHES_SIZE,
                                    HASHES_SIZE
                                ]);
                                result.h0_hashes[h0_index + sector * 31] = h0_hash;
                                h1_hasher.update(h0_hash);
                            }
                        };
                        let h1_hash = h1_hasher.finalize().into();
                        result.h1_hashes[sector] = h1_hash;
                        h2_hasher.update(h1_hash);
                    }
                    let h2_hash = h2_hasher.finalize().into();
                    result.h2_hashes[h2_index] = h2_hash;
                    h3_hasher.update(h2_hash);
                }
                result.h3_hash = h3_hasher.finalize().into();
                // Merge this group's result under the lock.
                let mut hash_table = mutex.lock().map_err(|_| "Failed to lock mutex")?;
                hash_table.extend(h3_index, &result);
                Ok(())
            },
        )?;

        // All workers are done; reclaim sole ownership of the table.
        let hash_table = Arc::try_unwrap(mutex)
            .map_err(|_| "Failed to unwrap Arc")?
            .into_inner()
            .map_err(|_| "Failed to lock mutex")?;
        hash_tables.push(hash_table);
    }

    // Verify against H3 table
    for (part, hash_table) in reader.partitions.clone().iter().zip(hash_tables.iter()) {
        log::debug!(
            "Verifying H3 table for partition {} (count {})",
            part.index,
            hash_table.h3_hashes.len()
        );
        reader
            .seek(SeekFrom::Start(
                part.start_sector as u64 * SECTOR_SIZE as u64 + part.header.h3_table_off(),
            ))
            .context("Seeking to H3 table")?;
        let h3_table: Box<[HashBytes]> =
            read_box_slice(reader, hash_table.h3_hashes.len()).context("Reading H3 table")?;
        for (idx, (expected_hash, h3_hash)) in
            h3_table.iter().zip(hash_table.h3_hashes.iter()).enumerate()
        {
            if expected_hash != h3_hash {
                // Mismatches are reported but not treated as fatal.
                let mut got_bytes = [0u8; 40];
                let got = base16ct::lower::encode_str(h3_hash, &mut got_bytes).unwrap();
                let mut expected_bytes = [0u8; 40];
                let expected =
                    base16ct::lower::encode_str(expected_hash, &mut expected_bytes).unwrap();
                log::warn!(
                    "Partition {} H3 table does not match:\n\tindex {}\n\texpected: {}\n\tgot: {}",
                    part.index, idx, expected, got
                );
            }
        }
    }

    // Attach the rebuilt tables to their partitions.
    for (part, hash_table) in reader.partitions.iter_mut().zip(hash_tables) {
        part.hash_table = Some(hash_table);
    }
    log::info!("Rebuilt hashes in {:?}", start.elapsed());
    Ok(())
}
|
||||
|
||||
#[inline]
|
||||
fn hash_bytes(buf: &[u8]) -> HashBytes {
|
||||
let mut hasher = Sha1::new();
|
||||
hasher.update(buf);
|
||||
hasher.finalize().into()
|
||||
}
|
||||
348
nod/src/disc/mod.rs
Normal file
348
nod/src/disc/mod.rs
Normal file
@@ -0,0 +1,348 @@
|
||||
//! Disc type related logic (GameCube, Wii)
|
||||
|
||||
use std::{
|
||||
borrow::Cow,
|
||||
ffi::CStr,
|
||||
fmt::{Debug, Display, Formatter},
|
||||
io,
|
||||
mem::size_of,
|
||||
str::from_utf8,
|
||||
};
|
||||
|
||||
use dyn_clone::DynClone;
|
||||
use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes};
|
||||
|
||||
use crate::{
|
||||
disc::wii::{Ticket, TmdHeader},
|
||||
fst::Node,
|
||||
static_assert,
|
||||
streams::{ReadStream, SharedWindowedReadStream},
|
||||
Fst, Result,
|
||||
};
|
||||
|
||||
pub(crate) mod gcn;
|
||||
pub(crate) mod hashes;
|
||||
pub(crate) mod reader;
|
||||
pub(crate) mod wii;
|
||||
|
||||
pub const SECTOR_SIZE: usize = 0x8000;
|
||||
|
||||
/// Shared GameCube & Wii disc header
///
/// Occupies the first 0x400 bytes of the disc (see the size assertion below).
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct DiscHeader {
    /// Game ID (e.g. GM8E01 for Metroid Prime)
    pub game_id: [u8; 6],
    /// Used in multi-disc games
    pub disc_num: u8,
    /// Disc version
    pub disc_version: u8,
    /// Audio streaming enabled
    pub audio_streaming: u8,
    /// Audio streaming buffer size
    pub audio_stream_buf_size: u8,
    /// Padding
    _pad1: [u8; 14],
    /// If this is a Wii disc, this will be 0x5D1C9EA3
    pub wii_magic: U32,
    /// If this is a GameCube disc, this will be 0xC2339F3D
    pub gcn_magic: U32,
    /// Game title
    pub game_title: [u8; 64],
    /// If 1, disc omits partition hashes
    pub no_partition_hashes: u8,
    /// If 1, disc omits partition encryption
    pub no_partition_encryption: u8,
    /// Padding
    _pad2: [u8; 926],
}
|
||||
|
||||
static_assert!(size_of::<DiscHeader>() == 0x400);
|
||||
|
||||
impl DiscHeader {
|
||||
/// Game ID as a string.
|
||||
pub fn game_id_str(&self) -> &str { from_utf8(&self.game_id).unwrap_or("[invalid]") }
|
||||
|
||||
/// Game title as a string.
|
||||
pub fn game_title_str(&self) -> &str {
|
||||
CStr::from_bytes_until_nul(&self.game_title)
|
||||
.ok()
|
||||
.and_then(|c| c.to_str().ok())
|
||||
.unwrap_or("[invalid]")
|
||||
}
|
||||
|
||||
/// Whether this is a GameCube disc.
|
||||
pub fn is_gamecube(&self) -> bool { self.gcn_magic.get() == 0xC2339F3D }
|
||||
|
||||
/// Whether this is a Wii disc.
|
||||
pub fn is_wii(&self) -> bool { self.wii_magic.get() == 0x5D1C9EA3 }
|
||||
}
|
||||
|
||||
/// Partition header
///
/// Follows the disc header in boot.bin; 0x40 bytes (see the size assertion
/// below). Fields marked "(Wii: >> 2)" are stored divided by 4 on Wii discs.
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct PartitionHeader {
    /// Debug monitor offset
    pub debug_mon_off: U32,
    /// Debug monitor load address
    pub debug_load_addr: U32,
    /// Padding
    _pad1: [u8; 0x18],
    /// Offset to main DOL (Wii: >> 2)
    pub dol_off: U32,
    /// Offset to file system table (Wii: >> 2)
    pub fst_off: U32,
    /// File system size (Wii: >> 2)
    pub fst_sz: U32,
    /// File system max size (Wii: >> 2)
    pub fst_max_sz: U32,
    /// File system table load address
    pub fst_memory_address: U32,
    /// User position
    pub user_position: U32,
    /// User size
    pub user_sz: U32,
    /// Padding
    _pad2: [u8; 4],
}
|
||||
|
||||
static_assert!(size_of::<PartitionHeader>() == 0x40);
|
||||
|
||||
impl PartitionHeader {
|
||||
pub fn dol_off(&self, is_wii: bool) -> u64 {
|
||||
if is_wii {
|
||||
self.dol_off.get() as u64 * 4
|
||||
} else {
|
||||
self.dol_off.get() as u64
|
||||
}
|
||||
}
|
||||
|
||||
pub fn fst_off(&self, is_wii: bool) -> u64 {
|
||||
if is_wii {
|
||||
self.fst_off.get() as u64 * 4
|
||||
} else {
|
||||
self.fst_off.get() as u64
|
||||
}
|
||||
}
|
||||
|
||||
pub fn fst_sz(&self, is_wii: bool) -> u64 {
|
||||
if is_wii {
|
||||
self.fst_sz.get() as u64 * 4
|
||||
} else {
|
||||
self.fst_sz.get() as u64
|
||||
}
|
||||
}
|
||||
|
||||
pub fn fst_max_sz(&self, is_wii: bool) -> u64 {
|
||||
if is_wii {
|
||||
self.fst_max_sz.get() as u64 * 4
|
||||
} else {
|
||||
self.fst_max_sz.get() as u64
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Apploader header
///
/// Header at the start of apploader.bin; `size` plus `trailer_size` give
/// the length of the payload that follows it.
#[derive(Debug, PartialEq, Clone, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct AppLoaderHeader {
    /// Apploader build date
    pub date: [u8; 16],
    /// Entry point
    pub entry_point: U32,
    /// Apploader size
    pub size: U32,
    /// Apploader trailer size
    pub trailer_size: U32,
    /// Padding
    _pad: [u8; 4],
}
|
||||
|
||||
impl AppLoaderHeader {
|
||||
/// Apploader build date as a string
|
||||
pub fn date_str(&self) -> Option<&str> {
|
||||
CStr::from_bytes_until_nul(&self.date).ok().and_then(|c| c.to_str().ok())
|
||||
}
|
||||
}
|
||||
|
||||
/// Maximum number of text sections in a DOL
pub const DOL_MAX_TEXT_SECTIONS: usize = 7;
/// Maximum number of data sections in a DOL
pub const DOL_MAX_DATA_SECTIONS: usize = 11;

/// DOL header
///
/// Fixed 0x100-byte executable header (see the size assertion below)
/// listing the text/data section tables, BSS range, and entry point.
// NOTE(review): unlike the other headers here, this derives only
// FromBytes/FromZeroes and has no #[repr(C)] — confirm the layout guarantee
// is intentional.
#[derive(Debug, Clone, FromBytes, FromZeroes)]
pub struct DolHeader {
    /// Text section offsets
    pub text_offs: [U32; DOL_MAX_TEXT_SECTIONS],
    /// Data section offsets
    pub data_offs: [U32; DOL_MAX_DATA_SECTIONS],
    /// Text section addresses
    pub text_addrs: [U32; DOL_MAX_TEXT_SECTIONS],
    /// Data section addresses
    pub data_addrs: [U32; DOL_MAX_DATA_SECTIONS],
    /// Text section sizes
    pub text_sizes: [U32; DOL_MAX_TEXT_SECTIONS],
    /// Data section sizes
    pub data_sizes: [U32; DOL_MAX_DATA_SECTIONS],
    /// BSS address
    pub bss_addr: U32,
    /// BSS size
    pub bss_size: U32,
    /// Entry point
    pub entry_point: U32,
    /// Padding
    _pad: [u8; 0x1C],
}
|
||||
|
||||
static_assert!(size_of::<DolHeader>() == 0x100);
|
||||
|
||||
/// Partition type
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum PartitionKind {
    /// Game data partition.
    Data,
    /// System update partition.
    Update,
    /// Channel installer partition.
    Channel,
    /// Any other partition type, identified by its raw 32-bit value.
    Other(u32),
}

impl Display for PartitionKind {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        match *self {
            Self::Data => f.write_str("Data"),
            Self::Update => f.write_str("Update"),
            Self::Channel => f.write_str("Channel"),
            Self::Other(v) => {
                // Unknown types are typically four ASCII characters;
                // show both the hex value and the character form.
                write!(f, "Other ({:08X}, {})", v, String::from_utf8_lossy(&v.to_be_bytes()))
            }
        }
    }
}

impl PartitionKind {
    /// Returns the directory name for the partition kind.
    pub fn dir_name(&self) -> Cow<str> {
        match *self {
            Self::Data => "DATA".into(),
            Self::Update => "UPDATE".into(),
            Self::Channel => "CHANNEL".into(),
            Self::Other(v) => {
                Cow::Owned(format!("P-{}", String::from_utf8_lossy(&v.to_be_bytes())))
            }
        }
    }
}

impl From<u32> for PartitionKind {
    /// Maps the on-disc partition type value to a kind; 0/1/2 are the
    /// well-known types, anything else is preserved as `Other`.
    fn from(v: u32) -> Self {
        match v {
            0 => Self::Data,
            1 => Self::Update,
            2 => Self::Channel,
            other => Self::Other(other),
        }
    }
}
|
||||
|
||||
/// An open read stream for a disc partition.
///
/// Obtained via `DiscReader::open_partition` / `open_partition_kind`.
pub trait PartitionBase: DynClone + ReadStream + Send + Sync {
    /// Reads the partition header and file system table.
    fn meta(&mut self) -> Result<Box<PartitionMeta>>;

    /// Seeks the read stream to the specified file system node
    /// and returns a windowed stream.
    ///
    /// # Examples
    ///
    /// Basic usage:
    /// ```no_run
    /// use std::io::Read;
    ///
    /// use nod::{Disc, PartitionKind};
    ///
    /// fn main() -> nod::Result<()> {
    ///     let disc = Disc::new("path/to/file.iso")?;
    ///     let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
    ///     let meta = partition.meta()?;
    ///     let fst = meta.fst()?;
    ///     if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
    ///         let mut s = String::new();
    ///         partition
    ///             .open_file(node)
    ///             .expect("Failed to open file stream")
    ///             .read_to_string(&mut s)
    ///             .expect("Failed to read file");
    ///         println!("{}", s);
    ///     }
    ///     Ok(())
    /// }
    /// ```
    fn open_file(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream>;

    /// The ideal size for buffered reads from this partition.
    /// GameCube discs have a data block size of 0x8000,
    /// whereas Wii discs have a data block size of 0x7c00.
    fn ideal_buffer_size(&self) -> usize;
}
|
||||
|
||||
dyn_clone::clone_trait_object!(PartitionBase);
|
||||
|
||||
/// Size of the disc header and partition header (boot.bin)
|
||||
pub const BOOT_SIZE: usize = size_of::<DiscHeader>() + size_of::<PartitionHeader>();
|
||||
/// Size of the debug and region information (bi2.bin)
|
||||
pub const BI2_SIZE: usize = 0x2000;
|
||||
|
||||
/// Disc partition metadata
///
/// Raw byte buffers for each metadata file; use the accessors on the
/// `impl` block to get typed views.
#[derive(Clone, Debug)]
pub struct PartitionMeta {
    /// Disc and partition header (boot.bin)
    pub raw_boot: Box<[u8; BOOT_SIZE]>,
    /// Debug and region information (bi2.bin)
    pub raw_bi2: Box<[u8; BI2_SIZE]>,
    /// Apploader (apploader.bin)
    pub raw_apploader: Box<[u8]>,
    /// File system table (fst.bin)
    pub raw_fst: Box<[u8]>,
    /// Main binary (main.dol)
    pub raw_dol: Box<[u8]>,
    /// Ticket (ticket.bin, Wii only)
    pub raw_ticket: Option<Box<[u8]>>,
    /// TMD (tmd.bin, Wii only)
    pub raw_tmd: Option<Box<[u8]>>,
    /// Certificate chain (cert.bin, Wii only)
    pub raw_cert_chain: Option<Box<[u8]>>,
    /// H3 hash table (h3.bin, Wii only)
    pub raw_h3_table: Option<Box<[u8]>>,
}
|
||||
|
||||
impl PartitionMeta {
    /// A view into the disc header (first part of boot.bin).
    pub fn header(&self) -> &DiscHeader {
        DiscHeader::ref_from(&self.raw_boot[..size_of::<DiscHeader>()]).unwrap()
    }

    /// A view into the partition header (remainder of boot.bin).
    pub fn partition_header(&self) -> &PartitionHeader {
        PartitionHeader::ref_from(&self.raw_boot[size_of::<DiscHeader>()..]).unwrap()
    }

    /// A view into the apploader header at the start of apploader.bin.
    pub fn apploader_header(&self) -> &AppLoaderHeader {
        AppLoaderHeader::ref_from_prefix(&self.raw_apploader).unwrap()
    }

    /// Parses the file system table.
    pub fn fst(&self) -> Result<Fst, &'static str> { Fst::new(&self.raw_fst) }

    /// A view into the DOL header at the start of main.dol.
    pub fn dol_header(&self) -> &DolHeader { DolHeader::ref_from_prefix(&self.raw_dol).unwrap() }

    /// A view into the ticket (Wii only), if present and correctly sized.
    pub fn ticket(&self) -> Option<&Ticket> {
        self.raw_ticket.as_ref().and_then(|v| Ticket::ref_from(v))
    }

    /// A view into the TMD header (Wii only), if present and correctly sized.
    pub fn tmd_header(&self) -> Option<&TmdHeader> {
        self.raw_tmd.as_ref().and_then(|v| TmdHeader::ref_from_prefix(v))
    }
}
|
||||
|
||||
pub const MINI_DVD_SIZE: u64 = 1_459_978_240;
|
||||
pub const SL_DVD_SIZE: u64 = 4_699_979_776;
|
||||
pub const DL_DVD_SIZE: u64 = 8_511_160_320;
|
||||
320
nod/src/disc/reader.rs
Normal file
320
nod/src/disc/reader.rs
Normal file
@@ -0,0 +1,320 @@
|
||||
use std::{
|
||||
cmp::min,
|
||||
io,
|
||||
io::{Read, Seek, SeekFrom},
|
||||
};
|
||||
|
||||
use zerocopy::FromZeroes;
|
||||
|
||||
use crate::{
|
||||
disc::{
|
||||
gcn::PartitionGC,
|
||||
hashes::{rebuild_hashes, HashTable},
|
||||
wii::{PartitionWii, WiiPartEntry, WiiPartGroup, WiiPartitionHeader, WII_PART_GROUP_OFF},
|
||||
DL_DVD_SIZE, MINI_DVD_SIZE, SL_DVD_SIZE,
|
||||
},
|
||||
io::block::{Block, BlockIO, PartitionInfo},
|
||||
util::read::{read_box, read_from, read_vec},
|
||||
DiscHeader, DiscMeta, Error, OpenOptions, PartitionBase, PartitionHeader, PartitionKind,
|
||||
Result, ResultContext, SECTOR_SIZE,
|
||||
};
|
||||
|
||||
/// Whether Wii partition sectors are produced encrypted or decrypted when
/// read through [`DiscReader`].
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub enum EncryptionMode {
    /// Output sectors with partition encryption applied.
    Encrypted,
    /// Output plaintext sectors.
    Decrypted,
}
|
||||
|
||||
/// Sector-oriented reader over a whole disc image, transparently
/// decrypting or re-encrypting Wii partition data according to `mode`.
pub struct DiscReader {
    // Underlying block-based disc image reader.
    io: Box<dyn BlockIO>,
    // Most recently read block, if any.
    block: Option<Block>,
    // Raw bytes of the current block.
    block_buf: Box<[u8]>,
    // Index of the buffered block; u32::MAX means "no block cached".
    block_idx: u32,
    // Processed bytes of the current sector.
    sector_buf: Box<[u8; SECTOR_SIZE]>,
    // Index of the buffered sector; u32::MAX means "no sector cached".
    sector_idx: u32,
    // Current absolute read position in bytes.
    pos: u64,
    // Encryption mode applied to Wii partition sectors.
    mode: EncryptionMode,
    // Disc header read from the start of the image.
    disc_header: Box<DiscHeader>,
    // Wii partition table (empty for GameCube discs).
    pub(crate) partitions: Vec<PartitionInfo>,
    // NOTE(review): only initialized/cloned in this view; rebuilt hash
    // tables appear to be stored on PartitionInfo instead — confirm whether
    // this field is still needed.
    hash_tables: Vec<HashTable>,
}
|
||||
|
||||
impl Clone for DiscReader {
    /// Manual `Clone`: shares the `BlockIO` handle, disc header, and
    /// partition metadata, but resets all cached read state so the clone
    /// starts at offset 0 with empty buffers.
    fn clone(&self) -> Self {
        Self {
            io: self.io.clone(),
            block: None,
            // Fresh zeroed buffer of the same block size; contents not shared.
            block_buf: <u8>::new_box_slice_zeroed(self.block_buf.len()),
            block_idx: u32::MAX,
            sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed(),
            sector_idx: u32::MAX,
            pos: 0,
            mode: self.mode,
            disc_header: self.disc_header.clone(),
            partitions: self.partitions.clone(),
            hash_tables: self.hash_tables.clone(),
        }
    }
}
|
||||
|
||||
impl DiscReader {
    /// Creates a reader over `inner`, reading the disc header and, for Wii
    /// discs, the partition table. When `options.rebuild_encryption` is set
    /// and the source format lacks hash data, partition hashes are rebuilt
    /// up front.
    pub fn new(inner: Box<dyn BlockIO>, options: &OpenOptions) -> Result<Self> {
        let block_size = inner.block_size();
        let meta = inner.meta();
        // Construct with a zeroed header first so the reader itself can be
        // used to read the real header from the image.
        let mut reader = Self {
            io: inner,
            block: None,
            block_buf: <u8>::new_box_slice_zeroed(block_size as usize),
            block_idx: u32::MAX,
            sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed(),
            sector_idx: u32::MAX,
            pos: 0,
            // Rebuilding encryption implies producing encrypted sectors.
            mode: if options.rebuild_encryption {
                EncryptionMode::Encrypted
            } else {
                EncryptionMode::Decrypted
            },
            disc_header: DiscHeader::new_box_zeroed(),
            partitions: vec![],
            hash_tables: vec![],
        };
        let disc_header: Box<DiscHeader> = read_box(&mut reader).context("Reading disc header")?;
        reader.disc_header = disc_header;
        if reader.disc_header.is_wii() {
            reader.partitions = read_partition_info(&mut reader)?;
            // Rebuild hashes if the format requires it
            if options.rebuild_encryption && meta.needs_hash_recovery {
                rebuild_hashes(&mut reader)?;
            }
        }
        reader.reset();
        Ok(reader)
    }

    /// Clears all cached read state and rewinds to offset 0.
    pub fn reset(&mut self) {
        self.block = None;
        self.block_buf.fill(0);
        self.block_idx = u32::MAX;
        self.sector_buf.fill(0);
        self.sector_idx = u32::MAX;
        self.pos = 0;
    }

    /// Total disc size in bytes, falling back to an estimate from the
    /// partition layout when the format doesn't store it.
    pub fn disc_size(&self) -> u64 {
        self.io.meta().disc_size.unwrap_or_else(|| guess_disc_size(&self.partitions))
    }

    /// The disc header.
    pub fn header(&self) -> &DiscHeader { &self.disc_header }

    /// Wii partition table (empty for GameCube discs).
    pub fn partitions(&self) -> &[PartitionInfo] { &self.partitions }

    /// Metadata from the underlying disc image format.
    pub fn meta(&self) -> DiscMeta { self.io.meta() }

    /// Opens a new, decrypted partition read stream for the specified partition index.
    pub fn open_partition(
        &self,
        index: usize,
        options: &OpenOptions,
    ) -> Result<Box<dyn PartitionBase>> {
        if self.disc_header.is_gamecube() {
            // GameCube: the whole disc is the single "partition" at index 0.
            if index == 0 {
                Ok(PartitionGC::new(self.io.clone(), self.disc_header.clone())?)
            } else {
                Err(Error::DiscFormat("GameCube discs only have one partition".to_string()))
            }
        } else if let Some(part) = self.partitions.get(index) {
            Ok(PartitionWii::new(self.io.clone(), self.disc_header.clone(), part, options)?)
        } else {
            Err(Error::DiscFormat(format!("Partition {index} not found")))
        }
    }

    /// Opens a new, decrypted partition read stream for the first partition matching
    /// the specified type.
    pub fn open_partition_kind(
        &self,
        part_type: PartitionKind,
        options: &OpenOptions,
    ) -> Result<Box<dyn PartitionBase>> {
        if self.disc_header.is_gamecube() {
            // GameCube: only the data partition exists.
            if part_type == PartitionKind::Data {
                Ok(PartitionGC::new(self.io.clone(), self.disc_header.clone())?)
            } else {
                Err(Error::DiscFormat("GameCube discs only have a data partition".to_string()))
            }
        } else if let Some(part) = self.partitions.iter().find(|v| v.kind == part_type) {
            Ok(PartitionWii::new(self.io.clone(), self.disc_header.clone(), part, options)?)
        } else {
            Err(Error::DiscFormat(format!("Partition type {part_type} not found")))
        }
    }
}
|
||||
|
||||
impl Read for DiscReader {
    /// Reads up to one sector of disc data at the current position.
    ///
    /// Maintains a two-level cache: a format-native block (whose size depends
    /// on the container, e.g. WBFS/CISO) and the current 0x8000-byte sector
    /// extracted from it. Returns at most the remainder of the current sector,
    /// so callers must loop (or use `read_exact`) for larger reads.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let block_idx = (self.pos / self.block_buf.len() as u64) as u32;
        let abs_sector = (self.pos / SECTOR_SIZE as u64) as u32;

        // For Wii discs, find the partition containing this sector (if any)
        // so the block can be transformed with the right title key.
        let partition = if self.disc_header.is_wii() {
            self.partitions.iter().find(|part| {
                abs_sector >= part.data_start_sector && abs_sector < part.data_end_sector
            })
        } else {
            None
        };

        // Read new block
        if block_idx != self.block_idx {
            self.block = self.io.read_block(self.block_buf.as_mut(), block_idx, partition)?;
            self.block_idx = block_idx;
        }

        // Read new sector into buffer
        if abs_sector != self.sector_idx {
            // No block here means end of disc: signal EOF.
            let Some(block) = &self.block else {
                return Ok(0);
            };
            if let Some(partition) = partition {
                // Partition data is produced in the requested encryption mode.
                match self.mode {
                    EncryptionMode::Decrypted => block.decrypt(
                        self.sector_buf.as_mut(),
                        self.block_buf.as_ref(),
                        block_idx,
                        abs_sector,
                        partition,
                    )?,
                    EncryptionMode::Encrypted => block.encrypt(
                        self.sector_buf.as_mut(),
                        self.block_buf.as_ref(),
                        block_idx,
                        abs_sector,
                        partition,
                    )?,
                }
            } else {
                // Outside any partition: copy raw disc data.
                block.copy_raw(
                    self.sector_buf.as_mut(),
                    self.block_buf.as_ref(),
                    block_idx,
                    abs_sector,
                    &self.disc_header,
                )?;
            }
            self.sector_idx = abs_sector;
        }

        // Read from sector buffer
        let offset = (self.pos % SECTOR_SIZE as u64) as usize;
        let len = min(buf.len(), SECTOR_SIZE - offset);
        buf[..len].copy_from_slice(&self.sector_buf[offset..offset + len]);
        self.pos += len as u64;
        Ok(len)
    }
}
|
||||
|
||||
impl Seek for DiscReader {
|
||||
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
|
||||
self.pos = match pos {
|
||||
SeekFrom::Start(v) => v,
|
||||
SeekFrom::End(_) => {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::Unsupported,
|
||||
"BlockIOReader: SeekFrom::End is not supported".to_string(),
|
||||
));
|
||||
}
|
||||
SeekFrom::Current(v) => self.pos.saturating_add_signed(v),
|
||||
};
|
||||
Ok(self.pos)
|
||||
}
|
||||
}
|
||||
|
||||
/// Parses the Wii partition tables and returns one [`PartitionInfo`] per
/// partition found, in table order.
///
/// Reads the four partition groups at 0x40000, validates sector alignment of
/// each partition's data area, decrypts each partition's title key, and then
/// opens the partition briefly to capture its inner disc/partition headers.
fn read_partition_info(reader: &mut DiscReader) -> crate::Result<Vec<PartitionInfo>> {
    reader.seek(SeekFrom::Start(WII_PART_GROUP_OFF)).context("Seeking to partition groups")?;
    let part_groups: [WiiPartGroup; 4] = read_from(reader).context("Reading partition groups")?;
    let mut part_info = Vec::new();
    for (group_idx, group) in part_groups.iter().enumerate() {
        let part_count = group.part_count.get();
        if part_count == 0 {
            continue;
        }
        reader
            .seek(SeekFrom::Start(group.part_entry_off()))
            .with_context(|| format!("Seeking to partition group {group_idx}"))?;
        let entries: Vec<WiiPartEntry> = read_vec(reader, part_count as usize)
            .with_context(|| format!("Reading partition group {group_idx}"))?;
        for (part_idx, entry) in entries.iter().enumerate() {
            let offset = entry.offset();
            reader
                .seek(SeekFrom::Start(offset))
                .with_context(|| format!("Seeking to partition data {group_idx}:{part_idx}"))?;
            let header: Box<WiiPartitionHeader> = read_box(reader)
                .with_context(|| format!("Reading partition header {group_idx}:{part_idx}"))?;

            // Title key is needed to decrypt partition contents below.
            let key = header.ticket.decrypt_title_key()?;
            let start_offset = entry.offset();
            if start_offset % SECTOR_SIZE as u64 != 0 {
                return Err(Error::DiscFormat(format!(
                    "Partition {group_idx}:{part_idx} offset is not sector aligned",
                )));
            }
            let data_start_offset = entry.offset() + header.data_off();
            let data_end_offset = data_start_offset + header.data_size();
            if data_start_offset % SECTOR_SIZE as u64 != 0
                || data_end_offset % SECTOR_SIZE as u64 != 0
            {
                return Err(Error::DiscFormat(format!(
                    "Partition {group_idx}:{part_idx} data is not sector aligned",
                )));
            }
            // Headers are zero-filled placeholders until read from the
            // partition itself below.
            let mut info = PartitionInfo {
                index: part_info.len(),
                kind: entry.kind.get().into(),
                start_sector: (start_offset / SECTOR_SIZE as u64) as u32,
                data_start_sector: (data_start_offset / SECTOR_SIZE as u64) as u32,
                data_end_sector: (data_end_offset / SECTOR_SIZE as u64) as u32,
                key,
                header,
                disc_header: DiscHeader::new_box_zeroed(),
                partition_header: PartitionHeader::new_box_zeroed(),
                hash_table: None,
            };

            // Open the partition (decrypted) to read its embedded headers.
            let mut partition_reader = PartitionWii::new(
                reader.io.clone(),
                reader.disc_header.clone(),
                &info,
                &OpenOptions::default(),
            )?;
            info.disc_header = read_box(&mut partition_reader).context("Reading disc header")?;
            info.partition_header =
                read_box(&mut partition_reader).context("Reading partition header")?;

            part_info.push(info);
        }
    }
    Ok(part_info)
}
|
||||
|
||||
fn guess_disc_size(part_info: &[PartitionInfo]) -> u64 {
|
||||
let max_offset = part_info
|
||||
.iter()
|
||||
.flat_map(|v| {
|
||||
let offset = v.start_sector as u64 * SECTOR_SIZE as u64;
|
||||
[
|
||||
offset + v.header.tmd_off() + v.header.tmd_size(),
|
||||
offset + v.header.cert_chain_off() + v.header.cert_chain_size(),
|
||||
offset + v.header.h3_table_off() + v.header.h3_table_size(),
|
||||
offset + v.header.data_off() + v.header.data_size(),
|
||||
]
|
||||
})
|
||||
.max()
|
||||
.unwrap_or(0x50000);
|
||||
if max_offset <= MINI_DVD_SIZE && !part_info.iter().any(|v| v.kind == PartitionKind::Data) {
|
||||
// Datel disc
|
||||
MINI_DVD_SIZE
|
||||
} else if max_offset < SL_DVD_SIZE {
|
||||
SL_DVD_SIZE
|
||||
} else {
|
||||
DL_DVD_SIZE
|
||||
}
|
||||
}
|
||||
447
nod/src/disc/wii.rs
Normal file
447
nod/src/disc/wii.rs
Normal file
@@ -0,0 +1,447 @@
|
||||
use std::{
|
||||
cmp::min,
|
||||
ffi::CStr,
|
||||
io,
|
||||
io::{Read, Seek, SeekFrom},
|
||||
mem::size_of,
|
||||
};
|
||||
|
||||
use sha1::{digest, Digest, Sha1};
|
||||
use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes};
|
||||
|
||||
use crate::{
|
||||
array_ref,
|
||||
disc::{
|
||||
gcn::{read_part_meta, PartitionGC},
|
||||
PartitionBase, PartitionKind, PartitionMeta, SECTOR_SIZE,
|
||||
},
|
||||
fst::{Node, NodeKind},
|
||||
io::{
|
||||
aes_decrypt,
|
||||
block::{Block, BlockIO, PartitionInfo},
|
||||
KeyBytes,
|
||||
},
|
||||
static_assert,
|
||||
streams::{ReadStream, SharedWindowedReadStream},
|
||||
util::{div_rem, read::read_box_slice},
|
||||
DiscHeader, Error, OpenOptions, Result, ResultContext,
|
||||
};
|
||||
|
||||
/// Size of the hash block at the start of each Wii partition sector.
pub(crate) const HASHES_SIZE: usize = 0x400;
/// Usable data bytes per Wii partition sector (sector size minus hash block).
pub(crate) const SECTOR_DATA_SIZE: usize = SECTOR_SIZE - HASHES_SIZE; // 0x7C00

// ppki (Retail)
const RVL_CERT_ISSUER_PPKI_TICKET: &str = "Root-CA00000001-XS00000003";
// Common keys used to decrypt title keys, indexed by `Ticket::common_key_idx`.
#[rustfmt::skip]
const RETAIL_COMMON_KEYS: [KeyBytes; 3] = [
    /* RVL_KEY_RETAIL */
    [0xeb, 0xe4, 0x2a, 0x22, 0x5e, 0x85, 0x93, 0xe4, 0x48, 0xd9, 0xc5, 0x45, 0x73, 0x81, 0xaa, 0xf7],
    /* RVL_KEY_KOREAN */
    [0x63, 0xb8, 0x2b, 0xb4, 0xf4, 0x61, 0x4e, 0x2e, 0x13, 0xf2, 0xfe, 0xfb, 0xba, 0x4c, 0x9b, 0x7e],
    /* vWii_KEY_RETAIL */
    [0x30, 0xbf, 0xc7, 0x6e, 0x7c, 0x19, 0xaf, 0xbb, 0x23, 0x16, 0x33, 0x30, 0xce, 0xd7, 0xc2, 0x8d],
];

// dpki (Debug)
const RVL_CERT_ISSUER_DPKI_TICKET: &str = "Root-CA00000002-XS00000006";
// Debug-signed equivalents of the retail common keys.
#[rustfmt::skip]
const DEBUG_COMMON_KEYS: [KeyBytes; 3] = [
    /* RVL_KEY_DEBUG */
    [0xa1, 0x60, 0x4a, 0x6a, 0x71, 0x23, 0xb5, 0x29, 0xae, 0x8b, 0xec, 0x32, 0xc8, 0x16, 0xfc, 0xaa],
    /* RVL_KEY_KOREAN_DEBUG */
    [0x67, 0x45, 0x8b, 0x6b, 0xc6, 0x23, 0x7b, 0x32, 0x69, 0x98, 0x3c, 0x64, 0x73, 0x48, 0x33, 0x66],
    /* vWii_KEY_DEBUG */
    [0x2f, 0x5c, 0x1b, 0x29, 0x44, 0xe7, 0xfd, 0x6f, 0xc3, 0x97, 0x96, 0x4b, 0x05, 0x76, 0x91, 0xfa],
];
|
||||
|
||||
/// On-disc partition table entry: a shifted offset and a partition type.
#[derive(Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub(crate) struct WiiPartEntry {
    // Partition offset, stored >> 2 (i.e. in 4-byte units).
    pub(crate) offset: U32,
    // Partition type (data / update / channel / other).
    pub(crate) kind: U32,
}

static_assert!(size_of::<WiiPartEntry>() == 8);

impl WiiPartEntry {
    /// Absolute byte offset of the partition (stored value << 2).
    pub(crate) fn offset(&self) -> u64 { (self.offset.get() as u64) << 2 }
}
|
||||
|
||||
/// Parsed information about a single Wii partition.
#[derive(Debug, PartialEq)]
pub(crate) struct WiiPartInfo {
    // Partition group index (0-3) this partition was found in.
    pub(crate) group_idx: u32,
    // Index within the partition group.
    pub(crate) part_idx: u32,
    // Absolute byte offset of the partition on disc.
    pub(crate) offset: u64,
    pub(crate) kind: PartitionKind,
    pub(crate) header: WiiPartitionHeader,
    // NOTE(review): junk_id/junk_start presumably seed the junk-data
    // generator for this partition — confirm against the LFG usage.
    pub(crate) junk_id: [u8; 4],
    pub(crate) junk_start: u64,
    // Decrypted AES-128 title key for this partition.
    pub(crate) title_key: KeyBytes,
}

/// Offset of the partition group table on a Wii disc.
pub(crate) const WII_PART_GROUP_OFF: u64 = 0x40000;
|
||||
|
||||
/// On-disc partition group descriptor: count and shifted table offset.
#[derive(Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub(crate) struct WiiPartGroup {
    // Number of partitions in this group.
    pub(crate) part_count: U32,
    // Offset of the group's entry table, stored >> 2.
    pub(crate) part_entry_off: U32,
}

static_assert!(size_of::<WiiPartGroup>() == 8);

impl WiiPartGroup {
    /// Absolute byte offset of the group's entry table (stored value << 2).
    pub(crate) fn part_entry_off(&self) -> u64 { (self.part_entry_off.get() as u64) << 2 }
}
|
||||
|
||||
/// Common header preceding signed structures (tickets, TMDs).
#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct SignedHeader {
    /// Signature type, always 0x00010001 (RSA-2048)
    pub sig_type: U32,
    /// RSA-2048 signature
    pub sig: [u8; 256],
    _pad: [u8; 60],
}

static_assert!(size_of::<SignedHeader>() == 0x140);

/// A single time-limit entry in a ticket.
#[derive(Debug, Clone, PartialEq, Default, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct TicketTimeLimit {
    // Nonzero when the limit is active.
    pub enable_time_limit: U32,
    pub time_limit: U32,
}

static_assert!(size_of::<TicketTimeLimit>() == 8);
|
||||
|
||||
/// A Wii ticket, containing the encrypted title key and licensing metadata.
#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct Ticket {
    pub header: SignedHeader,
    // Issuer string selects the retail (ppki) or debug (dpki) key set.
    pub sig_issuer: [u8; 64],
    pub ecdh: [u8; 60],
    pub version: u8,
    _pad1: U16,
    // AES-encrypted title key; decrypt with the common key and title-id IV.
    pub title_key: KeyBytes,
    _pad2: u8,
    pub ticket_id: [u8; 8],
    pub console_id: [u8; 4],
    // First 8 bytes of the IV used to decrypt `title_key`.
    pub title_id: [u8; 8],
    _pad3: U16,
    pub ticket_title_version: U16,
    pub permitted_titles_mask: U32,
    pub permit_mask: U32,
    pub title_export_allowed: u8,
    // Index into the retail/debug common key tables.
    pub common_key_idx: u8,
    _pad4: [u8; 48],
    pub content_access_permissions: [u8; 64],
    _pad5: [u8; 2],
    pub time_limits: [TicketTimeLimit; 8],
}

static_assert!(size_of::<Ticket>() == 0x2A4);
|
||||
|
||||
impl Ticket {
    /// Decrypts this ticket's title key.
    ///
    /// The IV is the 8-byte title ID zero-padded to 16 bytes. The AES common
    /// key is chosen by the certificate issuer (retail vs. debug PKI) and the
    /// ticket's `common_key_idx`. Returns an error for unknown issuers or
    /// out-of-range key indices.
    pub fn decrypt_title_key(&self) -> Result<KeyBytes> {
        let mut iv: KeyBytes = [0; 16];
        iv[..8].copy_from_slice(&self.title_id);
        let cert_issuer_ticket =
            CStr::from_bytes_until_nul(&self.sig_issuer).ok().and_then(|c| c.to_str().ok());
        let common_keys = match cert_issuer_ticket {
            Some(RVL_CERT_ISSUER_PPKI_TICKET) => &RETAIL_COMMON_KEYS,
            Some(RVL_CERT_ISSUER_DPKI_TICKET) => &DEBUG_COMMON_KEYS,
            Some(v) => {
                return Err(Error::DiscFormat(format!("unknown certificate issuer {:?}", v)));
            }
            None => {
                return Err(Error::DiscFormat("failed to parse certificate issuer".to_string()));
            }
        };
        let common_key = common_keys.get(self.common_key_idx as usize).ok_or(Error::DiscFormat(
            format!("unknown common key index {}", self.common_key_idx),
        ))?;
        // Decrypt in place on a copy; the ticket itself stays unmodified.
        let mut title_key = self.title_key;
        aes_decrypt(common_key, iv, &mut title_key);
        Ok(title_key)
    }
}
|
||||
|
||||
/// Title metadata (TMD) header.
#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct TmdHeader {
    pub header: SignedHeader,
    pub sig_issuer: [u8; 64],
    pub version: u8,
    pub ca_crl_version: u8,
    pub signer_crl_version: u8,
    pub is_vwii: u8,
    pub ios_id: [u8; 8],
    pub title_id: [u8; 8],
    // NOTE(review): this is a native-endian `u32` while every other multi-byte
    // field here is big-endian (`U32`/`U16`) — on little-endian hosts the raw
    // value will be byte-swapped. Confirm whether this is intentional.
    pub title_type: u32,
    pub group_id: U16,
    _pad1: [u8; 2],
    pub region: U16,
    pub ratings: KeyBytes,
    _pad2: [u8; 12],
    pub ipc_mask: [u8; 12],
    _pad3: [u8; 18],
    pub access_flags: U32,
    pub title_version: U16,
    pub num_contents: U16,
    pub boot_idx: U16,
    pub minor_version: U16,
}

static_assert!(size_of::<TmdHeader>() == 0x1E4);

/// Fixed size of the H3 hash table in a Wii partition.
pub const H3_TABLE_SIZE: usize = 0x18000;
|
||||
|
||||
/// Wii partition header: the ticket plus offsets/sizes of the TMD, cert
/// chain, H3 table, and data area. All offsets are stored >> 2.
#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct WiiPartitionHeader {
    pub ticket: Ticket,
    tmd_size: U32,
    tmd_off: U32,
    cert_chain_size: U32,
    cert_chain_off: U32,
    h3_table_off: U32,
    data_off: U32,
    // Data size is also stored >> 2, unlike the other sizes.
    data_size: U32,
}

static_assert!(size_of::<WiiPartitionHeader>() == 0x2C0);

impl WiiPartitionHeader {
    /// TMD size in bytes.
    pub fn tmd_size(&self) -> u64 { self.tmd_size.get() as u64 }

    /// TMD offset in bytes, relative to the partition start.
    pub fn tmd_off(&self) -> u64 { (self.tmd_off.get() as u64) << 2 }

    /// Certificate chain size in bytes.
    pub fn cert_chain_size(&self) -> u64 { self.cert_chain_size.get() as u64 }

    /// Certificate chain offset in bytes, relative to the partition start.
    pub fn cert_chain_off(&self) -> u64 { (self.cert_chain_off.get() as u64) << 2 }

    /// H3 table offset in bytes, relative to the partition start.
    pub fn h3_table_off(&self) -> u64 { (self.h3_table_off.get() as u64) << 2 }

    /// H3 table size in bytes (fixed).
    pub fn h3_table_size(&self) -> u64 { H3_TABLE_SIZE as u64 }

    /// Data area offset in bytes, relative to the partition start.
    pub fn data_off(&self) -> u64 { (self.data_off.get() as u64) << 2 }

    /// Data area size in bytes.
    pub fn data_size(&self) -> u64 { (self.data_size.get() as u64) << 2 }
}
|
||||
|
||||
/// Decrypted read stream over a single Wii partition's data area.
pub struct PartitionWii {
    // Underlying format-specific block reader.
    io: Box<dyn BlockIO>,
    partition: PartitionInfo,
    // Cached block state (None at EOF / before first read).
    block: Option<Block>,
    block_buf: Box<[u8]>,
    block_idx: u32,
    // Cached decrypted sector (hashes + data).
    sector_buf: Box<[u8; SECTOR_SIZE]>,
    sector: u32,
    // Logical position within the partition's *data* (hash blocks excluded).
    pos: u64,
    // Whether to verify H0/H1/H2 hashes on each decrypted sector.
    verify: bool,
    // Raw metadata captured at open time, exposed via `meta()`.
    raw_tmd: Box<[u8]>,
    raw_cert_chain: Box<[u8]>,
    raw_h3_table: Box<[u8]>,
}
|
||||
|
||||
impl Clone for PartitionWii {
    /// Clones the reader with fresh (empty) caches and position reset to 0.
    ///
    /// Buffers are re-zeroed rather than copied; the sentinel `u32::MAX`
    /// indices force a re-read on first use.
    fn clone(&self) -> Self {
        Self {
            io: self.io.clone(),
            partition: self.partition.clone(),
            block: None,
            block_buf: <u8>::new_box_slice_zeroed(self.block_buf.len()),
            block_idx: u32::MAX,
            sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed(),
            sector: u32::MAX,
            pos: 0,
            verify: self.verify,
            raw_tmd: self.raw_tmd.clone(),
            raw_cert_chain: self.raw_cert_chain.clone(),
            raw_h3_table: self.raw_h3_table.clone(),
        }
    }
}
|
||||
|
||||
impl PartitionWii {
    /// Opens a decrypted read stream over a Wii partition.
    ///
    /// Eagerly reads the partition's TMD, certificate chain, and H3 table
    /// (via a raw [`PartitionGC`] view of the disc) so they are available
    /// from [`PartitionBase::meta`] without further I/O.
    pub fn new(
        inner: Box<dyn BlockIO>,
        disc_header: Box<DiscHeader>,
        partition: &PartitionInfo,
        options: &OpenOptions,
    ) -> Result<Box<Self>> {
        let block_size = inner.block_size();
        // Temporary raw (unencrypted-view) reader over the whole disc.
        let mut reader = PartitionGC::new(inner, disc_header)?;

        // Read TMD, cert chain, and H3 table
        let offset = partition.start_sector as u64 * SECTOR_SIZE as u64;
        reader
            .seek(SeekFrom::Start(offset + partition.header.tmd_off()))
            .context("Seeking to TMD offset")?;
        let raw_tmd: Box<[u8]> = read_box_slice(&mut reader, partition.header.tmd_size() as usize)
            .context("Reading TMD")?;
        reader
            .seek(SeekFrom::Start(offset + partition.header.cert_chain_off()))
            .context("Seeking to cert chain offset")?;
        let raw_cert_chain: Box<[u8]> =
            read_box_slice(&mut reader, partition.header.cert_chain_size() as usize)
                .context("Reading cert chain")?;
        reader
            .seek(SeekFrom::Start(offset + partition.header.h3_table_off()))
            .context("Seeking to H3 table offset")?;
        let raw_h3_table: Box<[u8]> =
            read_box_slice(&mut reader, H3_TABLE_SIZE).context("Reading H3 table")?;

        // Caches start empty; u32::MAX indices force a read on first use.
        Ok(Box::new(Self {
            io: reader.into_inner(),
            partition: partition.clone(),
            block: None,
            block_buf: <u8>::new_box_slice_zeroed(block_size as usize),
            block_idx: u32::MAX,
            sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed(),
            sector: u32::MAX,
            pos: 0,
            verify: options.validate_hashes,
            raw_tmd,
            raw_cert_chain,
            raw_h3_table,
        }))
    }
}
|
||||
|
||||
impl Read for PartitionWii {
    /// Reads decrypted partition data at the current position.
    ///
    /// `pos` addresses the 0x7C00-byte data payload of each sector; the
    /// 0x400-byte hash block is skipped transparently. Returns at most the
    /// remainder of the current sector's payload per call.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        // Map the data-relative position to an absolute disc sector.
        let partition_sector = (self.pos / SECTOR_DATA_SIZE as u64) as u32;
        let sector = self.partition.data_start_sector + partition_sector;
        if sector >= self.partition.data_end_sector {
            return Ok(0);
        }
        let block_idx = (sector as u64 * SECTOR_SIZE as u64 / self.block_buf.len() as u64) as u32;

        // Read new block if necessary
        if block_idx != self.block_idx {
            self.block =
                self.io.read_block(self.block_buf.as_mut(), block_idx, Some(&self.partition))?;
            self.block_idx = block_idx;
        }

        // Decrypt sector if necessary
        if sector != self.sector {
            let Some(block) = &self.block else {
                return Ok(0);
            };
            block.decrypt(
                self.sector_buf.as_mut(),
                self.block_buf.as_ref(),
                block_idx,
                sector,
                &self.partition,
            )?;
            // Optional integrity check of the sector's H0/H1/H2 hashes.
            if self.verify {
                verify_hashes(&self.sector_buf, sector)?;
            }
            self.sector = sector;
        }

        // Copy from the data payload, skipping the leading hash block.
        let offset = (self.pos % SECTOR_DATA_SIZE as u64) as usize;
        let len = min(buf.len(), SECTOR_DATA_SIZE - offset);
        buf[..len]
            .copy_from_slice(&self.sector_buf[HASHES_SIZE + offset..HASHES_SIZE + offset + len]);
        self.pos += len as u64;
        Ok(len)
    }
}
|
||||
|
||||
impl Seek for PartitionWii {
|
||||
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
|
||||
self.pos = match pos {
|
||||
SeekFrom::Start(v) => v,
|
||||
SeekFrom::End(_) => {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::Unsupported,
|
||||
"WiiPartitionReader: SeekFrom::End is not supported".to_string(),
|
||||
));
|
||||
}
|
||||
SeekFrom::Current(v) => self.pos.saturating_add_signed(v),
|
||||
};
|
||||
Ok(self.pos)
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub(crate) fn as_digest(slice: &[u8; 20]) -> digest::Output<Sha1> { (*slice).into() }
|
||||
|
||||
/// Verifies a decrypted Wii sector's H0, H1, and H2 hashes.
///
/// Each sector stores 31 H0 hashes (one per 0x400-byte data block), 8 H1
/// hashes (one per sector in its subgroup of 8), and 8 H2 hashes (one per
/// subgroup in its group of 64 sectors). Returns `InvalidData` on the first
/// mismatch. H3 (per-group, stored outside the sector) is not yet checked.
fn verify_hashes(buf: &[u8; SECTOR_SIZE], sector: u32) -> io::Result<()> {
    // subgroup = sector % 8; group = (sector / 8) % 8 within the H3 group.
    let (mut group, sub_group) = div_rem(sector as usize, 8);
    group %= 8;

    // H0 hashes
    for i in 0..31 {
        let mut hash = Sha1::new();
        // Data blocks start at 0x400; block i is hashed against H0 entry i.
        hash.update(array_ref![buf, (i + 1) * 0x400, 0x400]);
        let expected = as_digest(array_ref![buf, i * 20, 20]);
        let output = hash.finalize();
        if output != expected {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                format!("Invalid H0 hash! (block {:?}) {:x}\n\texpected {:x}", i, output, expected),
            ));
        }
    }

    // H1 hash
    {
        let mut hash = Sha1::new();
        // H1 covers this sector's H0 table (31 * 20 = 0x26C bytes).
        hash.update(array_ref![buf, 0, 0x26C]);
        let expected = as_digest(array_ref![buf, 0x280 + sub_group * 20, 20]);
        let output = hash.finalize();
        if output != expected {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                format!(
                    "Invalid H1 hash! (subgroup {:?}) {:x}\n\texpected {:x}",
                    sub_group, output, expected
                ),
            ));
        }
    }

    // H2 hash
    {
        let mut hash = Sha1::new();
        // H2 covers the H1 table (8 * 20 = 0xA0 bytes at 0x280).
        hash.update(array_ref![buf, 0x280, 0xA0]);
        let expected = as_digest(array_ref![buf, 0x340 + group * 20, 20]);
        let output = hash.finalize();
        if output != expected {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                format!(
                    "Invalid H2 hash! (group {:?}) {:x}\n\texpected {:x}",
                    group, output, expected
                ),
            ));
        }
    }
    // TODO H3 hash
    Ok(())
}
|
||||
|
||||
impl PartitionBase for PartitionWii {
    /// Reads partition metadata, attaching the raw ticket, TMD, cert chain,
    /// and H3 table captured when the partition was opened.
    fn meta(&mut self) -> Result<Box<PartitionMeta>> {
        self.seek(SeekFrom::Start(0)).context("Seeking to partition header")?;
        let mut meta = read_part_meta(self, true)?;
        meta.raw_ticket = Some(Box::from(self.partition.header.ticket.as_bytes()));
        meta.raw_tmd = Some(self.raw_tmd.clone());
        meta.raw_cert_chain = Some(self.raw_cert_chain.clone());
        meta.raw_h3_table = Some(self.raw_h3_table.clone());
        Ok(meta)
    }

    /// Opens a windowed read stream over a single file's data.
    fn open_file(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream> {
        assert_eq!(node.kind(), NodeKind::File);
        // Wii offsets/lengths in FST nodes are stored >> 2; `true` applies the shift.
        self.new_window(node.offset(true), node.length(true))
    }

    /// Reads are most efficient in whole-sector data payloads.
    fn ideal_buffer_size(&self) -> usize { SECTOR_DATA_SIZE }
}
|
||||
174
nod/src/fst.rs
Normal file
174
nod/src/fst.rs
Normal file
@@ -0,0 +1,174 @@
|
||||
//! Disc file system types
|
||||
|
||||
use std::{borrow::Cow, ffi::CStr, mem::size_of};
|
||||
|
||||
use encoding_rs::SHIFT_JIS;
|
||||
use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes};
|
||||
|
||||
use crate::{static_assert, Result};
|
||||
|
||||
/// File system node kind.
#[derive(Clone, Debug, PartialEq)]
pub enum NodeKind {
    /// Node is a file.
    File,
    /// Node is a directory.
    Directory,
    /// Invalid node kind. (Should not normally occur)
    Invalid,
}

/// An individual file system node.
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct Node {
    // 0 = file, 1 = directory; anything else is invalid.
    kind: u8,
    // u24 big-endian
    name_offset: [u8; 3],
    // Files: data offset (Wii: >> 2). Directories: parent node index.
    offset: U32,
    // Files: byte size (Wii: >> 2). Directories: child end index.
    length: U32,
}

static_assert!(size_of::<Node>() == 12);
|
||||
|
||||
impl Node {
    /// File system node type.
    pub fn kind(&self) -> NodeKind {
        match self.kind {
            0 => NodeKind::File,
            1 => NodeKind::Directory,
            _ => NodeKind::Invalid,
        }
    }

    /// Whether the node is a file.
    pub fn is_file(&self) -> bool { self.kind == 0 }

    /// Whether the node is a directory.
    pub fn is_dir(&self) -> bool { self.kind == 1 }

    /// Offset in the string table to the filename.
    pub fn name_offset(&self) -> u32 {
        // Stored as a big-endian u24; widen to u32.
        u32::from_be_bytes([0, self.name_offset[0], self.name_offset[1], self.name_offset[2]])
    }

    /// For files, this is the partition offset of the file data. (Wii: >> 2)
    ///
    /// For directories, this is the parent node index in the FST.
    pub fn offset(&self, is_wii: bool) -> u64 {
        // Wii file offsets are stored shifted right by 2.
        if is_wii && self.kind == 0 {
            self.offset.get() as u64 * 4
        } else {
            self.offset.get() as u64
        }
    }

    /// For files, this is the byte size of the file. (Wii: >> 2)
    ///
    /// For directories, this is the child end index in the FST.
    ///
    /// Number of child files and directories recursively is `length - offset`.
    pub fn length(&self, is_wii: bool) -> u64 {
        // Wii file sizes are stored shifted right by 2.
        if is_wii && self.kind == 0 {
            self.length.get() as u64 * 4
        } else {
            self.length.get() as u64
        }
    }
}
|
||||
|
||||
/// A view into the file system tree (FST).
pub struct Fst<'a> {
    /// All nodes, including the root at index 0.
    pub nodes: &'a [Node],
    /// Null-terminated Shift-JIS names, indexed by `Node::name_offset`.
    pub string_table: &'a [u8],
}
|
||||
|
||||
impl<'a> Fst<'a> {
|
||||
/// Create a new FST view from a buffer.
|
||||
pub fn new(buf: &'a [u8]) -> Result<Self, &'static str> {
|
||||
let Some(root_node) = Node::ref_from_prefix(buf) else {
|
||||
return Err("FST root node not found");
|
||||
};
|
||||
// String table starts after the last node
|
||||
let string_base = root_node.length(false) * size_of::<Node>() as u64;
|
||||
if string_base >= buf.len() as u64 {
|
||||
return Err("FST string table out of bounds");
|
||||
}
|
||||
let (node_buf, string_table) = buf.split_at(string_base as usize);
|
||||
let nodes = Node::slice_from(node_buf).unwrap();
|
||||
Ok(Self { nodes, string_table })
|
||||
}
|
||||
|
||||
/// Iterate over the nodes in the FST.
|
||||
pub fn iter(&self) -> FstIter { FstIter { fst: self, idx: 1 } }
|
||||
|
||||
/// Get the name of a node.
|
||||
pub fn get_name(&self, node: &Node) -> Result<Cow<str>, String> {
|
||||
let name_buf = self.string_table.get(node.name_offset() as usize..).ok_or_else(|| {
|
||||
format!(
|
||||
"FST: name offset {} out of bounds (string table size: {})",
|
||||
node.name_offset(),
|
||||
self.string_table.len()
|
||||
)
|
||||
})?;
|
||||
let c_string = CStr::from_bytes_until_nul(name_buf).map_err(|_| {
|
||||
format!("FST: name at offset {} not null-terminated", node.name_offset())
|
||||
})?;
|
||||
let (decoded, _, errors) = SHIFT_JIS.decode(c_string.to_bytes());
|
||||
if errors {
|
||||
return Err(format!("FST: Failed to decode name at offset {}", node.name_offset()));
|
||||
}
|
||||
Ok(decoded)
|
||||
}
|
||||
|
||||
/// Finds a particular file or directory by path.
|
||||
pub fn find(&self, path: &str) -> Option<(usize, &Node)> {
|
||||
let mut split = path.trim_matches('/').split('/');
|
||||
let mut current = split.next()?;
|
||||
let mut idx = 1;
|
||||
let mut stop_at = None;
|
||||
while let Some(node) = self.nodes.get(idx) {
|
||||
if self.get_name(node).as_ref().map_or(false, |name| name.eq_ignore_ascii_case(current))
|
||||
{
|
||||
if let Some(next) = split.next() {
|
||||
current = next;
|
||||
} else {
|
||||
return Some((idx, node));
|
||||
}
|
||||
// Descend into directory
|
||||
idx += 1;
|
||||
stop_at = Some(node.length(false) as usize + idx);
|
||||
} else if node.is_dir() {
|
||||
// Skip directory
|
||||
idx = node.length(false) as usize;
|
||||
} else {
|
||||
// Skip file
|
||||
idx += 1;
|
||||
}
|
||||
if let Some(stop) = stop_at {
|
||||
if idx >= stop {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Iterator over the nodes in an FST.
pub struct FstIter<'a> {
    fst: &'a Fst<'a>,
    // Next node index to yield; starts at 1 to skip the root node.
    idx: usize,
}

impl<'a> Iterator for FstIter<'a> {
    // (node index, node, decoded name or decode error)
    type Item = (usize, &'a Node, Result<Cow<'a, str>, String>);

    fn next(&mut self) -> Option<Self::Item> {
        let idx = self.idx;
        let node = self.fst.nodes.get(idx)?;
        let name = self.fst.get_name(node);
        self.idx += 1;
        Some((idx, node, name))
    }
}
|
||||
279
nod/src/io/block.rs
Normal file
279
nod/src/io/block.rs
Normal file
@@ -0,0 +1,279 @@
|
||||
use std::{cmp::min, fs, fs::File, io, path::Path};
|
||||
|
||||
use dyn_clone::DynClone;
|
||||
use zerocopy::transmute_ref;
|
||||
|
||||
use crate::{
|
||||
array_ref,
|
||||
disc::{
|
||||
hashes::HashTable,
|
||||
wii::{WiiPartitionHeader, HASHES_SIZE, SECTOR_DATA_SIZE},
|
||||
SECTOR_SIZE,
|
||||
},
|
||||
io::{aes_decrypt, aes_encrypt, ciso, iso, nfs, wbfs, wia, KeyBytes, MagicBytes},
|
||||
util::{lfg::LaggedFibonacci, read::read_from},
|
||||
DiscHeader, DiscMeta, Error, PartitionHeader, PartitionKind, Result, ResultContext,
|
||||
};
|
||||
|
||||
/// Block I/O trait for reading disc images.
///
/// Implemented per container format (ISO, CISO, WBFS, NFS, WIA/RVZ). A
/// "block" is the format's native unit of storage, always a whole multiple
/// of the 0x8000-byte disc sector.
pub trait BlockIO: DynClone + Send + Sync {
    /// Reads a block from the disc image.
    ///
    /// `out` must be `block_size()` bytes. `partition` provides keys/layout
    /// for blocks inside a Wii partition. Returns `None` past the end of the
    /// image, otherwise a [`Block`] describing how to interpret `out`.
    fn read_block(
        &mut self,
        out: &mut [u8],
        block: u32,
        partition: Option<&PartitionInfo>,
    ) -> io::Result<Option<Block>>;

    /// The format's block size in bytes. Must be a multiple of the sector size (0x8000).
    fn block_size(&self) -> u32;

    /// Returns extra metadata included in the disc file format, if any.
    fn meta(&self) -> DiscMeta;
}
|
||||
|
||||
dyn_clone::clone_trait_object!(BlockIO);
|
||||
|
||||
/// Creates a new [`BlockIO`] instance.
|
||||
pub fn open(filename: &Path) -> Result<Box<dyn BlockIO>> {
|
||||
let path_result = fs::canonicalize(filename);
|
||||
if let Err(err) = path_result {
|
||||
return Err(Error::Io(format!("Failed to open {}", filename.display()), err));
|
||||
}
|
||||
let path = path_result.as_ref().unwrap();
|
||||
let meta = fs::metadata(path);
|
||||
if let Err(err) = meta {
|
||||
return Err(Error::Io(format!("Failed to open {}", filename.display()), err));
|
||||
}
|
||||
if !meta.unwrap().is_file() {
|
||||
return Err(Error::DiscFormat(format!("Input is not a file: {}", filename.display())));
|
||||
}
|
||||
let magic: MagicBytes = {
|
||||
let mut file =
|
||||
File::open(path).with_context(|| format!("Opening file {}", filename.display()))?;
|
||||
read_from(&mut file)
|
||||
.with_context(|| format!("Reading magic bytes from {}", filename.display()))?
|
||||
};
|
||||
match magic {
|
||||
ciso::CISO_MAGIC => Ok(ciso::DiscIOCISO::new(path)?),
|
||||
nfs::NFS_MAGIC => match path.parent() {
|
||||
Some(parent) if parent.is_dir() => Ok(nfs::DiscIONFS::new(path.parent().unwrap())?),
|
||||
_ => Err(Error::DiscFormat("Failed to locate NFS parent directory".to_string())),
|
||||
},
|
||||
wbfs::WBFS_MAGIC => Ok(wbfs::DiscIOWBFS::new(path)?),
|
||||
wia::WIA_MAGIC | wia::RVZ_MAGIC => Ok(wia::DiscIOWIA::new(path)?),
|
||||
_ => Ok(iso::DiscIOISO::new(path)?),
|
||||
}
|
||||
}
|
||||
|
||||
/// Resolved layout and key material for one Wii partition.
#[derive(Debug, Clone)]
pub struct PartitionInfo {
    // Index of this partition in table order.
    pub index: usize,
    pub kind: PartitionKind,
    // Absolute sector of the partition header.
    pub start_sector: u32,
    // Absolute sector range of the encrypted data area [start, end).
    pub data_start_sector: u32,
    pub data_end_sector: u32,
    // Decrypted AES-128 title key.
    pub key: KeyBytes,
    pub header: Box<WiiPartitionHeader>,
    // Inner headers read from the decrypted partition data.
    pub disc_header: Box<DiscHeader>,
    pub partition_header: Box<PartitionHeader>,
    // Precomputed hash table, when available/needed for rebuilding hashes.
    pub hash_table: Option<HashTable>,
}
|
||||
|
||||
/// Describes how the bytes returned by [`BlockIO::read_block`] should be
/// interpreted before use.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Block {
    /// Raw data or encrypted Wii partition data
    Raw,
    /// Decrypted Wii partition data
    PartDecrypted {
        /// Whether the sector has its hash block intact
        has_hashes: bool,
    },
    /// Wii partition junk data
    Junk,
    /// All zeroes
    Zero,
}
|
||||
|
||||
impl Block {
    /// Decrypts the block's data (if necessary) and writes it to the output buffer.
    ///
    /// `data` is the block buffer filled by `read_block`; `abs_sector` is the
    /// absolute disc sector requested. Junk/Zero blocks are regenerated and
    /// given a rebuilt hash block rather than read from `data`.
    pub(crate) fn decrypt(
        self,
        out: &mut [u8; SECTOR_SIZE],
        data: &[u8],
        block_idx: u32,
        abs_sector: u32,
        partition: &PartitionInfo,
    ) -> io::Result<()> {
        // Sector index within this block.
        let rel_sector = abs_sector - self.start_sector(block_idx, data.len());
        match self {
            Block::Raw => {
                // Raw = still encrypted in a partition context; decrypt it.
                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, rel_sector)?);
                decrypt_sector(out, partition);
            }
            Block::PartDecrypted { has_hashes } => {
                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, rel_sector)?);
                if !has_hashes {
                    rebuild_hash_block(out, abs_sector, partition);
                }
            }
            Block::Junk => {
                generate_junk(out, abs_sector, Some(partition), &partition.disc_header);
                rebuild_hash_block(out, abs_sector, partition);
            }
            Block::Zero => {
                out.fill(0);
                rebuild_hash_block(out, abs_sector, partition);
            }
        }
        Ok(())
    }

    /// Encrypts the block's data (if necessary) and writes it to the output buffer.
    ///
    /// Mirror of [`Block::decrypt`]: Raw data is assumed already encrypted
    /// and copied through; everything else is reconstructed (hashes rebuilt
    /// if needed) and then encrypted with the partition's title key.
    pub(crate) fn encrypt(
        self,
        out: &mut [u8; SECTOR_SIZE],
        data: &[u8],
        block_idx: u32,
        abs_sector: u32,
        partition: &PartitionInfo,
    ) -> io::Result<()> {
        let rel_sector = abs_sector - self.start_sector(block_idx, data.len());
        match self {
            Block::Raw => {
                // Already encrypted; pass through unchanged.
                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, rel_sector)?);
            }
            Block::PartDecrypted { has_hashes } => {
                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, rel_sector)?);
                if !has_hashes {
                    rebuild_hash_block(out, abs_sector, partition);
                }
                encrypt_sector(out, partition);
            }
            Block::Junk => {
                generate_junk(out, abs_sector, Some(partition), &partition.disc_header);
                rebuild_hash_block(out, abs_sector, partition);
                encrypt_sector(out, partition);
            }
            Block::Zero => {
                out.fill(0);
                rebuild_hash_block(out, abs_sector, partition);
                encrypt_sector(out, partition);
            }
        }
        Ok(())
    }

    /// Copies the block's raw data to the output buffer.
    ///
    /// Used outside partition data, where no key material applies.
    /// `PartDecrypted` cannot be represented as raw and is an error.
    pub(crate) fn copy_raw(
        self,
        out: &mut [u8; SECTOR_SIZE],
        data: &[u8],
        block_idx: u32,
        abs_sector: u32,
        disc_header: &DiscHeader,
    ) -> io::Result<()> {
        match self {
            Block::Raw => {
                out.copy_from_slice(block_sector::<SECTOR_SIZE>(
                    data,
                    abs_sector - self.start_sector(block_idx, data.len()),
                )?);
            }
            Block::PartDecrypted { .. } => {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidData,
                    "Cannot copy decrypted data as raw",
                ));
            }
            Block::Junk => generate_junk(out, abs_sector, None, disc_header),
            Block::Zero => out.fill(0),
        }
        Ok(())
    }

    /// Returns the start sector of the block.
    fn start_sector(&self, index: u32, block_size: usize) -> u32 {
        (index as u64 * block_size as u64 / SECTOR_SIZE as u64) as u32
    }
}
|
||||
|
||||
/// Returns a reference to the fixed-size sector `sector_idx` within a block buffer.
///
/// # Errors
/// Returns `InvalidData` if `data` is not a multiple of `N` bytes or if the
/// sector index is out of range.
#[inline(always)]
fn block_sector<const N: usize>(data: &[u8], sector_idx: u32) -> io::Result<&[u8; N]> {
    if data.len() % N != 0 {
        return Err(io::Error::new(
            io::ErrorKind::InvalidData,
            format!("Expected block size {} to be a multiple of {}", data.len(), N),
        ));
    }
    let offset = sector_idx as usize * N;
    data.get(offset..offset + N)
        .ok_or_else(|| {
            io::Error::new(
                io::ErrorKind::InvalidData,
                format!(
                    "Sector {} out of range (block size {}, sector size {})",
                    sector_idx,
                    data.len(),
                    N
                ),
            )
        })
        // Safe slice -> array-ref conversion; the slice is exactly N bytes,
        // so this can never fail. Replaces the previous `unsafe` pointer cast.
        .map(|v| v.try_into().expect("slice length mismatch"))
}
|
||||
|
||||
/// Fills `out` with deterministic "junk" data for the given sector.
///
/// Junk data is produced by a lagged Fibonacci generator seeded from the game ID
/// and disc number, so it can be regenerated losslessly. For partition data the
/// position is relative to the partition's decrypted data stream (hash blocks
/// excluded); otherwise it is the absolute disc offset.
fn generate_junk(
    out: &mut [u8; SECTOR_SIZE],
    sector: u32,
    partition: Option<&PartitionInfo>,
    disc_header: &DiscHeader,
) {
    let mut pos = if let Some(partition) = partition {
        // Position in the decrypted data stream: SECTOR_DATA_SIZE per sector.
        (sector - partition.data_start_sector) as u64 * SECTOR_DATA_SIZE as u64
    } else {
        sector as u64 * SECTOR_SIZE as u64
    };
    // For partition sectors, leave the hash block area zeroed (rebuilt elsewhere).
    let mut offset = if partition.is_some() { HASHES_SIZE } else { 0 };
    out[..offset].fill(0);
    while offset < SECTOR_SIZE {
        // The LFG spans a single sector of the decrypted data,
        // so we may need to initialize it multiple times
        let mut lfg = LaggedFibonacci::default();
        lfg.init_with_seed(*array_ref![disc_header.game_id, 0, 4], disc_header.disc_num, pos);
        // End of the current LFG-aligned span: pos rounded up to the next sector.
        let sector_end = (pos + SECTOR_SIZE as u64) & !(SECTOR_SIZE as u64 - 1);
        let len = min(SECTOR_SIZE - offset, (sector_end - pos) as usize);
        lfg.fill(&mut out[offset..offset + len]);
        pos += len as u64;
        offset += len;
    }
}
|
||||
|
||||
/// Restores the H0/H1/H2 hash tables at the start of a decrypted Wii sector
/// from the partition's precomputed hash table. No-op if hashes are unavailable.
fn rebuild_hash_block(out: &mut [u8; SECTOR_SIZE], sector: u32, partition: &PartitionInfo) {
    let Some(hash_table) = partition.hash_table.as_ref() else {
        return;
    };
    let sector_idx = (sector - partition.data_start_sector) as usize;
    // H0: 31 hashes (20 bytes each = 0x26C) covering this sector's data blocks.
    let h0_hashes: &[u8; 0x26C] =
        transmute_ref!(array_ref![hash_table.h0_hashes, sector_idx * 31, 31]);
    out[0..0x26C].copy_from_slice(h0_hashes);
    // H1: 8 hashes (0xA0 bytes) shared by the 8-sector subgroup (index rounded
    // down to a multiple of 8).
    let h1_hashes: &[u8; 0xA0] =
        transmute_ref!(array_ref![hash_table.h1_hashes, sector_idx & !7, 8]);
    out[0x280..0x320].copy_from_slice(h1_hashes);
    // H2: 8 hashes shared by the 64-sector group.
    let h2_hashes: &[u8; 0xA0] =
        transmute_ref!(array_ref![hash_table.h2_hashes, (sector_idx / 8) & !7, 8]);
    out[0x340..0x3E0].copy_from_slice(h2_hashes);
}
|
||||
|
||||
/// Encrypts a full Wii sector in place with AES-128-CBC.
///
/// Order matters: the hash block is encrypted first with a zero IV, then the
/// data area uses bytes 0x3D0..0x3E0 of the *encrypted* hash block as its IV.
fn encrypt_sector(out: &mut [u8; SECTOR_SIZE], partition: &PartitionInfo) {
    aes_encrypt(&partition.key, [0u8; 16], &mut out[..HASHES_SIZE]);
    // Data IV from encrypted hash block
    let iv = *array_ref![out, 0x3D0, 16];
    aes_encrypt(&partition.key, iv, &mut out[HASHES_SIZE..]);
}
|
||||
|
||||
/// Decrypts a full Wii sector in place with AES-128-CBC.
///
/// Order matters: the data IV must be captured from the still-encrypted hash
/// block *before* the hash block itself is decrypted.
fn decrypt_sector(out: &mut [u8; SECTOR_SIZE], partition: &PartitionInfo) {
    // Data IV from encrypted hash block
    let iv = *array_ref![out, 0x3D0, 16];
    aes_decrypt(&partition.key, [0u8; 16], &mut out[..HASHES_SIZE]);
    aes_decrypt(&partition.key, iv, &mut out[HASHES_SIZE..]);
}
|
||||
144
nod/src/io/ciso.rs
Normal file
144
nod/src/io/ciso.rs
Normal file
@@ -0,0 +1,144 @@
|
||||
use std::{
|
||||
io,
|
||||
io::{Read, Seek, SeekFrom},
|
||||
mem::size_of,
|
||||
path::Path,
|
||||
};
|
||||
|
||||
use zerocopy::{little_endian::*, AsBytes, FromBytes, FromZeroes};
|
||||
|
||||
use crate::{
|
||||
disc::SECTOR_SIZE,
|
||||
io::{
|
||||
block::{Block, BlockIO, PartitionInfo},
|
||||
nkit::NKitHeader,
|
||||
split::SplitFileReader,
|
||||
Format, MagicBytes,
|
||||
},
|
||||
static_assert,
|
||||
util::read::read_from,
|
||||
DiscMeta, Error, Result, ResultContext,
|
||||
};
|
||||
|
||||
pub const CISO_MAGIC: MagicBytes = *b"CISO";
|
||||
pub const CISO_MAP_SIZE: usize = SECTOR_SIZE - 8;
|
||||
|
||||
/// On-disk CISO header: magic, block size, and a one-byte-per-block presence map.
/// Occupies exactly one sector (see static_assert below).
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
struct CISOHeader {
    /// "CISO"
    magic: MagicBytes,
    // little endian
    block_size: U32,
    /// 1 if the block is stored in the file, anything else means absent.
    block_present: [u8; CISO_MAP_SIZE],
}
|
||||
|
||||
static_assert!(size_of::<CISOHeader>() == SECTOR_SIZE);
|
||||
|
||||
/// CISO disc image reader.
pub struct DiscIOCISO {
    /// Underlying (possibly split) image file(s).
    inner: SplitFileReader,
    /// Parsed CISO header.
    header: CISOHeader,
    /// Logical block index -> physical block index; u16::MAX when absent.
    block_map: [u16; CISO_MAP_SIZE],
    /// Optional NKit metadata trailing the CISO data.
    nkit_header: Option<NKitHeader>,
}
|
||||
|
||||
impl Clone for DiscIOCISO {
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
inner: self.inner.clone(),
|
||||
header: self.header.clone(),
|
||||
block_map: self.block_map,
|
||||
nkit_header: self.nkit_header.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DiscIOCISO {
    /// Opens a CISO image, validating the magic and file size, and building the
    /// logical-to-physical block map.
    pub fn new(filename: &Path) -> Result<Box<Self>> {
        let mut inner = SplitFileReader::new(filename)?;

        // Read header
        let header: CISOHeader = read_from(&mut inner).context("Reading CISO header")?;
        if header.magic != CISO_MAGIC {
            return Err(Error::DiscFormat("Invalid CISO magic".to_string()));
        }

        // Build block map: present blocks get sequential physical indices,
        // absent blocks map to u16::MAX.
        let mut block_map = [0u16; CISO_MAP_SIZE];
        let mut block = 0u16;
        for (presence, out) in header.block_present.iter().zip(block_map.iter_mut()) {
            if *presence == 1 {
                *out = block;
                block += 1;
            } else {
                *out = u16::MAX;
            }
        }
        // Sanity check: the file must be large enough for all mapped blocks.
        let file_size = SECTOR_SIZE as u64 + block as u64 * header.block_size.get() as u64;
        if file_size > inner.len() {
            return Err(Error::DiscFormat(format!(
                "CISO file size mismatch: expected at least {} bytes, got {}",
                file_size,
                inner.len()
            )));
        }

        // Read NKit header if present (after CISO data)
        let nkit_header = if inner.len() > file_size + 4 {
            inner.seek(SeekFrom::Start(file_size)).context("Seeking to NKit header")?;
            NKitHeader::try_read_from(&mut inner, header.block_size.get(), true)
        } else {
            None
        };

        // Reset reader
        inner.reset();
        Ok(Box::new(Self { inner, header, block_map, nkit_header }))
    }
}
|
||||
|
||||
impl BlockIO for DiscIOCISO {
    /// Reads one logical block; absent blocks resolve to junk (per the NKit
    /// bitstream, when present) or zeroes.
    fn read_block(
        &mut self,
        out: &mut [u8],
        block: u32,
        _partition: Option<&PartitionInfo>,
    ) -> io::Result<Option<Block>> {
        if block >= CISO_MAP_SIZE as u32 {
            // Out of bounds
            return Ok(None);
        }

        // Find the block in the map
        let phys_block = self.block_map[block as usize];
        if phys_block == u16::MAX {
            // Check if block is junk data
            if self.nkit_header.as_ref().is_some_and(|h| h.is_junk_block(block).unwrap_or(false)) {
                return Ok(Some(Block::Junk));
            };

            // Otherwise, read zeroes
            return Ok(Some(Block::Zero));
        }

        // Read block: physical blocks are stored contiguously after the header.
        let file_offset = size_of::<CISOHeader>() as u64
            + phys_block as u64 * self.header.block_size.get() as u64;
        self.inner.seek(SeekFrom::Start(file_offset))?;
        self.inner.read_exact(out)?;
        Ok(Some(Block::Raw))
    }

    fn block_size(&self) -> u32 { self.header.block_size.get() }

    /// Base metadata from the CISO header, augmented with NKit data if present.
    fn meta(&self) -> DiscMeta {
        let mut result = DiscMeta {
            format: Format::Ciso,
            block_size: Some(self.header.block_size.get()),
            ..Default::default()
        };
        if let Some(nkit_header) = &self.nkit_header {
            nkit_header.apply(&mut result);
        }
        result
    }
}
|
||||
62
nod/src/io/iso.rs
Normal file
62
nod/src/io/iso.rs
Normal file
@@ -0,0 +1,62 @@
|
||||
use std::{
|
||||
io,
|
||||
io::{Read, Seek},
|
||||
path::Path,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
disc::SECTOR_SIZE,
|
||||
io::{
|
||||
block::{Block, BlockIO, PartitionInfo},
|
||||
split::SplitFileReader,
|
||||
Format,
|
||||
},
|
||||
DiscMeta, Error, Result,
|
||||
};
|
||||
|
||||
/// Raw ISO disc image reader (optionally split across multiple files).
#[derive(Clone)]
pub struct DiscIOISO {
    /// Underlying (possibly split) image file(s).
    inner: SplitFileReader,
}
|
||||
|
||||
impl DiscIOISO {
|
||||
pub fn new(filename: &Path) -> Result<Box<Self>> {
|
||||
let inner = SplitFileReader::new(filename)?;
|
||||
if inner.len() % SECTOR_SIZE as u64 != 0 {
|
||||
return Err(Error::DiscFormat(
|
||||
"ISO size is not a multiple of sector size (0x8000 bytes)".to_string(),
|
||||
));
|
||||
}
|
||||
Ok(Box::new(Self { inner }))
|
||||
}
|
||||
}
|
||||
|
||||
impl BlockIO for DiscIOISO {
|
||||
fn read_block(
|
||||
&mut self,
|
||||
out: &mut [u8],
|
||||
block: u32,
|
||||
_partition: Option<&PartitionInfo>,
|
||||
) -> io::Result<Option<Block>> {
|
||||
let offset = block as u64 * SECTOR_SIZE as u64;
|
||||
if offset >= self.inner.len() {
|
||||
// End of file
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
self.inner.seek(io::SeekFrom::Start(offset))?;
|
||||
self.inner.read_exact(out)?;
|
||||
Ok(Some(Block::Raw))
|
||||
}
|
||||
|
||||
fn block_size(&self) -> u32 { SECTOR_SIZE as u32 }
|
||||
|
||||
fn meta(&self) -> DiscMeta {
|
||||
DiscMeta {
|
||||
format: Format::Iso,
|
||||
lossless: true,
|
||||
disc_size: Some(self.inner.len()),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
139
nod/src/io/mod.rs
Normal file
139
nod/src/io/mod.rs
Normal file
@@ -0,0 +1,139 @@
|
||||
//! Disc file format related logic (CISO, NFS, WBFS, WIA, etc.)
|
||||
|
||||
use std::fmt;
|
||||
|
||||
use crate::{streams::ReadStream, Result};
|
||||
|
||||
pub(crate) mod block;
|
||||
pub(crate) mod ciso;
|
||||
pub(crate) mod iso;
|
||||
pub(crate) mod nfs;
|
||||
pub(crate) mod nkit;
|
||||
pub(crate) mod split;
|
||||
pub(crate) mod wbfs;
|
||||
pub(crate) mod wia;
|
||||
|
||||
/// SHA-1 hash bytes
|
||||
pub(crate) type HashBytes = [u8; 20];
|
||||
|
||||
/// AES key bytes
|
||||
pub(crate) type KeyBytes = [u8; 16];
|
||||
|
||||
/// Magic bytes
|
||||
pub(crate) type MagicBytes = [u8; 4];
|
||||
|
||||
/// Abstraction over supported disc file formats.
pub trait DiscIO: Send + Sync {
    /// Opens a new read stream for the disc file(s).
    /// Generally does _not_ need to be used directly.
    fn open(&self) -> Result<Box<dyn ReadStream + '_>>;

    /// Returns extra metadata included in the disc file format, if any.
    /// The default implementation returns empty metadata.
    fn meta(&self) -> Result<DiscMeta> { Ok(DiscMeta::default()) }

    /// If None, the file format does not store the original disc size. (e.g. WBFS, NFS)
    fn disc_size(&self) -> Option<u64>;
}
|
||||
|
||||
/// Supported disc image container formats.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum Format {
    /// Raw ISO
    #[default]
    Iso,
    /// CISO
    Ciso,
    /// NFS (Wii U VC)
    Nfs,
    /// RVZ
    Rvz,
    /// WBFS
    Wbfs,
    /// WIA
    Wia,
}

impl fmt::Display for Format {
    /// Writes the conventional uppercase name of the format.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let name = match self {
            Format::Iso => "ISO",
            Format::Ciso => "CISO",
            Format::Nfs => "NFS",
            Format::Rvz => "RVZ",
            Format::Wbfs => "WBFS",
            Format::Wia => "WIA",
        };
        f.write_str(name)
    }
}
|
||||
|
||||
/// Compression algorithms used by the supported container formats.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum Compression {
    /// No compression
    #[default]
    None,
    /// Purge (WIA only)
    Purge,
    /// BZIP2
    Bzip2,
    /// LZMA
    Lzma,
    /// LZMA2
    Lzma2,
    /// Zstandard
    Zstandard,
}

impl fmt::Display for Compression {
    /// Writes the human-readable algorithm name.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let name = match self {
            Compression::None => "None",
            Compression::Purge => "Purge",
            Compression::Bzip2 => "BZIP2",
            Compression::Lzma => "LZMA",
            Compression::Lzma2 => "LZMA2",
            Compression::Zstandard => "Zstandard",
        };
        f.write_str(name)
    }
}
|
||||
|
||||
/// Extra metadata about the underlying disc file format.
///
/// `Default` yields the most conservative values: raw ISO, no compression,
/// nothing stored, no hashes.
#[derive(Debug, Clone, Default)]
pub struct DiscMeta {
    /// The disc file format.
    pub format: Format,
    /// The format's compression algorithm.
    pub compression: Compression,
    /// If the format uses blocks, the block size in bytes.
    pub block_size: Option<u32>,
    /// Whether Wii partitions are stored decrypted in the format.
    pub decrypted: bool,
    /// Whether the format omits Wii partition data hashes.
    pub needs_hash_recovery: bool,
    /// Whether the format supports recovering the original disc data losslessly.
    pub lossless: bool,
    /// The original disc's size in bytes, if stored by the format.
    pub disc_size: Option<u64>,
    /// The original disc's CRC32 hash, if stored by the format.
    pub crc32: Option<u32>,
    /// The original disc's MD5 hash, if stored by the format.
    pub md5: Option<[u8; 16]>,
    /// The original disc's SHA-1 hash, if stored by the format.
    pub sha1: Option<[u8; 20]>,
    /// The original disc's XXH64 hash, if stored by the format.
    pub xxhash64: Option<u64>,
}
|
||||
|
||||
/// Encrypts data in-place using AES-128-CBC with the given key and IV.
///
/// `data.len()` must be a multiple of the AES block size (16 bytes); no
/// padding is applied.
pub(crate) fn aes_encrypt(key: &KeyBytes, iv: KeyBytes, data: &mut [u8]) {
    use aes::cipher::{block_padding::NoPadding, BlockEncryptMut, KeyIvInit};
    <cbc::Encryptor<aes::Aes128>>::new(key.into(), &aes::Block::from(iv))
        .encrypt_padded_mut::<NoPadding>(data, data.len())
        .unwrap(); // Safe: using NoPadding
}
|
||||
|
||||
/// Decrypts data in-place using AES-128-CBC with the given key and IV.
///
/// `data.len()` must be a multiple of the AES block size (16 bytes); no
/// padding is removed.
pub(crate) fn aes_decrypt(key: &KeyBytes, iv: KeyBytes, data: &mut [u8]) {
    use aes::cipher::{block_padding::NoPadding, BlockDecryptMut, KeyIvInit};
    <cbc::Decryptor<aes::Aes128>>::new(key.into(), &aes::Block::from(iv))
        .decrypt_padded_mut::<NoPadding>(data)
        .unwrap(); // Safe: using NoPadding
}
|
||||
253
nod/src/io/nfs.rs
Normal file
253
nod/src/io/nfs.rs
Normal file
@@ -0,0 +1,253 @@
|
||||
use std::{
|
||||
fs::File,
|
||||
io,
|
||||
io::{BufReader, Read, Seek, SeekFrom},
|
||||
mem::size_of,
|
||||
path::{Component, Path, PathBuf},
|
||||
};
|
||||
|
||||
use zerocopy::{big_endian::U32, AsBytes, FromBytes, FromZeroes};
|
||||
|
||||
use crate::{
|
||||
disc::SECTOR_SIZE,
|
||||
io::{
|
||||
aes_decrypt,
|
||||
block::{Block, BlockIO, PartitionInfo},
|
||||
split::SplitFileReader,
|
||||
Format, KeyBytes, MagicBytes,
|
||||
},
|
||||
static_assert,
|
||||
util::read::read_from,
|
||||
DiscMeta, Error, Result, ResultContext,
|
||||
};
|
||||
|
||||
pub const NFS_MAGIC: MagicBytes = *b"EGGS";
|
||||
pub const NFS_END_MAGIC: MagicBytes = *b"SGGE";
|
||||
|
||||
/// A contiguous range of disc sectors stored in the NFS files (big-endian).
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
struct LBARange {
    /// First logical disc sector covered by this range.
    start_sector: U32,
    /// Number of sectors in the range.
    num_sectors: U32,
}
|
||||
|
||||
/// NFS container header (big-endian), stored at the start of the first file.
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
struct NFSHeader {
    /// "EGGS"
    magic: MagicBytes,
    version: U32,
    // Unknown fields; not interpreted by this reader.
    unk1: U32,
    unk2: U32,
    /// Number of valid entries in `lba_ranges` (at most 61).
    num_lba_ranges: U32,
    lba_ranges: [LBARange; 61],
    /// "SGGE"
    end_magic: MagicBytes,
}
|
||||
|
||||
static_assert!(size_of::<NFSHeader>() == 0x200);
|
||||
|
||||
impl NFSHeader {
    /// Validates the start/end magic values and the LBA range count.
    fn validate(&self) -> Result<()> {
        if self.magic != NFS_MAGIC {
            return Err(Error::DiscFormat("Invalid NFS magic".to_string()));
        }
        if self.num_lba_ranges.get() > 61 {
            return Err(Error::DiscFormat("Invalid NFS LBA range count".to_string()));
        }
        if self.end_magic != NFS_END_MAGIC {
            return Err(Error::DiscFormat("Invalid NFS end magic".to_string()));
        }
        Ok(())
    }

    /// Returns only the populated LBA ranges.
    fn lba_ranges(&self) -> &[LBARange] { &self.lba_ranges[..self.num_lba_ranges.get() as usize] }

    /// Number of hif_*.nfs files: total stored bytes (header + all stored
    /// sectors) divided by the 0xFA00000-byte per-file cap, rounded up.
    fn calculate_num_files(&self) -> u32 {
        let sector_count =
            self.lba_ranges().iter().fold(0u32, |acc, range| acc + range.num_sectors.get());
        (((sector_count as u64) * (SECTOR_SIZE as u64)
            + (size_of::<NFSHeader>() as u64 + 0xF9FFFFFu64))
            / 0xFA00000u64) as u32
    }

    /// Maps a logical disc sector to its physical index within the concatenated
    /// NFS data, or u32::MAX if the sector is not stored (implied zero data).
    fn phys_sector(&self, sector: u32) -> u32 {
        let mut cur_sector = 0u32;
        for range in self.lba_ranges().iter() {
            if sector >= range.start_sector.get()
                && sector - range.start_sector.get() < range.num_sectors.get()
            {
                return cur_sector + (sector - range.start_sector.get());
            }
            cur_sector += range.num_sectors.get();
        }
        u32::MAX
    }
}
|
||||
|
||||
/// NFS (Wii U virtual console) disc image reader.
pub struct DiscIONFS {
    /// Underlying hif_*.nfs files, read as one concatenated stream.
    inner: SplitFileReader,
    /// Parsed NFS header from the first file.
    header: NFSHeader,
    /// Total stored size: header plus all stored sectors.
    raw_size: u64,
    /// Logical disc size implied by the highest stored sector.
    disc_size: u64,
    /// AES-128 key loaded from htk.bin.
    key: KeyBytes,
}
|
||||
|
||||
impl Clone for DiscIONFS {
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
inner: self.inner.clone(),
|
||||
header: self.header.clone(),
|
||||
raw_size: self.raw_size,
|
||||
disc_size: self.disc_size,
|
||||
key: self.key,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DiscIONFS {
    /// Opens an NFS image from the directory containing the hif_*.nfs files.
    pub fn new(directory: &Path) -> Result<Box<Self>> {
        // Start with zeroed fields; load_files populates all of them.
        let mut disc_io = Box::new(Self {
            inner: SplitFileReader::empty(),
            header: NFSHeader::new_zeroed(),
            raw_size: 0,
            disc_size: 0,
            key: [0; 16],
        });
        disc_io.load_files(directory)?;
        Ok(disc_io)
    }
}
|
||||
|
||||
impl BlockIO for DiscIONFS {
    /// Reads and decrypts one sector; sectors absent from the LBA ranges are
    /// logical zeroes.
    fn read_block(
        &mut self,
        out: &mut [u8],
        sector: u32,
        partition: Option<&PartitionInfo>,
    ) -> io::Result<Option<Block>> {
        // Calculate physical sector
        let phys_sector = self.header.phys_sector(sector);
        if phys_sector == u32::MAX {
            // Logical zero sector
            return Ok(Some(Block::Zero));
        }

        // Read sector
        let offset = size_of::<NFSHeader>() as u64 + phys_sector as u64 * SECTOR_SIZE as u64;
        self.inner.seek(SeekFrom::Start(offset))?;
        self.inner.read_exact(out)?;

        // Decrypt: the AES-CBC IV is the logical sector number in the final
        // four bytes (big-endian), with the remaining twelve bytes zero.
        let iv_bytes = sector.to_be_bytes();
        #[rustfmt::skip]
        let iv: KeyBytes = [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            iv_bytes[0], iv_bytes[1], iv_bytes[2], iv_bytes[3],
        ];
        aes_decrypt(&self.key, iv, out);

        // NFS stores Wii partition data decrypted but with hash blocks intact.
        if partition.is_some() {
            Ok(Some(Block::PartDecrypted { has_hashes: true }))
        } else {
            Ok(Some(Block::Raw))
        }
    }

    fn block_size(&self) -> u32 { SECTOR_SIZE as u32 }

    fn meta(&self) -> DiscMeta {
        DiscMeta { format: Format::Nfs, decrypted: true, ..Default::default() }
    }
}
|
||||
|
||||
/// Joins `path` onto `directory`, resolving `..` components lexically
/// (without touching the filesystem).
fn get_path<P>(directory: &Path, path: P) -> PathBuf
where P: AsRef<Path> {
    path.as_ref().components().fold(directory.to_path_buf(), |mut buf, component| {
        if component == Component::ParentDir {
            buf.pop();
        } else {
            buf.push(component);
        }
        buf
    })
}
|
||||
|
||||
fn get_nfs(directory: &Path, num: u32) -> Result<PathBuf> {
|
||||
let path = get_path(directory, format!("hif_{:06}.nfs", num));
|
||||
if path.exists() {
|
||||
Ok(path)
|
||||
} else {
|
||||
Err(Error::DiscFormat(format!("Failed to locate {}", path.display())))
|
||||
}
|
||||
}
|
||||
|
||||
impl DiscIONFS {
|
||||
pub fn load_files(&mut self, directory: &Path) -> Result<()> {
|
||||
{
|
||||
// Load key file
|
||||
let primary_key_path =
|
||||
get_path(directory, ["..", "code", "htk.bin"].iter().collect::<PathBuf>());
|
||||
let secondary_key_path = get_path(directory, "htk.bin");
|
||||
let mut key_path = primary_key_path.canonicalize();
|
||||
if key_path.is_err() {
|
||||
key_path = secondary_key_path.canonicalize();
|
||||
}
|
||||
if key_path.is_err() {
|
||||
return Err(Error::DiscFormat(format!(
|
||||
"Failed to locate {} or {}",
|
||||
primary_key_path.display(),
|
||||
secondary_key_path.display()
|
||||
)));
|
||||
}
|
||||
let resolved_path = key_path.unwrap();
|
||||
File::open(resolved_path.as_path())
|
||||
.map_err(|v| Error::Io(format!("Failed to open {}", resolved_path.display()), v))?
|
||||
.read(&mut self.key)
|
||||
.map_err(|v| Error::Io(format!("Failed to read {}", resolved_path.display()), v))?;
|
||||
}
|
||||
|
||||
{
|
||||
// Load header from first file
|
||||
let path = get_nfs(directory, 0)?;
|
||||
self.inner.add(&path)?;
|
||||
|
||||
let mut file = BufReader::new(
|
||||
File::open(&path).with_context(|| format!("Opening file {}", path.display()))?,
|
||||
);
|
||||
let header: NFSHeader = read_from(&mut file)
|
||||
.with_context(|| format!("Reading NFS header from file {}", path.display()))?;
|
||||
header.validate()?;
|
||||
// log::debug!("{:?}", header);
|
||||
|
||||
// Ensure remaining files exist
|
||||
for i in 1..header.calculate_num_files() {
|
||||
self.inner.add(&get_nfs(directory, i)?)?;
|
||||
}
|
||||
|
||||
// Calculate sizes
|
||||
let num_sectors =
|
||||
header.lba_ranges().iter().map(|range| range.num_sectors.get()).sum::<u32>();
|
||||
let max_sector = header
|
||||
.lba_ranges()
|
||||
.iter()
|
||||
.map(|range| range.start_sector.get() + range.num_sectors.get())
|
||||
.max()
|
||||
.unwrap();
|
||||
let raw_size = size_of::<NFSHeader>() + (num_sectors as usize * SECTOR_SIZE);
|
||||
let data_size = max_sector as usize * SECTOR_SIZE;
|
||||
if raw_size > self.inner.len() as usize {
|
||||
return Err(Error::DiscFormat(format!(
|
||||
"NFS raw size mismatch: expected at least {}, got {}",
|
||||
raw_size,
|
||||
self.inner.len()
|
||||
)));
|
||||
}
|
||||
|
||||
self.header = header;
|
||||
self.raw_size = raw_size as u64;
|
||||
self.disc_size = data_size as u64;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
169
nod/src/io/nkit.rs
Normal file
169
nod/src/io/nkit.rs
Normal file
@@ -0,0 +1,169 @@
|
||||
use std::{
|
||||
io,
|
||||
io::{Read, Seek, SeekFrom},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
disc::DL_DVD_SIZE,
|
||||
io::MagicBytes,
|
||||
util::read::{read_from, read_u16_be, read_u32_be, read_u64_be, read_vec},
|
||||
DiscMeta,
|
||||
};
|
||||
|
||||
/// Bit flags describing which optional fields are present in an NKit header.
#[allow(unused)]
#[repr(u16)]
enum NKitHeaderFlags {
    Size = 0x1,
    Crc32 = 0x2,
    Md5 = 0x4,
    Sha1 = 0x8,
    Xxhash64 = 0x10,
    Key = 0x20,
    Encrypted = 0x40,
    ExtraData = 0x80,
    IndexFile = 0x100,
}

/// NKit v1 headers always carry exactly CRC32, MD5, SHA-1 and XXH64.
const NKIT_HEADER_V1_FLAGS: u16 = NKitHeaderFlags::Crc32 as u16
    | NKitHeaderFlags::Md5 as u16
    | NKitHeaderFlags::Sha1 as u16
    | NKitHeaderFlags::Xxhash64 as u16;

/// Computes the total size in bytes of an NKit header with the given version
/// and flags; `key_len` only matters when the `Key` flag is set.
const fn calc_header_size(version: u8, flags: u16, key_len: u32) -> usize {
    // Base: the 8-byte version string.
    let mut total = 8;
    if version >= 2 {
        // header size + flags
        total += 4;
    }
    if flags & NKitHeaderFlags::Size as u16 != 0 {
        total += 8; // u64 disc size
    }
    if flags & NKitHeaderFlags::Crc32 as u16 != 0 {
        total += 4;
    }
    if flags & NKitHeaderFlags::Md5 as u16 != 0 {
        total += 16;
    }
    if flags & NKitHeaderFlags::Sha1 as u16 != 0 {
        total += 20;
    }
    if flags & NKitHeaderFlags::Xxhash64 as u16 != 0 {
        total += 8;
    }
    if flags & NKitHeaderFlags::Key as u16 != 0 {
        // Length-prefixed key data.
        total += key_len as usize + 2;
    }
    total
}
|
||||
|
||||
/// Parsed NKit metadata header, carrying original-disc hashes and sizes.
#[allow(unused)]
#[derive(Debug, Clone)]
pub struct NKitHeader {
    pub version: u8,
    pub flags: u16,
    /// Original disc size, if stored.
    pub size: Option<u64>,
    pub crc32: Option<u32>,
    pub md5: Option<[u8; 16]>,
    pub sha1: Option<[u8; 20]>,
    pub xxhash64: Option<u64>,
    /// Bitstream of blocks that are junk data
    pub junk_bits: Option<Vec<u8>>,
    /// Block size the junk bitstream was computed against.
    pub block_size: u32,
}
|
||||
|
||||
// NOTE(review): `*b"NKIT v"` is 6 bytes but the declared type is `[u8; 7]`;
// a character appears to have been lost in this copy of the source — verify
// the literal against upstream nod-rs before building.
const VERSION_PREFIX: [u8; 7] = *b"NKIT v";
|
||||
|
||||
impl NKitHeader {
    /// Attempts to read an NKit header at the current position, returning None
    /// (and logging a warning) on any magic mismatch or parse failure.
    pub fn try_read_from<R>(reader: &mut R, block_size: u32, has_junk_bits: bool) -> Option<Self>
    where R: Read + Seek + ?Sized {
        let magic: MagicBytes = read_from(reader).ok()?;
        if magic == *b"NKIT" {
            // Rewind so read_from sees the full version string.
            reader.seek(SeekFrom::Current(-4)).ok()?;
            match NKitHeader::read_from(reader, block_size, has_junk_bits) {
                Ok(header) => Some(header),
                Err(e) => {
                    log::warn!("Failed to read NKit header: {}", e);
                    None
                }
            }
        } else {
            None
        }
    }

    /// Parses an NKit v1/v2 header. Optional fields are gated by `flags`; the
    /// junk-block bitstream follows the header when `has_junk_bits` is true.
    pub fn read_from<R>(reader: &mut R, block_size: u32, has_junk_bits: bool) -> io::Result<Self>
    where R: Read + ?Sized {
        let version_string: [u8; 8] = read_from(reader)?;
        if version_string[0..7] != VERSION_PREFIX
            || version_string[7] < b'1'
            || version_string[7] > b'9'
        {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "Invalid NKit header version string",
            ));
        }
        let version = version_string[7] - b'0';
        let header_size = match version {
            // v1 has a fixed layout; v2 stores its size explicitly.
            1 => calc_header_size(version, NKIT_HEADER_V1_FLAGS, 0) as u16,
            2 => read_u16_be(reader)?,
            _ => {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidData,
                    format!("Unsupported NKit header version: {}", version),
                ));
            }
        };

        let mut remaining_header_size = header_size as usize - 8;
        if version >= 2 {
            // We read the header size already
            remaining_header_size -= 2;
        }
        let header_bytes = read_vec(reader, remaining_header_size)?;
        let mut inner = &header_bytes[..];

        let flags = if version == 1 { NKIT_HEADER_V1_FLAGS } else { read_u16_be(&mut inner)? };
        let size = (flags & NKitHeaderFlags::Size as u16 != 0)
            .then(|| read_u64_be(&mut inner))
            .transpose()?;
        let crc32 = (flags & NKitHeaderFlags::Crc32 as u16 != 0)
            .then(|| read_u32_be(&mut inner))
            .transpose()?;
        let md5 = (flags & NKitHeaderFlags::Md5 as u16 != 0)
            .then(|| read_from::<[u8; 16], _>(&mut inner))
            .transpose()?;
        let sha1 = (flags & NKitHeaderFlags::Sha1 as u16 != 0)
            .then(|| read_from::<[u8; 20], _>(&mut inner))
            .transpose()?;
        let xxhash64 = (flags & NKitHeaderFlags::Xxhash64 as u16 != 0)
            .then(|| read_u64_be(&mut inner))
            .transpose()?;

        let junk_bits = if has_junk_bits {
            // One bit per block, covering up to a dual-layer DVD.
            let n = DL_DVD_SIZE.div_ceil(block_size as u64).div_ceil(8);
            Some(read_vec(reader, n as usize)?)
        } else {
            None
        };

        Ok(Self { version, flags, size, crc32, md5, sha1, xxhash64, junk_bits, block_size })
    }

    /// Returns whether `block` is junk data, or None when no junk bitstream
    /// is present (bits are stored MSB-first within each byte).
    pub fn is_junk_block(&self, block: u32) -> Option<bool> {
        self.junk_bits
            .as_ref()
            .and_then(|v| v.get((block / 8) as usize))
            .map(|&b| b & (1 << (7 - (block & 7))) != 0)
    }

    /// Merges this header's metadata into `meta`.
    pub fn apply(&self, meta: &mut DiscMeta) {
        meta.needs_hash_recovery |= self.junk_bits.is_some();
        meta.lossless |= self.size.is_some() && self.junk_bits.is_some();
        meta.disc_size = meta.disc_size.or(self.size);
        meta.crc32 = self.crc32;
        meta.md5 = self.md5;
        meta.sha1 = self.sha1;
        meta.xxhash64 = self.xxhash64;
    }
}
|
||||
154
nod/src/io/split.rs
Normal file
154
nod/src/io/split.rs
Normal file
@@ -0,0 +1,154 @@
|
||||
use std::{
|
||||
cmp::min,
|
||||
fs::File,
|
||||
io,
|
||||
io::{BufReader, Read, Seek, SeekFrom},
|
||||
path::{Path, PathBuf},
|
||||
};
|
||||
|
||||
use crate::{ErrorContext, Result, ResultContext};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct SplitFileReader {
|
||||
files: Vec<Split<PathBuf>>,
|
||||
open_file: Option<Split<BufReader<File>>>,
|
||||
pos: u64,
|
||||
}
|
||||
|
||||
/// One segment of a split file: the payload plus its absolute byte range
/// within the logical concatenated file.
#[derive(Debug, Clone)]
struct Split<T> {
    inner: T,
    begin: u64,
    size: u64,
}

impl<T> Split<T> {
    /// Returns true if absolute position `pos` falls within this segment.
    fn contains(&self, pos: u64) -> bool { (self.begin..self.begin + self.size).contains(&pos) }
}
|
||||
|
||||
// .iso.1, .iso.2, etc.
/// Builds the candidate path of split file `index` for the `<file>.N` convention.
fn split_path_1(input: &Path, index: u32) -> PathBuf {
    let input_str = input.to_str().unwrap_or("[INVALID]");
    let mut out = input_str.to_string();
    out.push('.');
    // Append the index as a decimal string; `char::from_digit(index, 10).unwrap()`
    // would panic for any index >= 10 (images split into more than 9 extra parts).
    out.push_str(&index.to_string());
    PathBuf::from(out)
}
|
||||
|
||||
// .part1.iso, .part2.iso, etc.
/// Builds the candidate path of split file `index` for the `<name>.partN.<ext>`
/// convention, defaulting the extension to "iso".
fn split_path_2(input: &Path, index: u32) -> PathBuf {
    let extension = input.extension().and_then(|s| s.to_str()).unwrap_or("iso");
    let input_without_ext = input.with_extension("");
    let input_str = input_without_ext.to_str().unwrap_or("[INVALID]");
    let mut out = input_str.to_string();
    out.push_str(".part");
    // Decimal string instead of `char::from_digit(..).unwrap()`, which would
    // panic for index >= 10.
    out.push_str(&index.to_string());
    out.push('.');
    out.push_str(extension);
    PathBuf::from(out)
}
|
||||
|
||||
// .wbf1, .wbf2, etc.
/// Builds the candidate path of split file `index` for the WBFS convention:
/// the last character of the base name is replaced by the index.
fn split_path_3(input: &Path, index: u32) -> PathBuf {
    let input_str = input.to_str().unwrap_or("[INVALID]");
    let mut chars = input_str.chars();
    // Drop the final character (the 's' of ".wbfs").
    chars.next_back();
    let mut out = chars.as_str().to_string();
    // Decimal string instead of `char::from_digit(..).unwrap()`, which would
    // panic for index >= 10.
    out.push_str(&index.to_string());
    PathBuf::from(out)
}
|
||||
|
||||
impl SplitFileReader {
    /// Creates a reader with no files.
    pub fn empty() -> Self { Self { files: Vec::new(), open_file: None, pos: 0 } }

    /// Opens `path` and auto-discovers continuation files using the known
    /// split naming conventions (.iso.N, .partN.iso, .wbfN), in that order.
    pub fn new(path: &Path) -> Result<Self> {
        let mut files = vec![];
        let mut begin = 0;
        match path.metadata() {
            Ok(metadata) => {
                files.push(Split { inner: path.to_path_buf(), begin, size: metadata.len() });
                begin += metadata.len();
            }
            Err(e) => {
                return Err(e.context(format!("Failed to stat file {}", path.display())));
            }
        }
        for path_fn in [split_path_1, split_path_2, split_path_3] {
            let mut index = 1;
            loop {
                let path = path_fn(path, index);
                if let Ok(metadata) = path.metadata() {
                    files.push(Split { inner: path, begin, size: metadata.len() });
                    begin += metadata.len();
                    index += 1;
                } else {
                    break;
                }
            }
            // Stop at the first naming convention that matched any files.
            if index > 1 {
                break;
            }
        }
        Ok(Self { files, open_file: None, pos: 0 })
    }

    /// Appends a file as the next segment of the logical file.
    pub fn add(&mut self, path: &Path) -> Result<()> {
        let begin = self.len();
        let metadata =
            path.metadata().context(format!("Failed to stat file {}", path.display()))?;
        self.files.push(Split { inner: path.to_path_buf(), begin, size: metadata.len() });
        Ok(())
    }

    /// Closes any open file and rewinds to the start.
    pub fn reset(&mut self) {
        self.open_file = None;
        self.pos = 0;
    }

    /// Total logical length: the end offset of the last segment.
    pub fn len(&self) -> u64 { self.files.last().map_or(0, |f| f.begin + f.size) }
}
|
||||
|
||||
impl Read for SplitFileReader {
    /// Reads from the split file containing the current position, lazily
    /// (re)opening the correct file when the position crosses a split boundary.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        // (Re)open a backing file if none is open, or the cached one no longer
        // contains the current position.
        if self.open_file.is_none() || !self.open_file.as_ref().unwrap().contains(self.pos) {
            self.open_file = if let Some(split) = self.files.iter().find(|f| f.contains(self.pos)) {
                let mut file = BufReader::new(File::open(&split.inner)?);
                // log::info!("Opened file {} at pos {}", split.inner.display(), self.pos);
                // Seek to the offset within this split file.
                file.seek(SeekFrom::Start(self.pos - split.begin))?;
                Some(Split { inner: file, begin: split.begin, size: split.size })
            } else {
                None
            };
        }
        // No file covers the current position: treat as EOF.
        let Some(split) = self.open_file.as_mut() else {
            return Ok(0);
        };
        // Clamp the read so it never crosses into the next split file; the
        // caller's next read() will reopen the following file.
        let to_read = min(buf.len(), (split.begin + split.size - self.pos) as usize);
        let read = split.inner.read(&mut buf[..to_read])?;
        self.pos += read as u64;
        Ok(read)
    }
}
|
||||
|
||||
impl Seek for SplitFileReader {
|
||||
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
|
||||
self.pos = match pos {
|
||||
SeekFrom::Start(pos) => pos,
|
||||
SeekFrom::Current(offset) => self.pos.saturating_add_signed(offset),
|
||||
SeekFrom::End(offset) => self.len().saturating_add_signed(offset),
|
||||
};
|
||||
if let Some(split) = &mut self.open_file {
|
||||
if split.contains(self.pos) {
|
||||
// Seek within the open file
|
||||
split.inner.seek(SeekFrom::Start(self.pos - split.begin))?;
|
||||
} else {
|
||||
self.open_file = None;
|
||||
}
|
||||
}
|
||||
Ok(self.pos)
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for SplitFileReader {
    /// Clones only the file list; the open handle and position are reset
    /// because the buffered `File` handle cannot be cloned.
    fn clone(&self) -> Self { Self { files: self.files.clone(), open_file: None, pos: 0 } }
}
|
||||
148
nod/src/io/wbfs.rs
Normal file
148
nod/src/io/wbfs.rs
Normal file
@@ -0,0 +1,148 @@
|
||||
use std::{
|
||||
io,
|
||||
io::{Read, Seek, SeekFrom},
|
||||
mem::size_of,
|
||||
path::Path,
|
||||
};
|
||||
|
||||
use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes};
|
||||
|
||||
use crate::{
|
||||
io::{
|
||||
block::{Block, BlockIO, PartitionInfo},
|
||||
nkit::NKitHeader,
|
||||
split::SplitFileReader,
|
||||
DiscMeta, Format, MagicBytes,
|
||||
},
|
||||
util::read::{read_box_slice, read_from},
|
||||
Error, Result, ResultContext,
|
||||
};
|
||||
|
||||
/// Magic bytes identifying a WBFS container ("WBFS").
pub const WBFS_MAGIC: MagicBytes = *b"WBFS";
|
||||
|
||||
/// On-disk WBFS container header (big-endian fields).
#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
struct WBFSHeader {
    /// "WBFS" magic bytes.
    magic: MagicBytes,
    /// Number of WBFS sectors; the file is expected to span exactly
    /// `num_sectors * sector_size` bytes.
    num_sectors: U32,
    /// log2 of the WBFS sector size in bytes.
    sector_size_shift: u8,
    /// log2 of the WBFS block size in bytes.
    block_size_shift: u8,
    /// Padding to a 4-byte boundary.
    _pad: [u8; 2],
}
|
||||
|
||||
impl WBFSHeader {
|
||||
fn sector_size(&self) -> u32 { 1 << self.sector_size_shift }
|
||||
|
||||
fn block_size(&self) -> u32 { 1 << self.block_size_shift }
|
||||
|
||||
// fn align_lba(&self, x: u32) -> u32 { (x + self.sector_size() - 1) & !(self.sector_size() - 1) }
|
||||
//
|
||||
// fn num_wii_sectors(&self) -> u32 {
|
||||
// (self.num_sectors.get() / SECTOR_SIZE as u32) * self.sector_size()
|
||||
// }
|
||||
//
|
||||
// fn max_wii_sectors(&self) -> u32 { NUM_WII_SECTORS }
|
||||
//
|
||||
// fn num_wbfs_sectors(&self) -> u32 {
|
||||
// self.num_wii_sectors() >> (self.wbfs_sector_size_shift - 15)
|
||||
// }
|
||||
|
||||
fn max_blocks(&self) -> u32 { NUM_WII_SECTORS >> (self.block_size_shift - 15) }
|
||||
}
|
||||
|
||||
/// Size of the disc header copy stored for a WBFS disc slot.
const DISC_HEADER_SIZE: usize = 0x100;
/// Total Wii sectors on a double-layer disc (143432 per layer).
const NUM_WII_SECTORS: u32 = 143432 * 2; // Double layer discs
|
||||
|
||||
/// WBFS disc image reader.
#[derive(Clone)]
pub struct DiscIOWBFS {
    /// Underlying (possibly split) file reader
    inner: SplitFileReader,
    /// WBFS header
    header: WBFSHeader,
    /// Map of Wii LBAs to WBFS LBAs
    block_table: Box<[U16]>,
    /// Optional NKit header
    nkit_header: Option<NKitHeader>,
}
|
||||
|
||||
impl DiscIOWBFS {
    /// Opens a WBFS disc image: validates the magic and file size, checks the
    /// disc table for exactly one disc, loads the block (LBA) table, and reads
    /// the optional NKit header.
    pub fn new(filename: &Path) -> Result<Box<Self>> {
        let mut inner = SplitFileReader::new(filename)?;

        let header: WBFSHeader = read_from(&mut inner).context("Reading WBFS header")?;
        if header.magic != WBFS_MAGIC {
            return Err(Error::DiscFormat("Invalid WBFS magic".to_string()));
        }
        // The (possibly split) file must span exactly num_sectors * sector_size bytes.
        let file_len = inner.len();
        let expected_file_len = header.num_sectors.get() as u64 * header.sector_size() as u64;
        if file_len != expected_file_len {
            return Err(Error::DiscFormat(format!(
                "Invalid WBFS file size: {}, expected {}",
                file_len, expected_file_len
            )));
        }

        // Disc table: one byte per disc slot, filling the rest of the first sector.
        let disc_table: Box<[u8]> =
            read_box_slice(&mut inner, header.sector_size() as usize - size_of::<WBFSHeader>())
                .context("Reading WBFS disc table")?;
        if disc_table[0] != 1 {
            return Err(Error::DiscFormat("WBFS doesn't contain a disc".to_string()));
        }
        // Any non-zero entry beyond slot 0 means multiple discs, which is unsupported.
        if disc_table[1../*max_disc as usize*/].iter().any(|&x| x != 0) {
            return Err(Error::DiscFormat("Only single WBFS discs are supported".to_string()));
        }

        // Read WBFS LBA table
        inner
            .seek(SeekFrom::Start(header.sector_size() as u64 + DISC_HEADER_SIZE as u64))
            .context("Seeking to WBFS LBA table")?; // Skip header
        let block_table: Box<[U16]> = read_box_slice(&mut inner, header.max_blocks() as usize)
            .context("Reading WBFS LBA table")?;

        // Read NKit header if present (always at 0x10000)
        inner.seek(SeekFrom::Start(0x10000)).context("Seeking to NKit header")?;
        let nkit_header = NKitHeader::try_read_from(&mut inner, header.block_size(), true);

        // Reset reader
        inner.reset();
        Ok(Box::new(Self { inner, header, block_table, nkit_header }))
    }
}
|
||||
|
||||
impl BlockIO for DiscIOWBFS {
    /// Reads one WBFS block into `out`, translating the Wii block index
    /// through the WBFS block table. Returns `None` past the end of the disc,
    /// `Block::Junk` for NKit-scrubbed blocks, and `Block::Raw` otherwise.
    fn read_block(
        &mut self,
        out: &mut [u8],
        block: u32,
        _partition: Option<&PartitionInfo>,
    ) -> io::Result<Option<Block>> {
        let block_size = self.header.block_size();
        if block >= self.header.max_blocks() {
            return Ok(None);
        }

        // Check if block is junk data
        if self.nkit_header.as_ref().is_some_and(|h| h.is_junk_block(block).unwrap_or(false)) {
            return Ok(Some(Block::Junk));
        }

        // Read block
        // NOTE(review): a block-table entry of 0 (unallocated block) makes
        // block_start 0, so this reads the WBFS header region instead of disc
        // data — verify whether unallocated blocks should instead yield
        // zero-filled data.
        let block_start = block_size as u64 * self.block_table[block as usize].get() as u64;
        self.inner.seek(SeekFrom::Start(block_start))?;
        self.inner.read_exact(out)?;
        Ok(Some(Block::Raw))
    }

    /// WBFS block size in bytes.
    fn block_size(&self) -> u32 { self.header.block_size() }

    /// Disc metadata derived from the WBFS header, augmented by the NKit
    /// header when present.
    fn meta(&self) -> DiscMeta {
        let mut result = DiscMeta {
            format: Format::Wbfs,
            block_size: Some(self.header.block_size()),
            ..Default::default()
        };
        if let Some(nkit_header) = &self.nkit_header {
            nkit_header.apply(&mut result);
        }
        result
    }
}
|
||||
917
nod/src/io/wia.rs
Normal file
917
nod/src/io/wia.rs
Normal file
@@ -0,0 +1,917 @@
|
||||
use std::{
|
||||
io,
|
||||
io::{Read, Seek, SeekFrom},
|
||||
mem::size_of,
|
||||
path::Path,
|
||||
};
|
||||
|
||||
use sha1::{Digest, Sha1};
|
||||
use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes};
|
||||
|
||||
use crate::{
|
||||
disc::{
|
||||
wii::{HASHES_SIZE, SECTOR_DATA_SIZE},
|
||||
SECTOR_SIZE,
|
||||
},
|
||||
io::{
|
||||
block::{Block, BlockIO, PartitionInfo},
|
||||
nkit::NKitHeader,
|
||||
split::SplitFileReader,
|
||||
Compression, Format, HashBytes, KeyBytes, MagicBytes,
|
||||
},
|
||||
static_assert,
|
||||
util::{
|
||||
compress::{lzma2_props_decode, lzma_props_decode, new_lzma2_decoder, new_lzma_decoder},
|
||||
lfg::LaggedFibonacci,
|
||||
read::{read_box_slice, read_from, read_u16_be, read_vec},
|
||||
take_seek::TakeSeekExt,
|
||||
},
|
||||
DiscMeta, Error, Result, ResultContext,
|
||||
};
|
||||
|
||||
/// Magic bytes for WIA (version 1) files.
pub const WIA_MAGIC: MagicBytes = *b"WIA\x01";
/// Magic bytes for RVZ (version 1) files.
pub const RVZ_MAGIC: MagicBytes = *b"RVZ\x01";
|
||||
|
||||
/// This struct is stored at offset 0x0 and is 0x48 bytes long. The wit source code says its format
/// will never be changed.
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct WIAFileHeader {
    /// [WIA_MAGIC] or [RVZ_MAGIC].
    pub magic: MagicBytes,
    /// The WIA format version.
    ///
    /// A short note from the wit source code about how version numbers are encoded:
    ///
    /// ```c
    /// //-----------------------------------------------------
    /// // Format of version number: AABBCCDD = A.BB | A.BB.CC
    /// // If D != 0x00 && D != 0xff => append: 'beta' D
    /// //-----------------------------------------------------
    /// ```
    pub version: U32,
    /// If the reading program supports the version of WIA indicated here, it can read the file.
    ///
    /// [version](Self::version) can be higher than `version_compatible`.
    pub version_compatible: U32,
    /// The size of the [WIADisc] struct.
    pub disc_size: U32,
    /// The SHA-1 hash of the [WIADisc] struct.
    ///
    /// The number of bytes to hash is determined by [disc_size](Self::disc_size).
    pub disc_hash: HashBytes,
    /// The original size of the ISO.
    pub iso_file_size: U64,
    /// The size of this file.
    pub wia_file_size: U64,
    /// The SHA-1 hash of this struct, up to but not including `file_head_hash` itself.
    pub file_head_hash: HashBytes,
}

static_assert!(size_of::<WIAFileHeader>() == 0x48);
|
||||
|
||||
impl WIAFileHeader {
    /// Validates the magic bytes, the header's self-hash, and the minimum
    /// compatible format version.
    pub fn validate(&self) -> Result<()> {
        // Check magic
        if self.magic != WIA_MAGIC && self.magic != RVZ_MAGIC {
            return Err(Error::DiscFormat(format!("Invalid WIA/RVZ magic: {:#X?}", self.magic)));
        }
        // Check file head hash (covers everything up to the hash field itself)
        let bytes = self.as_bytes();
        verify_hash(&bytes[..bytes.len() - size_of::<HashBytes>()], &self.file_head_hash)?;
        // Check version compatibility (0x30000 = version 3.0 in AABBCCDD encoding)
        if self.version_compatible.get() < 0x30000 {
            return Err(Error::DiscFormat(format!(
                "WIA/RVZ version {:#X} is not supported",
                self.version_compatible
            )));
        }
        Ok(())
    }

    /// Whether this file uses the RVZ container format (vs. plain WIA).
    pub fn is_rvz(&self) -> bool { self.magic == RVZ_MAGIC }
}
|
||||
|
||||
/// Disc type, parsed from [WIADisc::disc_type] (1 = GameCube, 2 = Wii).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DiscType {
    /// GameCube disc
    GameCube,
    /// Wii disc
    Wii,
}
|
||||
|
||||
impl TryFrom<u32> for DiscType {
    type Error = Error;

    /// Converts the on-disk numeric disc type (1 = GameCube, 2 = Wii);
    /// any other value is a format error.
    fn try_from(value: u32) -> Result<Self> {
        match value {
            1 => Ok(Self::GameCube),
            2 => Ok(Self::Wii),
            v => Err(Error::DiscFormat(format!("Invalid disc type {}", v))),
        }
    }
}
|
||||
|
||||
/// Compression type, parsed from [WIADisc::compression]
/// (0 = None, 1 = Purge, 2 = Bzip2, 3 = Lzma, 4 = Lzma2, 5 = Zstandard).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum WIACompression {
    /// No compression.
    None,
    /// (WIA only) See [WIASegment]
    Purge,
    /// BZIP2 compression
    Bzip2,
    /// LZMA compression
    Lzma,
    /// LZMA2 compression
    Lzma2,
    /// (RVZ only) Zstandard compression
    Zstandard,
}
|
||||
|
||||
impl TryFrom<u32> for WIACompression {
    type Error = Error;

    /// Converts the on-disk numeric compression type; any value outside 0-5
    /// is a format error.
    fn try_from(value: u32) -> Result<Self> {
        match value {
            0 => Ok(Self::None),
            1 => Ok(Self::Purge),
            2 => Ok(Self::Bzip2),
            3 => Ok(Self::Lzma),
            4 => Ok(Self::Lzma2),
            5 => Ok(Self::Zstandard),
            v => Err(Error::DiscFormat(format!("Invalid compression type {}", v))),
        }
    }
}
|
||||
|
||||
/// Number of bytes of the raw disc header stored inline in [WIADisc::disc_head].
const DISC_HEAD_SIZE: usize = 0x80;
|
||||
|
||||
/// This struct is stored at offset 0x48, immediately after [WIAFileHeader].
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct WIADisc {
    /// The disc type. (1 = GameCube, 2 = Wii; see [DiscType])
    pub disc_type: U32,
    /// The compression type. (See [WIACompression])
    pub compression: U32,
    /// The compression level used by the compressor.
    ///
    /// The possible values are compressor-specific.
    ///
    /// RVZ only:
    /// > This is signed (instead of unsigned) to support negative compression levels in
    /// [Zstandard](WIACompression::Zstandard) (RVZ only).
    pub compression_level: I32,
    /// The size of the chunks that data is divided into.
    ///
    /// WIA only:
    /// > Must be a multiple of 2 MiB.
    ///
    /// RVZ only:
    /// > Chunk sizes smaller than 2 MiB are supported. The following applies when using a chunk size
    /// smaller than 2 MiB:
    /// > - The chunk size must be at least 32 KiB and must be a power of two. (Just like with WIA,
    /// sizes larger than 2 MiB do not have to be a power of two, they just have to be an integer
    /// multiple of 2 MiB.)
    /// > - For Wii partition data, each chunk contains one [WIAExceptionList] which contains
    /// exceptions for that chunk (and no other chunks). Offset 0 refers to the first hash of the
    /// current chunk, not the first hash of the full 2 MiB of data.
    pub chunk_size: U32,
    /// The first 0x80 bytes of the disc image.
    pub disc_head: [u8; DISC_HEAD_SIZE],
    /// The number of [WIAPartition] structs.
    pub num_partitions: U32,
    /// The size of one [WIAPartition] struct.
    ///
    /// If this is smaller than the size of [WIAPartition], fill the missing bytes with 0x00.
    pub partition_type_size: U32,
    /// The offset in the file where the [WIAPartition] structs are stored (uncompressed).
    pub partition_offset: U64,
    /// The SHA-1 hash of the [WIAPartition] structs.
    ///
    /// The number of bytes to hash is determined by `num_partitions * partition_type_size`.
    pub partition_hash: HashBytes,
    /// The number of [WIARawData] structs.
    pub num_raw_data: U32,
    /// The offset in the file where the [WIARawData] structs are stored (compressed).
    pub raw_data_offset: U64,
    /// The total compressed size of the [WIARawData] structs.
    pub raw_data_size: U32,
    /// The number of [WIAGroup] structs.
    pub num_groups: U32,
    /// The offset in the file where the [WIAGroup] structs are stored (compressed).
    pub group_offset: U64,
    /// The total compressed size of the [WIAGroup] structs.
    pub group_size: U32,
    /// The number of used bytes in the [compr_data](Self::compr_data) array.
    pub compr_data_len: u8,
    /// Compressor specific data.
    ///
    /// If the compression method is [None](WIACompression::None), [Purge](WIACompression::Purge),
    /// [Bzip2](WIACompression::Bzip2), or [Zstandard](WIACompression::Zstandard) (RVZ only),
    /// [compr_data_len](Self::compr_data_len) is 0. If the compression method is
    /// [Lzma](WIACompression::Lzma) or [Lzma2](WIACompression::Lzma2), the compressor specific data is
    /// stored in the format used by the 7-Zip SDK. It needs to be converted if you are using e.g.
    /// liblzma.
    ///
    /// For [Lzma](WIACompression::Lzma), the data is 5 bytes long. The first byte encodes the `lc`,
    /// `pb`, and `lp` parameters, and the four other bytes encode the dictionary size in little
    /// endian.
    pub compr_data: [u8; 7],
}

static_assert!(size_of::<WIADisc>() == 0xDC);
|
||||
|
||||
impl WIADisc {
    /// Validates the disc type, compression method, and partition struct size.
    pub fn validate(&self) -> Result<()> {
        DiscType::try_from(self.disc_type.get())?;
        WIACompression::try_from(self.compression.get())?;
        // The format allows partition_type_size to be smaller than WIAPartition
        // (missing bytes read as zero), but this implementation requires an
        // exact match.
        if self.partition_type_size.get() != size_of::<WIAPartition>() as u32 {
            return Err(Error::DiscFormat(format!(
                "WIA partition type size is {}, expected {}",
                self.partition_type_size.get(),
                size_of::<WIAPartition>()
            )));
        }
        Ok(())
    }

    /// Returns the parsed compression method.
    ///
    /// Panics if the stored value is invalid; [validate](Self::validate) must
    /// have succeeded for the unwrap to be safe.
    pub fn compression(&self) -> WIACompression {
        WIACompression::try_from(self.compression.get()).unwrap()
    }
}
|
||||
|
||||
/// A contiguous run of disc sectors belonging to a Wii partition, described by
/// a range of [WIAGroup] indices. (See [WIAPartition].)
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct WIAPartitionData {
    /// The sector on the disc at which this data starts.
    /// One sector is 32 KiB (or 31 KiB excluding hashes).
    pub first_sector: U32,
    /// The number of sectors on the disc covered by this struct.
    /// One sector is 32 KiB (or 31 KiB excluding hashes).
    pub num_sectors: U32,
    /// The index of the first [WIAGroup] struct that points to the data covered by this struct.
    /// The other [WIAGroup] indices follow sequentially.
    pub group_index: U32,
    /// The number of [WIAGroup] structs used for this data.
    pub num_groups: U32,
}

static_assert!(size_of::<WIAPartitionData>() == 0x10);
|
||||
|
||||
impl WIAPartitionData {
|
||||
pub fn contains(&self, sector: u32) -> bool {
|
||||
let start = self.first_sector.get();
|
||||
sector >= start && sector < start + self.num_sectors.get()
|
||||
}
|
||||
}
|
||||
|
||||
/// This struct is used for keeping track of Wii partition data that on the actual disc is encrypted
/// and hashed. This does not include the unencrypted area at the beginning of partitions that
/// contains the ticket, TMD, certificate chain, and H3 table. So for a typical game partition,
/// `pd[0].first_sector * 0x8000` would be 0x0F820000, not 0x0F800000.
///
/// Wii partition data is stored decrypted and with hashes removed. For each 0x8000 bytes on the
/// disc, 0x7C00 bytes are stored in the WIA file (prior to compression). If the hashes are desired,
/// the reading program must first recalculate the hashes as done when creating a Wii disc image
/// from scratch (see <https://wiibrew.org/wiki/Wii_Disc>), and must then apply the hash exceptions
/// which are stored along with the data (see the [WIAExceptionList] section).
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct WIAPartition {
    /// The title key for this partition (128-bit AES), which can be used for re-encrypting the
    /// partition data.
    ///
    /// This key can be used directly, without decrypting it using the Wii common key.
    pub partition_key: KeyBytes,
    /// The two sector ranges ([WIAPartitionData]) making up this partition.
    ///
    /// To quote the wit source code: `segment 0 is small and defined for management data (boot ..
    /// fst). segment 1 takes the remaining data.`
    ///
    /// The point at which wit splits the two segments is the FST end offset rounded up to the next
    /// 2 MiB. Giving the first segment a size which is not a multiple of 2 MiB is likely a bad idea
    /// (unless the second segment has a size of 0).
    pub partition_data: [WIAPartitionData; 2],
}

static_assert!(size_of::<WIAPartition>() == 0x30);
|
||||
|
||||
/// This struct is used for keeping track of disc data that is not stored as [WIAPartition].
/// The data is stored as is (other than compression being applied).
///
/// The first [WIARawData] has `raw_data_offset` set to 0x80 and `raw_data_size` set to 0x4FF80,
/// but despite this, it actually contains 0x50000 bytes of data. (However, the first 0x80 bytes
/// should be read from [WIADisc] instead.) This should be handled by rounding the offset down to
/// the previous multiple of 0x8000 (and adding the equivalent amount to the size so that the end
/// offset stays the same), not by special casing the first [WIARawData].
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct WIARawData {
    /// The offset on the disc at which this data starts.
    /// (Not necessarily sector-aligned; see the note above.)
    pub raw_data_offset: U64,
    /// The number of bytes on the disc covered by this struct.
    pub raw_data_size: U64,
    /// The index of the first [WIAGroup] struct that points to the data covered by this struct.
    /// The other [WIAGroup] indices follow sequentially.
    pub group_index: U32,
    /// The number of [WIAGroup] structs used for this data.
    pub num_groups: U32,
}
|
||||
|
||||
impl WIARawData {
    /// Start offset rounded down to the previous 32 KiB sector boundary.
    pub fn start_offset(&self) -> u64 { self.raw_data_offset.get() & !(SECTOR_SIZE as u64 - 1) }

    /// First disc sector covered by this raw data region.
    pub fn start_sector(&self) -> u32 { (self.start_offset() / SECTOR_SIZE as u64) as u32 }

    /// End offset on the disc (exclusive).
    pub fn end_offset(&self) -> u64 { self.raw_data_offset.get() + self.raw_data_size.get() }

    /// Sector index of the end offset; exclusive bound for [contains](Self::contains).
    pub fn end_sector(&self) -> u32 { (self.end_offset() / SECTOR_SIZE as u64) as u32 }

    /// Returns true if `sector` lies within this raw data region.
    pub fn contains(&self, sector: u32) -> bool {
        sector >= self.start_sector() && sector < self.end_sector()
    }
}
|
||||
|
||||
/// This struct points directly to the actual disc data, stored compressed.
///
/// The data is interpreted differently depending on whether the [WIAGroup] is referenced by a
/// [WIAPartitionData] or a [WIARawData] (see the [WIAPartition] section for details).
///
/// A [WIAGroup] normally contains chunk_size bytes of decompressed data
/// (or `chunk_size / 0x8000 * 0x7C00` for Wii partition data when not counting hashes), not
/// counting any [WIAExceptionList] structs. However, the last [WIAGroup] of a [WIAPartitionData]
/// or [WIARawData] contains less data than that if `num_sectors * 0x8000` (for [WIAPartitionData])
/// or `raw_data_size` (for [WIARawData]) is not evenly divisible by `chunk_size`.
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct WIAGroup {
    /// The offset in the file where the compressed data is stored.
    ///
    /// Stored as a `u32`, divided by 4.
    pub data_offset: U32,
    /// The size of the compressed data, including any [WIAExceptionList] structs. 0 is a special
    /// case meaning that every byte of the decompressed data is 0x00 and the [WIAExceptionList]
    /// structs (if there are supposed to be any) contain 0 exceptions.
    pub data_size: U32,
}
|
||||
|
||||
/// Compared to [WIAGroup], [RVZGroup] changes the meaning of the most significant bit of
/// [data_size](Self::data_size) and adds one additional attribute.
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct RVZGroup {
    /// The offset in the file where the compressed data is stored, divided by 4.
    pub data_offset: U32,
    /// The most significant bit is 1 if the data is compressed using the compression method
    /// indicated in [WIADisc], and 0 if it is not compressed. The lower 31 bits are the size of
    /// the compressed data, including any [WIAExceptionList] structs. The lower 31 bits being 0 is
    /// a special case meaning that every byte of the decompressed and unpacked data is 0x00 and
    /// the [WIAExceptionList] structs (if there are supposed to be any) contain 0 exceptions.
    ///
    /// (Use [data_size](Self::data_size) and [is_compressed](Self::is_compressed) to decode.)
    pub data_size_and_flag: U32,
    /// The size after decompressing but before decoding the RVZ packing.
    /// If this is 0, RVZ packing is not used for this group.
    pub rvz_packed_size: U32,
}
|
||||
|
||||
impl RVZGroup {
    /// Compressed data size: the lower 31 bits of [data_size_and_flag](Self::data_size_and_flag).
    pub fn data_size(&self) -> u32 { self.data_size_and_flag.get() & 0x7FFFFFFF }

    /// Whether the group data is compressed: the most significant bit of
    /// [data_size_and_flag](Self::data_size_and_flag).
    pub fn is_compressed(&self) -> bool { self.data_size_and_flag.get() & 0x80000000 != 0 }
}
|
||||
|
||||
impl From<&WIAGroup> for RVZGroup {
    /// Converts a plain WIA group to the RVZ representation: the compressed
    /// flag is always set (WIA has no per-group uncompressed flag) and RVZ
    /// packing is disabled.
    fn from(value: &WIAGroup) -> Self {
        Self {
            data_offset: value.data_offset,
            data_size_and_flag: U32::new(value.data_size.get() | 0x80000000),
            rvz_packed_size: U32::new(0),
        }
    }
}
|
||||
|
||||
/// This struct represents a 20-byte difference between the recalculated hash data and the original
/// hash data. (See also [WIAExceptionList])
///
/// When recalculating hashes for a [WIAGroup] with a size which is not evenly divisible by 2 MiB
/// (with the size of the hashes included), the missing bytes should be treated as zeroes for the
/// purpose of hashing. (wit's writing code seems to act as if the reading code does not assume that
/// these missing bytes are zero, but both wit's and Dolphin's reading code treat them as zero.
/// Dolphin's writing code assumes that the reading code treats them as zero.)
///
/// wit's writing code only outputs [WIAException] structs for mismatches in the actual hash
/// data, not in the padding data (which normally only contains zeroes). Dolphin's writing code
/// outputs [WIAException] structs for both hash data and padding data. When Dolphin needs to
/// write [WIAException] structs for a padding area which is 32 bytes long, it writes one which
/// covers the first 20 bytes of the padding area and one which covers the last 20 bytes of the
/// padding area, generating 12 bytes of overlap between the [WIAException] structs.
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(2))]
pub struct WIAException {
    /// The offset among the hashes (big-endian). The offsets 0x0000-0x0400 here map to the offsets 0x0000-0x0400
    /// in the full 2 MiB of data, the offsets 0x0400-0x0800 here map to the offsets 0x8000-0x8400
    /// in the full 2 MiB of data, and so on.
    ///
    /// The offsets start over at 0 for each new [WIAExceptionList].
    pub offset: U16,
    /// The hash that the automatically generated hash at the given offset needs to be replaced
    /// with.
    ///
    /// The replacement should happen after calculating all hashes for the current 2 MiB of data
    /// but before encrypting the hashes.
    pub hash: HashBytes,
}
|
||||
|
||||
/// Each [WIAGroup] of Wii partition data contains one or more [WIAExceptionList] structs before
/// the actual data, one for each 2 MiB of data in the [WIAGroup]. The number of [WIAExceptionList]
/// structs per [WIAGroup] is always `chunk_size / 0x200000`, even for a [WIAGroup] which contains
/// less data than normal due to it being at the end of a partition.
///
/// For memory management reasons, programs which read WIA files might place a limit on how many
/// exceptions there can be in a [WIAExceptionList]. Dolphin's reading code has a limit of
/// `52 × 64 = 3328` (unless the compression method is [None](WIACompression::None) or
/// [Purge](WIACompression::Purge), in which case there is no limit), which is enough to cover all
/// hashes and all padding. wit's reading code seems to be written as if `47 × 64 = 3008` is the
/// maximum it needs to be able to handle, which is enough to cover all hashes but not any padding.
/// However, because wit allocates more memory than needed, it seems to be possible to exceed 3008
/// by some amount without problems. It should be safe for writing code to assume that reading code
/// can handle at least 3328 exceptions per [WIAExceptionList].
///
/// Somewhat ironically, there are exceptions to how [WIAExceptionList] structs are handled:
///
/// For the compression method [Purge](WIACompression::Purge), the [WIAExceptionList] structs are
/// stored uncompressed (in other words, before the first [WIASegment]). For
/// [Bzip2](WIACompression::Bzip2), [Lzma](WIACompression::Lzma) and [Lzma2](WIACompression::Lzma2), they are
/// compressed along with the rest of the data.
///
/// For the compression methods [None](WIACompression::None) and [Purge](WIACompression::Purge), if the
/// end offset of the last [WIAExceptionList] is not evenly divisible by 4, padding is inserted
/// after it so that the data afterwards will start at a 4 byte boundary. This padding is not
/// inserted for the other compression methods.
///
/// In this implementation, one list is the set of [WIAException]s for a single 2 MiB unit.
type WIAExceptionList = Box<[WIAException]>;
|
||||
|
||||
/// Decompressor state for WIA/RVZ group data, one variant per supported
/// [WIACompression] method (gated by cargo features).
#[derive(Clone)]
pub enum Decompressor {
    /// Data is stored uncompressed.
    None,
    /// BZIP2 decompression.
    #[cfg(feature = "compress-bzip2")]
    Bzip2,
    /// LZMA decompression; holds the 5-byte 7-Zip props from [WIADisc::compr_data].
    #[cfg(feature = "compress-lzma")]
    Lzma(Box<[u8]>),
    /// LZMA2 decompression; holds the props bytes from [WIADisc::compr_data].
    #[cfg(feature = "compress-lzma")]
    Lzma2(Box<[u8]>),
    /// Zstandard decompression (RVZ only).
    #[cfg(feature = "compress-zstd")]
    Zstandard,
}
|
||||
|
||||
impl Decompressor {
    /// Creates a decompressor for the compression method declared in [WIADisc],
    /// capturing the compressor-specific props bytes for LZMA/LZMA2.
    ///
    /// Returns a format error for Purge or for methods whose cargo feature is
    /// disabled.
    pub fn new(disc: &WIADisc) -> Result<Self> {
        let data = &disc.compr_data[..disc.compr_data_len as usize];
        match disc.compression() {
            WIACompression::None => Ok(Self::None),
            #[cfg(feature = "compress-bzip2")]
            WIACompression::Bzip2 => Ok(Self::Bzip2),
            #[cfg(feature = "compress-lzma")]
            WIACompression::Lzma => Ok(Self::Lzma(Box::from(data))),
            #[cfg(feature = "compress-lzma")]
            WIACompression::Lzma2 => Ok(Self::Lzma2(Box::from(data))),
            #[cfg(feature = "compress-zstd")]
            WIACompression::Zstandard => Ok(Self::Zstandard),
            comp => Err(Error::DiscFormat(format!("Unsupported WIA/RVZ compression: {:?}", comp))),
        }
    }

    /// Wraps `reader` in a streaming decoder for this compression method.
    pub fn wrap<'a, R>(&mut self, reader: R) -> io::Result<Box<dyn Read + 'a>>
    where R: Read + 'a {
        Ok(match self {
            Decompressor::None => Box::new(reader),
            #[cfg(feature = "compress-bzip2")]
            Decompressor::Bzip2 => Box::new(bzip2::read::BzDecoder::new(reader)),
            #[cfg(feature = "compress-lzma")]
            Decompressor::Lzma(data) => {
                // Convert 7-Zip style props into decoder options.
                let options = lzma_props_decode(data)?;
                Box::new(new_lzma_decoder(reader, &options)?)
            }
            #[cfg(feature = "compress-lzma")]
            Decompressor::Lzma2(data) => {
                let options = lzma2_props_decode(data)?;
                Box::new(new_lzma2_decoder(reader, &options)?)
            }
            #[cfg(feature = "compress-zstd")]
            Decompressor::Zstandard => Box::new(zstd::stream::Decoder::new(reader)?),
        })
    }
}
|
||||
|
||||
/// WIA/RVZ disc image reader.
pub struct DiscIOWIA {
    /// Underlying (possibly split) file reader
    inner: SplitFileReader,
    /// WIA/RVZ file header
    header: WIAFileHeader,
    /// Disc header ([WIADisc])
    disc: WIADisc,
    /// Wii partition descriptors
    partitions: Box<[WIAPartition]>,
    /// Raw (non-partition) data descriptors
    raw_data: Box<[WIARawData]>,
    /// Group table (stored uniformly as [RVZGroup])
    groups: Box<[RVZGroup]>,
    /// Optional NKit header
    nkit_header: Option<NKitHeader>,
    /// Decompressor for the disc's compression method
    decompressor: Decompressor,
    /// Index of the currently cached group (u32::MAX = none; see Clone impl)
    group: u32,
    /// Cached data for the current group (reset on clone)
    group_data: Vec<u8>,
    /// Cached hash exception lists for the current group (reset on clone)
    exception_lists: Vec<WIAExceptionList>,
}
|
||||
|
||||
impl Clone for DiscIOWIA {
    /// Clones all parsed metadata but resets the per-instance group cache
    /// fields (`group` sentinel u32::MAX, empty buffers).
    fn clone(&self) -> Self {
        Self {
            header: self.header.clone(),
            disc: self.disc.clone(),
            partitions: self.partitions.clone(),
            raw_data: self.raw_data.clone(),
            groups: self.groups.clone(),
            inner: self.inner.clone(),
            nkit_header: self.nkit_header.clone(),
            decompressor: self.decompressor.clone(),
            group: u32::MAX,
            group_data: Vec::new(),
            exception_lists: Vec::new(),
        }
    }
}
|
||||
|
||||
#[inline]
|
||||
fn hash_bytes(buf: &[u8]) -> HashBytes {
|
||||
let mut hasher = Sha1::new();
|
||||
hasher.update(buf);
|
||||
hasher.finalize().into()
|
||||
}
|
||||
|
||||
fn verify_hash(buf: &[u8], expected: &HashBytes) -> Result<()> {
|
||||
let out = hash_bytes(buf);
|
||||
if out != *expected {
|
||||
let mut got_bytes = [0u8; 40];
|
||||
let got = base16ct::lower::encode_str(&out, &mut got_bytes).unwrap(); // Safe: fixed buffer size
|
||||
let mut expected_bytes = [0u8; 40];
|
||||
let expected = base16ct::lower::encode_str(expected, &mut expected_bytes).unwrap(); // Safe: fixed buffer size
|
||||
return Err(Error::DiscFormat(format!(
|
||||
"WIA hash mismatch: {}, expected {}",
|
||||
got, expected
|
||||
)));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
impl DiscIOWIA {
    /// Opens a WIA/RVZ disc image, reading and hash-verifying all metadata:
    /// file header, disc header, partition table, raw data table and group
    /// table. Returns a boxed instance with an empty group cache.
    pub fn new(filename: &Path) -> Result<Box<Self>> {
        let mut inner = SplitFileReader::new(filename)?;

        // Load & verify file header
        let header: WIAFileHeader = read_from(&mut inner).context("Reading WIA/RVZ file header")?;
        header.validate()?;
        let is_rvz = header.is_rvz();
        // log::debug!("Header: {:?}", header);

        // Load & verify disc header
        let mut disc_buf: Vec<u8> = read_vec(&mut inner, header.disc_size.get() as usize)
            .context("Reading WIA/RVZ disc header")?;
        verify_hash(&disc_buf, &header.disc_hash)?;
        // The stored disc header may be shorter than WIADisc; zero-pad before
        // parsing so the fixed-size read below always succeeds.
        disc_buf.resize(size_of::<WIADisc>(), 0);
        let disc = WIADisc::read_from(disc_buf.as_slice()).unwrap();
        disc.validate()?;
        // if !options.rebuild_hashes {
        //     // If we're not rebuilding hashes, disable partition hashes in disc header
        //     disc.disc_head[0x60] = 1;
        // }
        // if !options.rebuild_encryption {
        //     // If we're not re-encrypting, disable partition encryption in disc header
        //     disc.disc_head[0x61] = 1;
        // }
        // log::debug!("Disc: {:?}", disc);

        // Read NKit header if present (after disc header)
        let nkit_header = NKitHeader::try_read_from(&mut inner, disc.chunk_size.get(), false);

        // Load & verify partition headers
        inner
            .seek(SeekFrom::Start(disc.partition_offset.get()))
            .context("Seeking to WIA/RVZ partition headers")?;
        let partitions: Box<[WIAPartition]> =
            read_box_slice(&mut inner, disc.num_partitions.get() as usize)
                .context("Reading WIA/RVZ partition headers")?;
        verify_hash(partitions.as_ref().as_bytes(), &disc.partition_hash)?;
        // log::debug!("Partitions: {:?}", partitions);

        // Create decompressor
        let mut decompressor = Decompressor::new(&disc)?;

        // Load raw data headers (stored compressed; wrap the bounded reader)
        let raw_data: Box<[WIARawData]> = {
            inner
                .seek(SeekFrom::Start(disc.raw_data_offset.get()))
                .context("Seeking to WIA/RVZ raw data headers")?;
            let mut reader = decompressor
                .wrap((&mut inner).take(disc.raw_data_size.get() as u64))
                .context("Creating WIA/RVZ decompressor")?;
            read_box_slice(&mut reader, disc.num_raw_data.get() as usize)
                .context("Reading WIA/RVZ raw data headers")?
        };
        // Validate raw data alignment
        for (idx, rd) in raw_data.iter().enumerate() {
            let start_offset = rd.start_offset();
            let end_offset = rd.end_offset();
            if (start_offset % SECTOR_SIZE as u64) != 0 || (end_offset % SECTOR_SIZE as u64) != 0 {
                return Err(Error::DiscFormat(format!(
                    "WIA/RVZ raw data {} not aligned to sector: {:#X}..{:#X}",
                    idx, start_offset, end_offset
                )));
            }
        }
        // log::debug!("Raw data: {:?}", raw_data);

        // Load group headers (also stored compressed)
        let groups = {
            inner
                .seek(SeekFrom::Start(disc.group_offset.get()))
                .context("Seeking to WIA/RVZ group headers")?;
            let mut reader = decompressor
                .wrap((&mut inner).take(disc.group_size.get() as u64))
                .context("Creating WIA/RVZ decompressor")?;
            if is_rvz {
                read_box_slice(&mut reader, disc.num_groups.get() as usize)
                    .context("Reading WIA/RVZ group headers")?
            } else {
                // Plain WIA group entries lack the RVZ packed-size field;
                // convert each one into the common RVZGroup representation.
                let wia_groups: Box<[WIAGroup]> =
                    read_box_slice(&mut reader, disc.num_groups.get() as usize)
                        .context("Reading WIA/RVZ group headers")?;
                wia_groups.iter().map(RVZGroup::from).collect()
            }
            // log::debug!("Groups: {:?}", groups);
        };

        Ok(Box::new(Self {
            header,
            disc,
            partitions,
            raw_data,
            groups,
            inner,
            nkit_header,
            decompressor,
            // u32::MAX marks "no group cached yet".
            group: u32::MAX,
            group_data: vec![],
            exception_lists: vec![],
        }))
    }
}
|
||||
|
||||
fn read_exception_lists<R>(
|
||||
reader: &mut R,
|
||||
in_partition: bool,
|
||||
chunk_size: u32,
|
||||
) -> io::Result<Vec<WIAExceptionList>>
|
||||
where
|
||||
R: Read + ?Sized,
|
||||
{
|
||||
if !in_partition {
|
||||
return Ok(vec![]);
|
||||
}
|
||||
|
||||
// One exception list for each 2 MiB of data
|
||||
let num_exception_list = (chunk_size as usize).div_ceil(0x200000);
|
||||
// log::debug!("Num exception list: {:?}", num_exception_list);
|
||||
let mut exception_lists = Vec::with_capacity(num_exception_list);
|
||||
for i in 0..num_exception_list {
|
||||
let num_exceptions = read_u16_be(reader)?;
|
||||
let exceptions: Box<[WIAException]> = read_box_slice(reader, num_exceptions as usize)?;
|
||||
if !exceptions.is_empty() {
|
||||
log::debug!("Exception list {}: {:?}", i, exceptions);
|
||||
}
|
||||
exception_lists.push(exceptions);
|
||||
}
|
||||
Ok(exception_lists)
|
||||
}
|
||||
|
||||
impl BlockIO for DiscIOWIA {
    /// Reads one sector (`block_size()` bytes) into `out`, locating and
    /// decompressing the containing WIA/RVZ group on demand and caching it
    /// in `self.group_data` for subsequent sectors of the same group.
    fn read_block(
        &mut self,
        out: &mut [u8],
        sector: u32,
        partition: Option<&PartitionInfo>,
    ) -> io::Result<Option<Block>> {
        let mut chunk_size = self.disc.chunk_size.get();
        let sectors_per_chunk = chunk_size / SECTOR_SIZE as u32;
        let disc_offset = sector as u64 * SECTOR_SIZE as u64;
        let mut partition_offset = disc_offset;
        if let Some(partition) = partition {
            // Within a partition, hashes are excluded from the data size
            // NOTE(review): `chunk_size * SECTOR_DATA_SIZE` is evaluated in
            // u32 and looks like it could overflow for large chunk sizes —
            // TODO confirm the upper bound of `chunk_size`.
            chunk_size = (chunk_size * SECTOR_DATA_SIZE as u32) / SECTOR_SIZE as u32;
            partition_offset =
                (sector - partition.data_start_sector) as u64 * SECTOR_DATA_SIZE as u64;
        }

        // Map the requested sector to (group index, sector offset within group).
        let (group_index, group_sector) = if let Some(partition) = partition {
            // Find the partition
            let Some(wia_part) = self.partitions.get(partition.index) else {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    format!("Couldn't find WIA/RVZ partition index {}", partition.index),
                ));
            };

            // Sanity check partition sector ranges
            let wia_part_start = wia_part.partition_data[0].first_sector.get();
            let wia_part_end = wia_part.partition_data[1].first_sector.get()
                + wia_part.partition_data[1].num_sectors.get();
            if partition.data_start_sector != wia_part_start
                || partition.data_end_sector != wia_part_end
            {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    format!(
                        "WIA/RVZ partition sector mismatch: {}..{} != {}..{}",
                        wia_part_start,
                        wia_part_end,
                        partition.data_start_sector,
                        partition.data_end_sector
                    ),
                ));
            }

            // Find the partition data for the sector
            let Some(pd) = wia_part.partition_data.iter().find(|pd| pd.contains(sector)) else {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    format!("Couldn't find WIA/RVZ partition data for sector {}", sector),
                ));
            };

            // Find the group index for the sector
            let part_data_sector = sector - pd.first_sector.get();
            let part_group_index = part_data_sector / sectors_per_chunk;
            let part_group_sector = part_data_sector % sectors_per_chunk;
            if part_group_index >= pd.num_groups.get() {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    format!(
                        "WIA/RVZ partition group index out of range: {} >= {}",
                        part_group_index,
                        pd.num_groups.get()
                    ),
                ));
            }

            (pd.group_index.get() + part_group_index, part_group_sector)
        } else {
            // Raw (non-partition) data path.
            let Some(rd) = self.raw_data.iter().find(|d| d.contains(sector)) else {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    format!("Couldn't find WIA/RVZ raw data for sector {}", sector),
                ));
            };

            // Find the group index for the sector
            let data_sector = sector - (rd.raw_data_offset.get() / SECTOR_SIZE as u64) as u32;
            let group_index = data_sector / sectors_per_chunk;
            let group_sector = data_sector % sectors_per_chunk;
            if group_index >= rd.num_groups.get() {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    format!(
                        "WIA/RVZ raw data group index out of range: {} >= {}",
                        group_index,
                        rd.num_groups.get()
                    ),
                ));
            }

            (rd.group_index.get() + group_index, group_sector)
        };

        // Fetch the group
        let Some(group) = self.groups.get(group_index as usize) else {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                format!("Couldn't find WIA/RVZ group index {}", group_index),
            ));
        };

        // Special case for all-zero data
        if group.data_size() == 0 {
            self.exception_lists.clear();
            return Ok(Some(Block::Zero));
        }

        // Read group data if necessary (cache miss)
        if group_index != self.group {
            self.group_data = Vec::with_capacity(chunk_size as usize);
            // Group data offsets are stored in units of 4 bytes.
            let group_data_start = group.data_offset.get() as u64 * 4;
            self.inner.seek(SeekFrom::Start(group_data_start))?;

            let mut reader = (&mut self.inner).take_seek(group.data_size() as u64);
            // Exception lists are stored *before* the compressed stream when
            // the group data itself is uncompressed; otherwise they are the
            // first thing inside the compressed stream.
            let uncompressed_exception_lists =
                matches!(self.disc.compression(), WIACompression::None | WIACompression::Purge)
                    || !group.is_compressed();
            if uncompressed_exception_lists {
                self.exception_lists = read_exception_lists(
                    &mut reader,
                    partition.is_some(),
                    self.disc.chunk_size.get(),
                )?;
                // Align to 4
                let rem = reader.stream_position()? % 4;
                if rem != 0 {
                    reader.seek(SeekFrom::Current((4 - rem) as i64))?;
                }
            }
            let mut reader: Box<dyn Read> = if group.is_compressed() {
                self.decompressor.wrap(reader)?
            } else {
                Box::new(reader)
            };
            if !uncompressed_exception_lists {
                self.exception_lists = read_exception_lists(
                    reader.as_mut(),
                    partition.is_some(),
                    self.disc.chunk_size.get(),
                )?;
            }

            if group.rvz_packed_size.get() > 0 {
                // Decode RVZ packed data: a sequence of [u32 size][payload]
                // records; the high bit of the size marks LFG-generated junk.
                let mut lfg = LaggedFibonacci::default();
                loop {
                    let mut size_bytes = [0u8; 4];
                    match reader.read_exact(&mut size_bytes) {
                        Ok(_) => {}
                        // Clean EOF means all records have been consumed.
                        Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => break,
                        Err(e) => {
                            return Err(io::Error::new(e.kind(), "Failed to read RVZ packed size"));
                        }
                    }
                    let size = u32::from_be_bytes(size_bytes);
                    let cur_data_len = self.group_data.len();
                    if size & 0x80000000 != 0 {
                        // Junk data
                        let size = size & 0x7FFFFFFF;
                        lfg.init_with_reader(reader.as_mut())?;
                        // Skip to the junk stream position matching this
                        // offset within the sector.
                        lfg.skip(
                            ((partition_offset + cur_data_len as u64) % SECTOR_SIZE as u64)
                                as usize,
                        );
                        self.group_data.resize(cur_data_len + size as usize, 0);
                        lfg.fill(&mut self.group_data[cur_data_len..]);
                    } else {
                        // Real data
                        self.group_data.resize(cur_data_len + size as usize, 0);
                        reader.read_exact(&mut self.group_data[cur_data_len..])?;
                    }
                }
            } else {
                // Read and decompress data
                reader.read_to_end(&mut self.group_data)?;
            }

            self.group = group_index;
        }

        // Read sector from cached group data
        if partition.is_some() {
            // Partition data is stored hash-stripped; zero the hash area and
            // copy the data portion after it.
            let sector_data_start = group_sector as usize * SECTOR_DATA_SIZE;
            let sector_data =
                &self.group_data[sector_data_start..sector_data_start + SECTOR_DATA_SIZE];
            out[..HASHES_SIZE].fill(0);
            out[HASHES_SIZE..SECTOR_SIZE].copy_from_slice(sector_data);
            Ok(Some(Block::PartDecrypted { has_hashes: false }))
        } else {
            let sector_data_start = group_sector as usize * SECTOR_SIZE;
            out.copy_from_slice(
                &self.group_data[sector_data_start..sector_data_start + SECTOR_SIZE],
            );
            Ok(Some(Block::Raw))
        }
    }

    fn block_size(&self) -> u32 {
        // WIA/RVZ chunks aren't always the full size, so we'll consider the
        // block size to be one sector, and handle the complexity ourselves.
        SECTOR_SIZE as u32
    }

    /// Builds format metadata from the file/disc headers, then lets the NKit
    /// trailer (if any) overlay its own fields.
    fn meta(&self) -> DiscMeta {
        let mut result = DiscMeta {
            format: if self.header.is_rvz() { Format::Rvz } else { Format::Wia },
            block_size: Some(self.disc.chunk_size.get()),
            compression: match self.disc.compression() {
                WIACompression::None => Compression::None,
                WIACompression::Purge => Compression::Purge,
                WIACompression::Bzip2 => Compression::Bzip2,
                WIACompression::Lzma => Compression::Lzma,
                WIACompression::Lzma2 => Compression::Lzma2,
                WIACompression::Zstandard => Compression::Zstandard,
            },
            decrypted: true,
            needs_hash_recovery: true,
            lossless: true,
            disc_size: Some(self.header.iso_file_size.get()),
            ..Default::default()
        };
        if let Some(nkit_header) = &self.nkit_header {
            nkit_header.apply(&mut result);
        }
        result
    }
}
|
||||
174
nod/src/lib.rs
Normal file
174
nod/src/lib.rs
Normal file
@@ -0,0 +1,174 @@
|
||||
// #![warn(missing_docs, rustdoc::missing_doc_code_examples)]
|
||||
//! Library for traversing & reading GameCube and Wii disc images.
|
||||
//!
|
||||
//! Based on the C++ library [nod](https://github.com/AxioDL/nod),
|
||||
//! but does not currently support authoring.
|
||||
//!
|
||||
//! Currently supported file formats:
|
||||
//! - ISO (GCM)
|
||||
//! - WIA / RVZ
|
||||
//! - WBFS
|
||||
//! - NFS (Wii U VC files, e.g. `hif_000000.nfs`)
|
||||
//!
|
||||
//! # Examples
|
||||
//!
|
||||
//! Opening a disc image and reading a file:
|
||||
//! ```no_run
|
||||
//! use std::io::Read;
|
||||
//!
|
||||
//! use nod::{Disc, PartitionKind};
|
||||
//!
|
||||
//! fn main() -> nod::Result<()> {
|
||||
//! let disc = Disc::new("path/to/file.iso")?;
|
||||
//! let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
|
||||
//! let meta = partition.meta()?;
|
||||
//! let fst = meta.fst()?;
|
||||
//! if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
|
||||
//! let mut s = String::new();
|
||||
//! partition
|
||||
//! .open_file(node)
|
||||
//! .expect("Failed to open file stream")
|
||||
//! .read_to_string(&mut s)
|
||||
//! .expect("Failed to read file");
|
||||
//! println!("{}", s);
|
||||
//! }
|
||||
//! Ok(())
|
||||
//! }
|
||||
//! ```
|
||||
|
||||
use std::{
|
||||
io::{Read, Seek},
|
||||
path::Path,
|
||||
};
|
||||
|
||||
pub use disc::{
|
||||
AppLoaderHeader, DiscHeader, DolHeader, PartitionBase, PartitionHeader, PartitionKind,
|
||||
PartitionMeta, BI2_SIZE, BOOT_SIZE, SECTOR_SIZE,
|
||||
};
|
||||
pub use fst::{Fst, Node, NodeKind};
|
||||
pub use io::{block::PartitionInfo, Compression, DiscMeta, Format};
|
||||
pub use streams::ReadStream;
|
||||
|
||||
mod disc;
|
||||
mod fst;
|
||||
mod io;
|
||||
mod streams;
|
||||
mod util;
|
||||
|
||||
/// Error types for nod.
#[derive(thiserror::Error, Debug)]
pub enum Error {
    /// An error for disc format related issues.
    #[error("disc format error: {0}")]
    DiscFormat(String),
    /// A general I/O error, carrying a context message plus the underlying
    /// [`std::io::Error`] as its source.
    #[error("I/O error: {0}")]
    Io(String, #[source] std::io::Error),
    /// An unknown error.
    #[error("error: {0}")]
    Other(String),
}
|
||||
|
||||
impl From<&str> for Error {
|
||||
fn from(s: &str) -> Error { Error::Other(s.to_string()) }
|
||||
}
|
||||
|
||||
impl From<String> for Error {
|
||||
fn from(s: String) -> Error { Error::Other(s) }
|
||||
}
|
||||
|
||||
/// Helper result type for [`Error`].
///
/// The error type defaults to [`Error`] but may be overridden per use site.
pub type Result<T, E = Error> = core::result::Result<T, E>;
|
||||
|
||||
/// Attaches a human-readable context message to an error value, converting it
/// into a crate [`Error`].
pub trait ErrorContext {
    /// Wraps `self` in an [`Error`] carrying `context`.
    fn context(self, context: impl Into<String>) -> Error;
}
|
||||
|
||||
// I/O errors become `Error::Io`, preserving the original error as the source.
impl ErrorContext for std::io::Error {
    fn context(self, context: impl Into<String>) -> Error { Error::Io(context.into(), self) }
}
|
||||
|
||||
/// Extension trait adding `context`/`with_context` helpers to `Result`,
/// mirroring the style of `anyhow`.
pub trait ResultContext<T> {
    /// Replaces the error with an [`Error`] carrying `context`.
    fn context(self, context: impl Into<String>) -> Result<T>;

    /// Like [`ResultContext::context`], but builds the message lazily —
    /// `f` is only called on the error path.
    fn with_context<F>(self, f: F) -> Result<T>
    where F: FnOnce() -> String;
}
|
||||
|
||||
impl<T, E> ResultContext<T> for Result<T, E>
|
||||
where E: ErrorContext
|
||||
{
|
||||
fn context(self, context: impl Into<String>) -> Result<T> {
|
||||
self.map_err(|e| e.context(context))
|
||||
}
|
||||
|
||||
fn with_context<F>(self, f: F) -> Result<T>
|
||||
where F: FnOnce() -> String {
|
||||
self.map_err(|e| e.context(f()))
|
||||
}
|
||||
}
|
||||
|
||||
/// Options controlling how a disc image is opened and read.
#[derive(Default, Debug, Clone)]
pub struct OpenOptions {
    /// Wii: Rebuild partition data encryption and hashes if the underlying format stores data
    /// decrypted or with hashes removed. (e.g. WIA/RVZ, NFS)
    pub rebuild_encryption: bool,
    /// Wii: Validate partition data hashes while reading the disc image.
    pub validate_hashes: bool,
}
|
||||
|
||||
/// An open disc image. Created via [`Disc::new`] / [`Disc::new_with_options`].
pub struct Disc {
    // Format-aware reader that handles sector translation for the image.
    reader: disc::reader::DiscReader,
    // Options captured at open time; reused when opening partitions.
    options: OpenOptions,
}
|
||||
|
||||
impl Disc {
    /// Opens a disc image from a file path with default [`OpenOptions`].
    pub fn new<P: AsRef<Path>>(path: P) -> Result<Disc> {
        Disc::new_with_options(path, &OpenOptions::default())
    }

    /// Opens a disc image from a file path with custom options.
    ///
    /// The container format is detected by `io::block::open`; the options are
    /// cloned and retained for later partition opens.
    pub fn new_with_options<P: AsRef<Path>>(path: P, options: &OpenOptions) -> Result<Disc> {
        let io = io::block::open(path.as_ref())?;
        let reader = disc::reader::DiscReader::new(io, options)?;
        Ok(Disc { reader, options: options.clone() })
    }

    /// The disc's header.
    pub fn header(&self) -> &DiscHeader { self.reader.header() }

    /// Returns extra metadata included in the disc file format, if any.
    pub fn meta(&self) -> DiscMeta { self.reader.meta() }

    /// The disc's size in bytes, or an estimate if not stored by the format.
    pub fn disc_size(&self) -> u64 { self.reader.disc_size() }

    /// A list of Wii partitions on the disc.
    ///
    /// For GameCube discs, this will return an empty slice.
    pub fn partitions(&self) -> &[PartitionInfo] { self.reader.partitions() }

    /// Opens a new, decrypted partition read stream for the specified partition index.
    ///
    /// For GameCube discs, the index must always be 0.
    pub fn open_partition(&self, index: usize) -> Result<Box<dyn PartitionBase>> {
        self.reader.open_partition(index, &self.options)
    }

    /// Opens a new partition read stream for the first partition matching
    /// the specified type.
    ///
    /// For GameCube discs, the kind must always be `PartitionKind::Data`.
    pub fn open_partition_kind(&self, kind: PartitionKind) -> Result<Box<dyn PartitionBase>> {
        self.reader.open_partition_kind(kind, &self.options)
    }
}
|
||||
|
||||
// Reading a `Disc` delegates directly to the inner format-aware reader.
impl Read for Disc {
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { self.reader.read(buf) }
}
|
||||
|
||||
// Seeking a `Disc` delegates directly to the inner format-aware reader.
impl Seek for Disc {
    fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result<u64> { self.reader.seek(pos) }
}
|
||||
80
nod/src/streams.rs
Normal file
80
nod/src/streams.rs
Normal file
@@ -0,0 +1,80 @@
|
||||
//! Common stream types
|
||||
|
||||
use std::{
|
||||
io,
|
||||
io::{Read, Seek, SeekFrom},
|
||||
};
|
||||
|
||||
/// A helper trait for seekable read streams.
pub trait ReadStream: Read + Seek {
    /// Creates a windowed read sub-stream with offset and size.
    ///
    /// `offset` and `size` are absolute positions/lengths in this stream.
    /// Seeks underlying stream immediately.
    fn new_window(&mut self, offset: u64, size: u64) -> io::Result<SharedWindowedReadStream> {
        self.seek(SeekFrom::Start(offset))?;
        Ok(SharedWindowedReadStream { base: self.as_dyn(), begin: offset, end: offset + size })
    }

    /// Retrieves a type-erased reference to the stream.
    fn as_dyn(&mut self) -> &mut dyn ReadStream;
}
|
||||
|
||||
// Blanket implementation: anything that is `Read + Seek` is a `ReadStream`.
impl<T> ReadStream for T
where T: Read + Seek
{
    fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
}
|
||||
|
||||
/// A non-owning window into an existing [`ReadStream`].
pub struct SharedWindowedReadStream<'a> {
    /// A reference to the base stream.
    pub base: &'a mut dyn ReadStream,
    /// The beginning of the window in bytes (absolute offset in `base`).
    pub begin: u64,
    /// The end of the window in bytes (absolute offset in `base`).
    pub end: u64,
}
|
||||
|
||||
impl<'a> SharedWindowedReadStream<'a> {
    /// Modifies the current window & seeks to the beginning of the window.
    ///
    /// The seek happens first so a failure leaves the window bounds unchanged.
    pub fn set_window(&mut self, begin: u64, end: u64) -> io::Result<()> {
        self.base.seek(SeekFrom::Start(begin))?;
        self.begin = begin;
        self.end = end;
        Ok(())
    }
}
|
||||
|
||||
impl<'a> Read for SharedWindowedReadStream<'a> {
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
let pos = self.stream_position()?;
|
||||
let size = self.end - self.begin;
|
||||
if pos == size {
|
||||
return Ok(0);
|
||||
}
|
||||
self.base.read(if pos + buf.len() as u64 > size {
|
||||
&mut buf[..(size - pos) as usize]
|
||||
} else {
|
||||
buf
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Seek for SharedWindowedReadStream<'a> {
|
||||
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
|
||||
let result = self.base.seek(match pos {
|
||||
SeekFrom::Start(p) => SeekFrom::Start(self.begin + p),
|
||||
SeekFrom::End(p) => SeekFrom::End(self.end as i64 + p),
|
||||
SeekFrom::Current(_) => pos,
|
||||
})?;
|
||||
if result < self.begin || result > self.end {
|
||||
Err(io::Error::from(io::ErrorKind::UnexpectedEof))
|
||||
} else {
|
||||
Ok(result - self.begin)
|
||||
}
|
||||
}
|
||||
|
||||
fn stream_position(&mut self) -> io::Result<u64> {
|
||||
Ok(self.base.stream_position()? - self.begin)
|
||||
}
|
||||
}
|
||||
92
nod/src/util/compress.rs
Normal file
92
nod/src/util/compress.rs
Normal file
@@ -0,0 +1,92 @@
|
||||
use std::{io, io::Read};
|
||||
|
||||
/// Decodes the LZMA Properties byte (lc/lp/pb).
/// See `lzma_lzma_lclppb_decode` in `liblzma/lzma/lzma_decoder.c`.
#[cfg(feature = "compress-lzma")]
pub fn lzma_lclppb_decode(options: &mut liblzma::stream::LzmaOptions, byte: u8) -> io::Result<()> {
    let value = byte as u32;
    // Valid bytes encode lc + lp * 9 + pb * 45, so anything >= 9*5*5 is invalid.
    if value >= (9 * 5 * 5) {
        return Err(io::Error::new(
            io::ErrorKind::InvalidData,
            format!("Invalid LZMA props byte: {}", value),
        ));
    }
    let lc = value % 9;
    let rest = value / 9;
    options.literal_context_bits(lc);
    options.literal_position_bits(rest % 5);
    options.position_bits(rest / 5);
    Ok(())
}
|
||||
|
||||
/// Decodes LZMA properties.
/// See `lzma_lzma_props_decode` in `liblzma/lzma/lzma_decoder.c`.
#[cfg(feature = "compress-lzma")]
pub fn lzma_props_decode(props: &[u8]) -> io::Result<liblzma::stream::LzmaOptions> {
    if props.len() != 5 {
        return Err(io::Error::new(
            io::ErrorKind::InvalidData,
            format!("Invalid LZMA props length: {}", props.len()),
        ));
    }
    let mut options = liblzma::stream::LzmaOptions::new();
    // Byte 0: packed lc/lp/pb. Bytes 1..5: little-endian dictionary size.
    lzma_lclppb_decode(&mut options, props[0])?;
    let dict_size = u32::from_le_bytes(props[1..5].try_into().unwrap()); // Safe: slice is exactly 4 bytes
    options.dict_size(dict_size);
    Ok(options)
}
|
||||
|
||||
/// Decodes LZMA2 properties.
/// See `lzma_lzma2_props_decode` in `liblzma/lzma/lzma2_decoder.c`.
#[cfg(feature = "compress-lzma")]
pub fn lzma2_props_decode(props: &[u8]) -> io::Result<liblzma::stream::LzmaOptions> {
    if props.len() != 1 {
        return Err(io::Error::new(
            io::ErrorKind::InvalidData,
            format!("Invalid LZMA2 props length: {}", props.len()),
        ));
    }
    let d = props[0] as u32;
    // Dictionary size encoding: > 40 invalid, 40 means 4 GiB - 1, otherwise
    // (2 | (d & 1)) << (d / 2 + 11).
    let dict_size = if d > 40 {
        return Err(io::Error::new(
            io::ErrorKind::InvalidData,
            format!("Invalid LZMA2 props byte: {}", d),
        ));
    } else if d == 40 {
        u32::MAX
    } else {
        (2 | (d & 1)) << (d / 2 + 11)
    };
    let mut options = liblzma::stream::LzmaOptions::new();
    options.dict_size(dict_size);
    Ok(options)
}
|
||||
|
||||
/// Creates a new raw LZMA decoder with the given options.
///
/// A *raw* decoder is required because WIA/RVZ stores headerless LZMA1
/// streams; the filter chain is built from the decoded props instead.
#[cfg(feature = "compress-lzma")]
pub fn new_lzma_decoder<R>(
    reader: R,
    options: &liblzma::stream::LzmaOptions,
) -> io::Result<liblzma::read::XzDecoder<R>>
where
    R: Read,
{
    let mut filters = liblzma::stream::Filters::new();
    filters.lzma1(options);
    let stream = liblzma::stream::Stream::new_raw_decoder(&filters).map_err(io::Error::from)?;
    Ok(liblzma::read::XzDecoder::new_stream(reader, stream))
}
|
||||
|
||||
/// Creates a new raw LZMA2 decoder with the given options.
///
/// As with [`new_lzma_decoder`], the stream is headerless, so a raw decoder
/// with an explicit LZMA2 filter chain is used.
#[cfg(feature = "compress-lzma")]
pub fn new_lzma2_decoder<R>(
    reader: R,
    options: &liblzma::stream::LzmaOptions,
) -> io::Result<liblzma::read::XzDecoder<R>>
where
    R: Read,
{
    let mut filters = liblzma::stream::Filters::new();
    filters.lzma2(options);
    let stream = liblzma::stream::Stream::new_raw_decoder(&filters).map_err(io::Error::from)?;
    Ok(liblzma::read::XzDecoder::new_stream(reader, stream))
}
|
||||
135
nod/src/util/lfg.rs
Normal file
135
nod/src/util/lfg.rs
Normal file
@@ -0,0 +1,135 @@
|
||||
use std::{cmp::min, io, io::Read};
|
||||
|
||||
use zerocopy::{transmute_ref, AsBytes};
|
||||
|
||||
use crate::disc::SECTOR_SIZE;
|
||||
|
||||
/// Long lag of the generator (state buffer length, in u32 words).
pub const LFG_K: usize = 521;
/// Short lag of the generator.
pub const LFG_J: usize = 32;
/// Number of u32 seed words used to initialize the state.
pub const SEED_SIZE: usize = 17;
|
||||
|
||||
/// Lagged Fibonacci generator for Wii partition junk data.
///
/// References (license CC0-1.0):
/// https://github.com/dolphin-emu/dolphin/blob/a0f555648c27ec0c928f6b1e1fcad5e2d7c4d0c4/docs/WiaAndRvz.md
/// https://github.com/dolphin-emu/dolphin/blob/a0f555648c27ec0c928f6b1e1fcad5e2d7c4d0c4/Source/Core/DiscIO/LaggedFibonacciGenerator.cpp
pub struct LaggedFibonacci {
    // Generator state; reinterpreted as LFG_K * 4 output bytes by `fill`.
    buffer: [u32; LFG_K],
    // Current *byte* position within the buffer's output window.
    position: usize,
}
|
||||
|
||||
impl Default for LaggedFibonacci {
|
||||
fn default() -> Self { Self { buffer: [0u32; LFG_K], position: 0 } }
|
||||
}
|
||||
|
||||
impl LaggedFibonacci {
    /// Expands the first `SEED_SIZE` words into the full `LFG_K`-word state
    /// and warms the generator up.
    fn init(&mut self) {
        for i in SEED_SIZE..LFG_K {
            self.buffer[i] =
                (self.buffer[i - 17] << 23) ^ (self.buffer[i - 16] >> 9) ^ self.buffer[i - 1];
        }
        // Instead of doing the "shift by 18 instead of 16" oddity when actually outputting the data,
        // we can do the shifting (and byteswapping) at this point to make the output code simpler.
        for x in self.buffer.iter_mut() {
            *x = ((*x & 0xFF00FFFF) | (*x >> 2 & 0x00FF0000)).swap_bytes();
        }
        // Warm-up passes before any output is taken.
        for _ in 0..4 {
            self.forward();
        }
    }

    /// Seeds the generator the way the Wii does for junk data: from a 4-byte
    /// ID-derived value, the disc number, and the absolute partition offset.
    /// The generator is left positioned at `partition_offset` within its
    /// sector.
    pub fn init_with_seed(&mut self, init: [u8; 4], disc_num: u8, partition_offset: u64) {
        // Byte shuffle/sum of the input ID, XORed with the disc number.
        let seed = u32::from_be_bytes([
            init[2],
            init[1],
            init[3].wrapping_add(init[2]),
            init[0].wrapping_add(init[1]),
        ]) ^ disc_num as u32;
        let sector = (partition_offset / SECTOR_SIZE as u64) as u32;
        let sector_offset = partition_offset % SECTOR_SIZE as u64;
        let mut n = seed.wrapping_mul(0x260BCD5) ^ sector.wrapping_mul(0x1EF29123);
        // Generate SEED_SIZE words, one bit per LCG step.
        for i in 0..SEED_SIZE {
            let mut v = 0u32;
            for _ in 0..LFG_J {
                n = n.wrapping_mul(0x5D588B65).wrapping_add(1);
                v = (v >> 1) | (n & 0x80000000);
            }
            self.buffer[i] = v;
        }
        self.buffer[16] ^= self.buffer[0] >> 9 ^ self.buffer[16] << 23;
        self.position = 0;
        self.init();
        // Advance to the requested offset within the sector.
        self.skip(sector_offset as usize);
    }

    /// Seeds the generator from `SEED_SIZE` big-endian u32 words read from
    /// `reader` (the form used by RVZ packed junk records).
    pub fn init_with_reader<R>(&mut self, reader: &mut R) -> io::Result<()>
    where R: Read + ?Sized {
        reader.read_exact(self.buffer[..SEED_SIZE].as_bytes_mut())?;
        for x in self.buffer[..SEED_SIZE].iter_mut() {
            *x = u32::from_be(*x);
        }
        self.position = 0;
        self.init();
        Ok(())
    }

    /// Advances the state by one full buffer (LFG_K words) using the
    /// two-lag XOR recurrence.
    pub fn forward(&mut self) {
        for i in 0..LFG_J {
            self.buffer[i] ^= self.buffer[i + LFG_K - LFG_J];
        }
        for i in LFG_J..LFG_K {
            self.buffer[i] ^= self.buffer[i - LFG_J];
        }
    }

    /// Skips `n` output bytes, advancing the state as needed.
    pub fn skip(&mut self, n: usize) {
        self.position += n;
        while self.position >= LFG_K * 4 {
            self.forward();
            self.position -= LFG_K * 4;
        }
    }

    /// Fills `buf` with generated junk bytes, continuing from the current
    /// position and advancing the state across buffer boundaries.
    pub fn fill(&mut self, mut buf: &mut [u8]) {
        while !buf.is_empty() {
            let len = min(buf.len(), LFG_K * 4 - self.position);
            // View the u32 state as raw bytes for copying.
            let bytes: &[u8; LFG_K * 4] = transmute_ref!(&self.buffer);
            buf[..len].copy_from_slice(&bytes[self.position..self.position + len]);
            self.position += len;
            buf = &mut buf[len..];
            if self.position == LFG_K * 4 {
                self.forward();
                self.position = 0;
            }
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Expected byte sequences presumably captured from Dolphin's
    // LaggedFibonacciGenerator (see struct docs) — verify against that
    // reference if these values are ever regenerated.
    #[test]
    fn test_init_with_seed_1() {
        let mut lfg = LaggedFibonacci::default();
        lfg.init_with_seed([0x47, 0x41, 0x4c, 0x45], 0, 0x600000);
        let mut buf = [0u8; 16];
        lfg.fill(&mut buf);
        assert_eq!(buf, [
            0xE9, 0x47, 0x67, 0xBD, 0x41, 0x50, 0x4D, 0x5D, 0x61, 0x48, 0xB1, 0x99, 0xA0, 0x12,
            0x0C, 0xBA
        ]);
    }

    // Same seed, different offset within the partition (mid-sector start).
    #[test]
    fn test_init_with_seed_2() {
        let mut lfg = LaggedFibonacci::default();
        lfg.init_with_seed([0x47, 0x41, 0x4c, 0x45], 0, 0x608000);
        let mut buf = [0u8; 16];
        lfg.fill(&mut buf);
        assert_eq!(buf, [
            0xE2, 0xBB, 0xBD, 0x77, 0xDA, 0xB2, 0x22, 0x42, 0x1C, 0x0C, 0x0B, 0xFC, 0xAC, 0x06,
            0xEA, 0xD0
        ]);
    }
}
|
||||
46
nod/src/util/mod.rs
Normal file
46
nod/src/util/mod.rs
Normal file
@@ -0,0 +1,46 @@
|
||||
use std::ops::{Div, Rem};
|
||||
|
||||
pub(crate) mod compress;
|
||||
pub(crate) mod lfg;
|
||||
pub(crate) mod read;
|
||||
pub(crate) mod take_seek;
|
||||
|
||||
/// Returns the quotient and remainder of `x / y` as a `(quot, rem)` tuple.
#[inline(always)]
pub(crate) fn div_rem<T>(x: T, y: T) -> (T, T)
where T: Div<Output = T> + Rem<Output = T> + Copy {
    (x / y, x % y)
}
|
||||
|
||||
/// Creates a fixed-size array reference from a slice.
///
/// Panics if `$slice[$offset..$offset + $size]` is out of bounds.
#[macro_export]
macro_rules! array_ref {
    ($slice:expr, $offset:expr, $size:expr) => {{
        #[inline]
        fn to_array<T>(slice: &[T]) -> &[T; $size] {
            // SAFETY: the indexing expression below yields a slice of exactly
            // `$size` elements (it panics otherwise), so reinterpreting its
            // data pointer as `&[T; $size]` is in-bounds and properly aligned.
            unsafe { &*(slice.as_ptr() as *const [_; $size]) }
        }
        to_array(&$slice[$offset..$offset + $size])
    }};
}
|
||||
|
||||
/// Creates a mutable fixed-size array reference from a slice.
///
/// Panics if `$slice[$offset..$offset + $size]` is out of bounds.
#[macro_export]
macro_rules! array_ref_mut {
    ($slice:expr, $offset:expr, $size:expr) => {{
        #[inline]
        fn to_array<T>(slice: &mut [T]) -> &mut [T; $size] {
            // Fix: use `as_mut_ptr()` here. The previous version called
            // `as_ptr()` and cast the resulting `*const T` to `*mut T`;
            // mutating through a pointer derived from a const accessor is
            // undefined behavior.
            // SAFETY: the indexing expression below yields a slice of exactly
            // `$size` elements (it panics otherwise), so reinterpreting its
            // data pointer as `&mut [T; $size]` is in-bounds and aligned.
            unsafe { &mut *(slice.as_mut_ptr() as *mut [_; $size]) }
        }
        to_array(&mut $slice[$offset..$offset + $size])
    }};
}
|
||||
|
||||
/// Compile-time assertion: fails the build if `$cond` is false.
#[macro_export]
macro_rules! static_assert {
    ($cond:expr) => {
        // Evaluating the assert inside an anonymous `const` forces the check
        // at compile time; a false condition is a compile error.
        const _: () = core::assert!($cond);
    };
}
|
||||
71
nod/src/util/read.rs
Normal file
71
nod/src/util/read.rs
Normal file
@@ -0,0 +1,71 @@
|
||||
use std::{io, io::Read};
|
||||
|
||||
use zerocopy::{AsBytes, FromBytes, FromZeroes};
|
||||
|
||||
#[inline(always)]
|
||||
pub fn read_from<T, R>(reader: &mut R) -> io::Result<T>
|
||||
where
|
||||
T: FromBytes + FromZeroes + AsBytes,
|
||||
R: Read + ?Sized,
|
||||
{
|
||||
let mut ret = <T>::new_zeroed();
|
||||
reader.read_exact(ret.as_bytes_mut())?;
|
||||
Ok(ret)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn read_vec<T, R>(reader: &mut R, count: usize) -> io::Result<Vec<T>>
|
||||
where
|
||||
T: FromBytes + FromZeroes + AsBytes,
|
||||
R: Read + ?Sized,
|
||||
{
|
||||
let mut ret = <T>::new_vec_zeroed(count);
|
||||
reader.read_exact(ret.as_mut_slice().as_bytes_mut())?;
|
||||
Ok(ret)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn read_box<T, R>(reader: &mut R) -> io::Result<Box<T>>
|
||||
where
|
||||
T: FromBytes + FromZeroes + AsBytes,
|
||||
R: Read + ?Sized,
|
||||
{
|
||||
let mut ret = <T>::new_box_zeroed();
|
||||
reader.read_exact(ret.as_mut().as_bytes_mut())?;
|
||||
Ok(ret)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn read_box_slice<T, R>(reader: &mut R, count: usize) -> io::Result<Box<[T]>>
|
||||
where
|
||||
T: FromBytes + FromZeroes + AsBytes,
|
||||
R: Read + ?Sized,
|
||||
{
|
||||
let mut ret = <T>::new_box_slice_zeroed(count);
|
||||
reader.read_exact(ret.as_mut().as_bytes_mut())?;
|
||||
Ok(ret)
|
||||
}
|
||||
|
||||
/// Reads a big-endian `u16` from the reader.
#[inline(always)]
pub fn read_u16_be<R>(reader: &mut R) -> io::Result<u16>
where R: Read + ?Sized {
    let mut bytes = [0u8; 2];
    reader.read_exact(&mut bytes).map(|()| u16::from_be_bytes(bytes))
}
|
||||
|
||||
/// Reads a big-endian `u32` from the reader.
#[inline(always)]
pub fn read_u32_be<R>(reader: &mut R) -> io::Result<u32>
where R: Read + ?Sized {
    let mut bytes = [0u8; 4];
    reader.read_exact(&mut bytes).map(|()| u32::from_be_bytes(bytes))
}
|
||||
|
||||
/// Reads a big-endian `u64` from the reader.
#[inline(always)]
pub fn read_u64_be<R>(reader: &mut R) -> io::Result<u64>
where R: Read + ?Sized {
    let mut bytes = [0u8; 8];
    reader.read_exact(&mut bytes).map(|()| u64::from_be_bytes(bytes))
}
|
||||
127
nod/src/util/take_seek.rs
Normal file
127
nod/src/util/take_seek.rs
Normal file
@@ -0,0 +1,127 @@
|
||||
// From https://github.com/jam1garner/binrw/blob/e96a1320287ec83d1f471525ffa380800ec9e124/binrw/src/io/take_seek.rs
|
||||
// MIT License
|
||||
//
|
||||
// Copyright (c) jam1garner and other contributors
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in all
|
||||
// copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
// SOFTWARE.
|
||||
#![allow(dead_code)]
|
||||
//! Types for seekable reader adapters which limit the number of bytes read from
|
||||
//! the underlying reader.
|
||||
|
||||
use std::io::{Read, Result, Seek, SeekFrom};
|
||||
|
||||
/// Read adapter which limits the bytes read from an underlying reader, with
/// seek support.
///
/// This struct is generally created by importing the [`TakeSeekExt`] extension
/// and calling [`take_seek`] on a reader.
///
/// [`take_seek`]: TakeSeekExt::take_seek
#[derive(Debug)]
pub struct TakeSeek<T> {
    // The wrapped reader/seeker.
    inner: T,
    // Current absolute position within the underlying stream; kept in sync by
    // `read` and `seek` so `stream_position` can answer without a syscall.
    pos: u64,
    // Absolute position (exclusive) past which reads report EOF.
    end: u64,
}
|
||||
|
||||
impl<T> TakeSeek<T> {
|
||||
/// Gets a reference to the underlying reader.
|
||||
pub fn get_ref(&self) -> &T { &self.inner }
|
||||
|
||||
/// Gets a mutable reference to the underlying reader.
|
||||
///
|
||||
/// Care should be taken to avoid modifying the internal I/O state of the
|
||||
/// underlying reader as doing so may corrupt the internal limit of this
|
||||
/// `TakeSeek`.
|
||||
pub fn get_mut(&mut self) -> &mut T { &mut self.inner }
|
||||
|
||||
/// Consumes this wrapper, returning the wrapped value.
|
||||
pub fn into_inner(self) -> T { self.inner }
|
||||
|
||||
/// Returns the number of bytes that can be read before this instance will
|
||||
/// return EOF.
|
||||
///
|
||||
/// # Note
|
||||
///
|
||||
/// This instance may reach EOF after reading fewer bytes than indicated by
|
||||
/// this method if the underlying [`Read`] instance reaches EOF.
|
||||
pub fn limit(&self) -> u64 { self.end.saturating_sub(self.pos) }
|
||||
}
|
||||
|
||||
impl<T: Seek> TakeSeek<T> {
|
||||
/// Sets the number of bytes that can be read before this instance will
|
||||
/// return EOF. This is the same as constructing a new `TakeSeek` instance,
|
||||
/// so the amount of bytes read and the previous limit value don’t matter
|
||||
/// when calling this method.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if the inner stream returns an error from `stream_position`.
|
||||
pub fn set_limit(&mut self, limit: u64) {
|
||||
let pos = self.inner.stream_position().expect("cannot get position for `set_limit`");
|
||||
self.pos = pos;
|
||||
self.end = pos + limit;
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Read> Read for TakeSeek<T> {
|
||||
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
|
||||
let limit = self.limit();
|
||||
|
||||
// Don't call into inner reader at all at EOF because it may still block
|
||||
if limit == 0 {
|
||||
return Ok(0);
|
||||
}
|
||||
|
||||
// Lint: It is impossible for this cast to truncate because the value
|
||||
// being cast is the minimum of two values, and one of the value types
|
||||
// is already `usize`.
|
||||
#[allow(clippy::cast_possible_truncation)]
|
||||
let max = (buf.len() as u64).min(limit) as usize;
|
||||
let n = self.inner.read(&mut buf[0..max])?;
|
||||
self.pos += n as u64;
|
||||
Ok(n)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Seek> Seek for TakeSeek<T> {
|
||||
fn seek(&mut self, pos: SeekFrom) -> Result<u64> {
|
||||
self.pos = self.inner.seek(pos)?;
|
||||
Ok(self.pos)
|
||||
}
|
||||
|
||||
fn stream_position(&mut self) -> Result<u64> { Ok(self.pos) }
|
||||
}
|
||||
|
||||
/// An extension trait that implements `take_seek()` for compatible streams.
pub trait TakeSeekExt {
    /// Creates an adapter which will read at most `limit` bytes from the
    /// wrapped stream, starting at the stream's current position.
    fn take_seek(self, limit: u64) -> TakeSeek<Self>
    where Self: Sized;
}
|
||||
|
||||
impl<T: Read + Seek> TakeSeekExt for T {
|
||||
fn take_seek(mut self, limit: u64) -> TakeSeek<Self>
|
||||
where Self: Sized {
|
||||
let pos = self.stream_position().expect("cannot get position for `take_seek`");
|
||||
|
||||
TakeSeek { inner: self, pos, end: pos + limit }
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user