mirror of https://github.com/encounter/nod-rs.git

commit 07bb8ccc1d (parent 7f97dac399)
Restore all functionality, split lib/bin & integrate redump validation
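This commit turns the repository into a two-member Cargo workspace, splitting the library from the CLI binary. A rough sketch of the resulting layout (only the two member names come from this diff; the per-crate contents are assumed):

    nod-rs/
    ├── Cargo.toml   # workspace root: members = ["nod", "nodtool"], shared release-lto profile
    ├── nod/         # library crate ("Library for reading GameCube and Wii disc images.")
    └── nodtool/     # CLI crate, taking over the former src/bin.rs nodtool binary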
@@ -1,3 +1,2 @@
 /target
-Cargo.lock
 .idea

File diff suppressed because it is too large.
Cargo.toml (63 lines changed)
@@ -1,65 +1,8 @@
-[package]
-name = "nod"
-version = "0.2.0"
-edition = "2021"
-rust-version = "1.59.0"
-authors = ["Luke Street <luke@street.dev>"]
-license = "MIT OR Apache-2.0"
-repository = "https://github.com/encounter/nod-rs"
-documentation = "https://docs.rs/nod"
-readme = "README.md"
-description = """
-Rust library and CLI tool for reading GameCube and Wii disc images.
-"""
-keywords = ["gamecube", "wii", "iso", "nfs", "rvz"]
-categories = ["command-line-utilities", "parser-implementations"]
-build = "build.rs"
-
-[[bin]]
-name = "nodtool"
-path = "src/bin.rs"
-
-[profile.release]
-debug = true
+[workspace]
+members = ["nod", "nodtool"]
+resolver = "2"

 [profile.release-lto]
 inherits = "release"
 lto = "thin"
 strip = "debuginfo"
-
-[features]
-default = ["compress-bzip2", "compress-lzma", "compress-zstd"]
-asm = ["md-5/asm", "sha1/asm"]
-compress-bzip2 = ["bzip2"]
-compress-lzma = ["liblzma"]
-compress-zstd = ["zstd"]
-nightly = ["crc32fast/nightly"]
-
-[dependencies]
-aes = "0.8.4"
-argh_derive = "0.1.12"
-argp = "0.3.0"
-base16ct = "0.2.0"
-bzip2 = { version = "0.4.4", features = ["static"], optional = true }
-cbc = "0.1.2"
-crc32fast = "1.4.0"
-digest = "0.10.7"
-dyn-clone = "1.0.16"
-enable-ansi-support = "0.2.1"
-encoding_rs = "0.8.33"
-file-size = "1.0.3"
-indicatif = "0.17.8"
-itertools = "0.12.1"
-liblzma = { git = "https://github.com/encounter/liblzma-rs.git", rev = "ce29b22", features = ["static"], optional = true }
-log = "0.4.20"
-md-5 = "0.10.6"
-rayon = "1.8.1"
-sha1 = "0.10.6"
-supports-color = "3.0.0"
-thiserror = "1.0.57"
-tracing = "0.1.40"
-tracing-attributes = "0.1.27"
-tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
-xxhash-rust = { version = "0.8.8", features = ["xxh64"] }
-zerocopy = { version = "0.7.32", features = ["alloc", "derive"] }
-zstd = { version = "0.13.0", optional = true }
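Note that the [profile.release-lto] section stays in the workspace-root Cargo.toml: Cargo only honors profile settings defined at the workspace root, and a named profile like this is selected explicitly, e.g. cargo build --profile release-lto.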
build.rs (9 lines changed)
@@ -1,9 +0,0 @@
-fn main() {
-    let output = std::process::Command::new("git")
-        .args(["rev-parse", "HEAD"])
-        .output()
-        .expect("Failed to execute git");
-    let rev = String::from_utf8(output.stdout).expect("Failed to parse git output");
-    println!("cargo:rustc-env=GIT_COMMIT_SHA={rev}");
-    println!("cargo:rustc-rerun-if-changed=.git/HEAD");
-}
@@ -74,6 +74,8 @@ allow = [
     "Apache-2.0",
     "BSD-3-Clause",
     "Unicode-DFS-2016",
+    "BSL-1.0",
+    "ISC",
 ]
 # List of explictly disallowed licenses
 # See https://spdx.org/licenses/ for list of possible licenses

@@ -197,7 +199,7 @@ allow-git = []

 [sources.allow-org]
 # 1 or more github.com organizations to allow git sources for
-#github = [""]
+github = ["encounter"]
 # 1 or more gitlab.com organizations to allow git sources for
 #gitlab = [""]
 # 1 or more bitbucket.org organizations to allow git sources for
@@ -0,0 +1,39 @@
+[package]
+name = "nod"
+version = "0.2.0"
+edition = "2021"
+rust-version = "1.59.0"
+authors = ["Luke Street <luke@street.dev>"]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/encounter/nod-rs"
+documentation = "https://docs.rs/nod"
+readme = "../README.md"
+description = """
+Library for reading GameCube and Wii disc images.
+"""
+keywords = ["gamecube", "wii", "iso", "wbfs", "rvz"]
+categories = ["command-line-utilities", "parser-implementations"]
+
+[features]
+default = ["compress-bzip2", "compress-lzma", "compress-zstd"]
+asm = ["sha1/asm"]
+compress-bzip2 = ["bzip2"]
+compress-lzma = ["liblzma"]
+compress-zstd = ["zstd"]
+
+[dependencies]
+aes = "0.8.4"
+base16ct = "0.2.0"
+bzip2 = { version = "0.4.4", features = ["static"], optional = true }
+cbc = "0.1.2"
+digest = "0.10.7"
+dyn-clone = "1.0.16"
+encoding_rs = "0.8.33"
+itertools = "0.12.1"
+liblzma = { version = "0.2.3", features = ["static"], optional = true }
+log = "0.4.20"
+rayon = "1.8.1"
+sha1 = "0.10.6"
+thiserror = "1.0.57"
+zerocopy = { version = "0.7.32", features = ["alloc", "derive"] }
+zstd = { version = "0.13.0", optional = true }
@@ -0,0 +1,202 @@
+use std::{
+    cmp::min,
+    io,
+    io::{Read, Seek, SeekFrom},
+    mem::size_of,
+};
+
+use zerocopy::{FromBytes, FromZeroes};
+
+use crate::{
+    disc::{
+        AppLoaderHeader, DiscHeader, DolHeader, PartitionBase, PartitionHeader, PartitionMeta,
+        BI2_SIZE, BOOT_SIZE, SECTOR_SIZE,
+    },
+    fst::{Node, NodeKind},
+    io::block::{Block, BlockIO},
+    streams::{ReadStream, SharedWindowedReadStream},
+    util::read::{read_box, read_box_slice, read_vec},
+    Result, ResultContext,
+};
+
+pub struct PartitionGC {
+    io: Box<dyn BlockIO>,
+    block: Option<Block>,
+    block_buf: Box<[u8]>,
+    block_idx: u32,
+    sector_buf: Box<[u8; SECTOR_SIZE]>,
+    sector: u32,
+    pos: u64,
+    disc_header: Box<DiscHeader>,
+}
+
+impl Clone for PartitionGC {
+    fn clone(&self) -> Self {
+        Self {
+            io: self.io.clone(),
+            block: None,
+            block_buf: <u8>::new_box_slice_zeroed(self.block_buf.len()),
+            block_idx: u32::MAX,
+            sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed(),
+            sector: u32::MAX,
+            pos: 0,
+            disc_header: self.disc_header.clone(),
+        }
+    }
+}
+
+impl PartitionGC {
+    pub fn new(inner: Box<dyn BlockIO>, disc_header: Box<DiscHeader>) -> Result<Box<Self>> {
+        let block_size = inner.block_size();
+        Ok(Box::new(Self {
+            io: inner,
+            block: None,
+            block_buf: <u8>::new_box_slice_zeroed(block_size as usize),
+            block_idx: u32::MAX,
+            sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed(),
+            sector: u32::MAX,
+            pos: 0,
+            disc_header,
+        }))
+    }
+
+    pub fn into_inner(self) -> Box<dyn BlockIO> { self.io }
+}
+
+impl Read for PartitionGC {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        let sector = (self.pos / SECTOR_SIZE as u64) as u32;
+        let block_idx = (sector as u64 * SECTOR_SIZE as u64 / self.block_buf.len() as u64) as u32;
+
+        // Read new block if necessary
+        if block_idx != self.block_idx {
+            self.block = self.io.read_block(self.block_buf.as_mut(), block_idx, None)?;
+            self.block_idx = block_idx;
+        }
+
+        // Copy sector if necessary
+        if sector != self.sector {
+            let Some(block) = &self.block else {
+                return Ok(0);
+            };
+            block.copy_raw(
+                self.sector_buf.as_mut(),
+                self.block_buf.as_ref(),
+                block_idx,
+                sector,
+                &self.disc_header,
+            )?;
+            self.sector = sector;
+        }
+
+        let offset = (self.pos % SECTOR_SIZE as u64) as usize;
+        let len = min(buf.len(), SECTOR_SIZE - offset);
+        buf[..len].copy_from_slice(&self.sector_buf[offset..offset + len]);
+        self.pos += len as u64;
+        Ok(len)
+    }
+}
+
+impl Seek for PartitionGC {
+    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+        self.pos = match pos {
+            SeekFrom::Start(v) => v,
+            SeekFrom::End(_) => {
+                return Err(io::Error::new(
+                    io::ErrorKind::Unsupported,
+                    "GCPartitionReader: SeekFrom::End is not supported".to_string(),
+                ));
+            }
+            SeekFrom::Current(v) => self.pos.saturating_add_signed(v),
+        };
+        Ok(self.pos)
+    }
+}
+
+impl PartitionBase for PartitionGC {
+    fn meta(&mut self) -> Result<Box<PartitionMeta>> {
+        self.seek(SeekFrom::Start(0)).context("Seeking to partition metadata")?;
+        read_part_meta(self, false)
+    }
+
+    fn open_file(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream> {
+        assert_eq!(node.kind(), NodeKind::File);
+        self.new_window(node.offset(false), node.length(false))
+    }
+
+    fn ideal_buffer_size(&self) -> usize { SECTOR_SIZE }
+}
+
+pub(crate) fn read_part_meta(
+    reader: &mut dyn ReadStream,
+    is_wii: bool,
+) -> Result<Box<PartitionMeta>> {
+    // boot.bin
+    let raw_boot: Box<[u8; BOOT_SIZE]> = read_box(reader).context("Reading boot.bin")?;
+    let partition_header = PartitionHeader::ref_from(&raw_boot[size_of::<DiscHeader>()..]).unwrap();
+
+    // bi2.bin
+    let raw_bi2: Box<[u8; BI2_SIZE]> = read_box(reader).context("Reading bi2.bin")?;
+
+    // apploader.bin
+    let mut raw_apploader: Vec<u8> =
+        read_vec(reader, size_of::<AppLoaderHeader>()).context("Reading apploader header")?;
+    let apploader_header = AppLoaderHeader::ref_from(raw_apploader.as_slice()).unwrap();
+    raw_apploader.resize(
+        size_of::<AppLoaderHeader>()
+            + apploader_header.size.get() as usize
+            + apploader_header.trailer_size.get() as usize,
+        0,
+    );
+    reader
+        .read_exact(&mut raw_apploader[size_of::<AppLoaderHeader>()..])
+        .context("Reading apploader")?;
+
+    // fst.bin
+    reader
+        .seek(SeekFrom::Start(partition_header.fst_off(is_wii)))
+        .context("Seeking to FST offset")?;
+    let raw_fst: Box<[u8]> = read_box_slice(reader, partition_header.fst_sz(is_wii) as usize)
+        .with_context(|| {
+            format!(
+                "Reading partition FST (offset {}, size {})",
+                partition_header.fst_off, partition_header.fst_sz
+            )
+        })?;
+
+    // main.dol
+    reader
+        .seek(SeekFrom::Start(partition_header.dol_off(is_wii)))
+        .context("Seeking to DOL offset")?;
+    let mut raw_dol: Vec<u8> =
+        read_vec(reader, size_of::<DolHeader>()).context("Reading DOL header")?;
+    let dol_header = DolHeader::ref_from(raw_dol.as_slice()).unwrap();
+    let dol_size = dol_header
+        .text_offs
+        .iter()
+        .zip(&dol_header.text_sizes)
+        .map(|(offs, size)| offs.get() + size.get())
+        .chain(
+            dol_header
+                .data_offs
+                .iter()
+                .zip(&dol_header.data_sizes)
+                .map(|(offs, size)| offs.get() + size.get()),
+        )
+        .max()
+        .unwrap_or(size_of::<DolHeader>() as u32);
+    raw_dol.resize(dol_size as usize, 0);
+    reader.read_exact(&mut raw_dol[size_of::<DolHeader>()..]).context("Reading DOL")?;
+
+    Ok(Box::new(PartitionMeta {
+        raw_boot,
+        raw_bi2,
+        raw_apploader: raw_apploader.into_boxed_slice(),
+        raw_fst,
+        raw_dol: raw_dol.into_boxed_slice(),
+        raw_ticket: None,
+        raw_tmd: None,
+        raw_cert_chain: None,
+        raw_h3_table: None,
+    }))
+}
@@ -11,13 +11,12 @@ use zerocopy::FromZeroes
 use crate::{
     array_ref, array_ref_mut,
     disc::{
-        partition::PartitionReader,
         reader::DiscReader,
         wii::{HASHES_SIZE, SECTOR_DATA_SIZE},
     },
     io::HashBytes,
     util::read::read_box_slice,
-    Result, ResultContext, SECTOR_SIZE,
+    OpenOptions, Result, ResultContext, SECTOR_SIZE,
 };

 /// In a sector, following the 0x400 byte block of hashes, each 0x400 bytes of decrypted data is

@@ -88,8 +87,9 @@ pub fn rebuild_hashes(reader: &mut DiscReader) -> Result<()>
         zero_h1_hash.update(zero_h0_hash);
     }

-    let mut hash_tables = Vec::with_capacity(reader.partitions.len());
-    for part in &reader.partitions {
+    let partitions = reader.partitions();
+    let mut hash_tables = Vec::with_capacity(partitions.len());
+    for part in partitions {
         let part_sectors = part.data_end_sector - part.data_start_sector;
         let hash_table = HashTable::new(part_sectors);
         log::debug!(

@@ -102,7 +102,7 @@ pub fn rebuild_hashes(reader: &mut DiscReader) -> Result<()>
         let group_count = hash_table.h3_hashes.len();
         let mutex = Arc::new(Mutex::new(hash_table));
         (0..group_count).into_par_iter().try_for_each_with(
-            (PartitionReader::new(reader.io.clone(), part)?, mutex.clone()),
+            (reader.open_partition(part.index, &OpenOptions::default())?, mutex.clone()),
            |(stream, mutex), h3_index| -> Result<()> {
                let mut result = HashResult::new_box_zeroed();
                let mut data_buf = <u8>::new_box_slice_zeroed(SECTOR_DATA_SIZE);
@@ -9,25 +9,20 @@ use std::{
     str::from_utf8,
 };

+use dyn_clone::DynClone;
 use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes};

 use crate::{
-    disc::{
-        gcn::DiscGCN,
-        wii::{DiscWii, Ticket, TmdHeader, WiiPartitionHeader},
-    },
+    disc::wii::{Ticket, TmdHeader},
     fst::Node,
-    io::DiscIO,
     static_assert,
     streams::{ReadStream, SharedWindowedReadStream},
-    util::read::read_from,
-    Error, Fst, OpenOptions, Result, ResultContext,
+    Fst, Result,
 };

 pub(crate) mod gcn;
 pub(crate) mod hashes;
-pub mod partition;
-pub mod reader;
+pub(crate) mod reader;
 pub(crate) mod wii;

 pub const SECTOR_SIZE: usize = 0x8000;

@@ -251,82 +246,8 @@ impl From<u32> for PartitionKind
     }
 }

-/// Information about a GameCube or Wii disc partition.
-#[derive(Debug, Clone)]
-pub struct PartitionInfo {
-    /// Partition group index
-    pub group_index: u32,
-    /// Partition index within the group
-    pub part_index: u32,
-    /// Partition offset within disc
-    pub part_offset: u64,
-    /// Partition kind
-    pub kind: PartitionKind,
-    /// Data offset within partition
-    pub data_offset: u64,
-    /// Data size
-    pub data_size: u64,
-    /// Raw Wii partition header
-    pub header: Option<WiiPartitionHeader>,
-    /// Lagged Fibonacci generator seed (for junk data)
-    pub lfg_seed: [u8; 4],
-    // /// Junk data start offset
-    // pub junk_start: u64,
-}
-
-/// Contains a disc's header & partition information.
-pub trait DiscBase: Send + Sync {
-    /// Retrieves the disc's header.
-    fn header(&self) -> &DiscHeader;
-
-    /// A list of partitions on the disc.
-    fn partitions(&self) -> Vec<PartitionInfo>;
-
-    /// Opens a new, decrypted partition read stream for the specified partition index.
-    ///
-    /// `validate_hashes`: Validate Wii disc hashes while reading (slow!)
-    fn open_partition<'a>(
-        &self,
-        disc_io: &'a dyn DiscIO,
-        index: usize,
-        options: &OpenOptions,
-    ) -> Result<Box<dyn PartitionBase + 'a>>;
-
-    /// Opens a new partition read stream for the first partition matching
-    /// the specified type.
-    ///
-    /// `validate_hashes`: Validate Wii disc hashes while reading (slow!)
-    fn open_partition_kind<'a>(
-        &self,
-        disc_io: &'a dyn DiscIO,
-        part_type: PartitionKind,
-        options: &OpenOptions,
-    ) -> Result<Box<dyn PartitionBase + 'a>>;
-
-    /// The disc's size in bytes, or an estimate if not stored by the format.
-    fn disc_size(&self) -> u64;
-}
-
-/// Creates a new [`DiscBase`] instance.
-pub fn new(disc_io: &mut dyn DiscIO) -> Result<Box<dyn DiscBase>> {
-    let disc_size = disc_io.disc_size();
-    let mut stream = disc_io.open()?;
-    let header: DiscHeader = read_from(stream.as_mut()).context("Reading disc header")?;
-    if header.is_wii() {
-        Ok(Box::new(DiscWii::new(stream.as_mut(), header, disc_size)?))
-    } else if header.is_gamecube() {
-        Ok(Box::new(DiscGCN::new(stream.as_mut(), header, disc_size)?))
-    } else {
-        Err(Error::DiscFormat(format!(
-            "Invalid GC/Wii magic: {:#010X}/{:#010X}",
-            header.gcn_magic.get(),
-            header.wii_magic.get()
-        )))
-    }
-}
-
 /// An open read stream for a disc partition.
-pub trait PartitionBase: ReadStream {
+pub trait PartitionBase: DynClone + ReadStream + Send + Sync {
     /// Reads the partition header and file system table.
     fn meta(&mut self) -> Result<Box<PartitionMeta>>;

@@ -366,6 +287,8 @@ pub trait PartitionBase: ReadStream {
     fn ideal_buffer_size(&self) -> usize;
 }

+dyn_clone::clone_trait_object!(PartitionBase);
+
 /// Size of the disc header and partition header (boot.bin)
 pub const BOOT_SIZE: usize = size_of::<DiscHeader>() + size_of::<PartitionHeader>();
 /// Size of the debug and region information (bi2.bin)
@@ -8,14 +8,15 @@ use zerocopy::FromZeroes

 use crate::{
     disc::{
+        gcn::PartitionGC,
         hashes::{rebuild_hashes, HashTable},
-        partition::PartitionReader,
-        wii::{WiiPartEntry, WiiPartGroup, WiiPartitionHeader, WII_PART_GROUP_OFF},
+        wii::{PartitionWii, WiiPartEntry, WiiPartGroup, WiiPartitionHeader, WII_PART_GROUP_OFF},
         DL_DVD_SIZE, MINI_DVD_SIZE, SL_DVD_SIZE,
     },
-    io::block::{BPartitionInfo, Block, BlockIO},
+    io::block::{Block, BlockIO, PartitionInfo},
     util::read::{read_box, read_from, read_vec},
-    DiscHeader, Error, PartitionHeader, PartitionKind, Result, ResultContext, SECTOR_SIZE,
+    DiscHeader, DiscMeta, Error, OpenOptions, PartitionBase, PartitionHeader, PartitionKind,
+    Result, ResultContext, SECTOR_SIZE,
 };

 #[derive(Debug, Eq, PartialEq, Copy, Clone)]

@@ -25,7 +26,7 @@ pub enum EncryptionMode
 }

 pub struct DiscReader {
-    pub(crate) io: Box<dyn BlockIO>,
+    io: Box<dyn BlockIO>,
     block: Option<Block>,
     block_buf: Box<[u8]>,
     block_idx: u32,

@@ -34,7 +35,7 @@ pub struct DiscReader {
     pos: u64,
     mode: EncryptionMode,
     disc_header: Box<DiscHeader>,
-    pub(crate) partitions: Vec<BPartitionInfo>,
+    pub(crate) partitions: Vec<PartitionInfo>,
     hash_tables: Vec<HashTable>,
 }

@@ -57,9 +58,9 @@ impl Clone for DiscReader {
 }

 impl DiscReader {
-    pub fn new(inner: Box<dyn BlockIO>, mode: EncryptionMode) -> Result<Self> {
+    pub fn new(inner: Box<dyn BlockIO>, options: &OpenOptions) -> Result<Self> {
         let block_size = inner.block_size();
-        let meta = inner.meta()?;
+        let meta = inner.meta();
         let mut reader = Self {
             io: inner,
             block: None,

@@ -68,7 +69,11 @@ impl DiscReader {
             sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed(),
             sector_idx: u32::MAX,
             pos: 0,
-            mode,
+            mode: if options.rebuild_encryption {
+                EncryptionMode::Encrypted
+            } else {
+                EncryptionMode::Decrypted
+            },
             disc_header: DiscHeader::new_box_zeroed(),
             partitions: vec![],
             hash_tables: vec![],

@@ -78,7 +83,7 @@ impl DiscReader {
         if reader.disc_header.is_wii() {
             reader.partitions = read_partition_info(&mut reader)?;
             // Rebuild hashes if the format requires it
-            if mode == EncryptionMode::Encrypted && meta.needs_hash_recovery {
+            if options.rebuild_encryption && meta.needs_hash_recovery {
                 rebuild_hashes(&mut reader)?;
             }
         }

@@ -96,16 +101,53 @@ impl DiscReader {
     }

     pub fn disc_size(&self) -> u64 {
-        self.io
-            .meta()
-            .ok()
-            .and_then(|m| m.disc_size)
-            .unwrap_or_else(|| guess_disc_size(&self.partitions))
+        self.io.meta().disc_size.unwrap_or_else(|| guess_disc_size(&self.partitions))
     }

     pub fn header(&self) -> &DiscHeader { &self.disc_header }

-    pub fn partitions(&self) -> &[BPartitionInfo] { &self.partitions }
+    pub fn partitions(&self) -> &[PartitionInfo] { &self.partitions }

+    pub fn meta(&self) -> DiscMeta { self.io.meta() }
+
+    /// Opens a new, decrypted partition read stream for the specified partition index.
+    pub fn open_partition(
+        &self,
+        index: usize,
+        options: &OpenOptions,
+    ) -> Result<Box<dyn PartitionBase>> {
+        if self.disc_header.is_gamecube() {
+            if index == 0 {
+                Ok(PartitionGC::new(self.io.clone(), self.disc_header.clone())?)
+            } else {
+                Err(Error::DiscFormat("GameCube discs only have one partition".to_string()))
+            }
+        } else if let Some(part) = self.partitions.get(index) {
+            Ok(PartitionWii::new(self.io.clone(), self.disc_header.clone(), part, options)?)
+        } else {
+            Err(Error::DiscFormat(format!("Partition {index} not found")))
+        }
+    }
+
+    /// Opens a new, decrypted partition read stream for the first partition matching
+    /// the specified type.
+    pub fn open_partition_kind(
+        &self,
+        part_type: PartitionKind,
+        options: &OpenOptions,
+    ) -> Result<Box<dyn PartitionBase>> {
+        if self.disc_header.is_gamecube() {
+            if part_type == PartitionKind::Data {
+                Ok(PartitionGC::new(self.io.clone(), self.disc_header.clone())?)
+            } else {
+                Err(Error::DiscFormat("GameCube discs only have a data partition".to_string()))
+            }
+        } else if let Some(part) = self.partitions.iter().find(|v| v.kind == part_type) {
+            Ok(PartitionWii::new(self.io.clone(), self.disc_header.clone(), part, options)?)
+        } else {
+            Err(Error::DiscFormat(format!("Partition type {part_type} not found")))
+        }
+    }
 }

 impl Read for DiscReader {

@@ -135,14 +177,14 @@ impl Read for DiscReader {
         if let Some(partition) = partition {
             match self.mode {
                 EncryptionMode::Decrypted => block.decrypt(
-                    &mut self.sector_buf,
+                    self.sector_buf.as_mut(),
                     self.block_buf.as_ref(),
                     block_idx,
                     abs_sector,
                     partition,
                 )?,
                 EncryptionMode::Encrypted => block.encrypt(
-                    &mut self.sector_buf,
+                    self.sector_buf.as_mut(),
                     self.block_buf.as_ref(),
                     block_idx,
                     abs_sector,

@@ -151,7 +193,7 @@ impl Read for DiscReader {
             }
         } else {
             block.copy_raw(
-                &mut self.sector_buf,
+                self.sector_buf.as_mut(),
                 self.block_buf.as_ref(),
                 block_idx,
                 abs_sector,

@@ -186,26 +228,26 @@ impl Seek for DiscReader {
     }
 }

-fn read_partition_info(stream: &mut DiscReader) -> crate::Result<Vec<BPartitionInfo>> {
-    stream.seek(SeekFrom::Start(WII_PART_GROUP_OFF)).context("Seeking to partition groups")?;
-    let part_groups: [WiiPartGroup; 4] = read_from(stream).context("Reading partition groups")?;
+fn read_partition_info(reader: &mut DiscReader) -> crate::Result<Vec<PartitionInfo>> {
+    reader.seek(SeekFrom::Start(WII_PART_GROUP_OFF)).context("Seeking to partition groups")?;
+    let part_groups: [WiiPartGroup; 4] = read_from(reader).context("Reading partition groups")?;
     let mut part_info = Vec::new();
     for (group_idx, group) in part_groups.iter().enumerate() {
         let part_count = group.part_count.get();
         if part_count == 0 {
             continue;
         }
-        stream
+        reader
             .seek(SeekFrom::Start(group.part_entry_off()))
             .with_context(|| format!("Seeking to partition group {group_idx}"))?;
-        let entries: Vec<WiiPartEntry> = read_vec(stream, part_count as usize)
+        let entries: Vec<WiiPartEntry> = read_vec(reader, part_count as usize)
             .with_context(|| format!("Reading partition group {group_idx}"))?;
         for (part_idx, entry) in entries.iter().enumerate() {
             let offset = entry.offset();
-            stream
+            reader
                 .seek(SeekFrom::Start(offset))
                 .with_context(|| format!("Seeking to partition data {group_idx}:{part_idx}"))?;
-            let header: Box<WiiPartitionHeader> = read_box(stream)
+            let header: Box<WiiPartitionHeader> = read_box(reader)
                 .with_context(|| format!("Reading partition header {group_idx}:{part_idx}"))?;

             let key = header.ticket.decrypt_title_key()?;

@@ -224,8 +266,8 @@ fn read_partition_info(stream: &mut DiscReader) -> crate::Result<Vec<BPartitionI
                     "Partition {group_idx}:{part_idx} data is not sector aligned",
                 )));
             }
-            let mut info = BPartitionInfo {
-                index: part_info.len() as u32,
+            let mut info = PartitionInfo {
+                index: part_info.len(),
                 kind: entry.kind.get().into(),
                 start_sector: (start_offset / SECTOR_SIZE as u64) as u32,
                 data_start_sector: (data_start_offset / SECTOR_SIZE as u64) as u32,

@@ -237,7 +279,12 @@ fn read_partition_info(stream: &mut DiscReader) -> crate::Result<Vec<BPartitionI
                 hash_table: None,
             };

-            let mut partition_reader = PartitionReader::new(stream.io.clone(), &info)?;
+            let mut partition_reader = PartitionWii::new(
+                reader.io.clone(),
+                reader.disc_header.clone(),
+                &info,
+                &OpenOptions::default(),
+            )?;
             info.disc_header = read_box(&mut partition_reader).context("Reading disc header")?;
             info.partition_header =
                 read_box(&mut partition_reader).context("Reading partition header")?;

@@ -248,7 +295,7 @@ fn read_partition_info(stream: &mut DiscReader) -> crate::Result<Vec<BPartitionI
     Ok(part_info)
 }

-fn guess_disc_size(part_info: &[BPartitionInfo]) -> u64 {
+fn guess_disc_size(part_info: &[PartitionInfo]) -> u64 {
     let max_offset = part_info
         .iter()
         .flat_map(|v| {
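Taken together with the io/block changes below, DiscReader replaces the old DiscBase/DiscIO pair removed from disc/mod.rs. A minimal crate-internal sketch of driving it (the reader module is pub(crate) in this commit, so this is written from inside the crate; the helper function itself is illustrative, not part of the diff):

    use crate::{disc::reader::DiscReader, io::block, OpenOptions, PartitionKind, Result};

    fn data_partition_boot_size(path: &std::path::Path) -> Result<usize> {
        let options = OpenOptions::default();
        // Format detection (ISO/CISO/NFS/WBFS/WIA/RVZ) happens in io::block::open.
        let io = block::open(path)?;
        // DiscReader reads the disc header and, for Wii images, the partition tables.
        let reader = DiscReader::new(io, &options)?;
        // Open the data partition and pull boot.bin/bi2.bin/apploader/FST/DOL metadata.
        let mut partition = reader.open_partition_kind(PartitionKind::Data, &options)?;
        let meta = partition.meta()?;
        Ok(meta.raw_boot.len())
    }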
@@ -0,0 +1,447 @@
+use std::{
+    cmp::min,
+    ffi::CStr,
+    io,
+    io::{Read, Seek, SeekFrom},
+    mem::size_of,
+};
+
+use sha1::{digest, Digest, Sha1};
+use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes};
+
+use crate::{
+    array_ref,
+    disc::{
+        gcn::{read_part_meta, PartitionGC},
+        PartitionBase, PartitionKind, PartitionMeta, SECTOR_SIZE,
+    },
+    fst::{Node, NodeKind},
+    io::{
+        aes_decrypt,
+        block::{Block, BlockIO, PartitionInfo},
+        KeyBytes,
+    },
+    static_assert,
+    streams::{ReadStream, SharedWindowedReadStream},
+    util::{div_rem, read::read_box_slice},
+    DiscHeader, Error, OpenOptions, Result, ResultContext,
+};
+
+pub(crate) const HASHES_SIZE: usize = 0x400;
+pub(crate) const SECTOR_DATA_SIZE: usize = SECTOR_SIZE - HASHES_SIZE; // 0x7C00
+
+// ppki (Retail)
+const RVL_CERT_ISSUER_PPKI_TICKET: &str = "Root-CA00000001-XS00000003";
+#[rustfmt::skip]
+const RETAIL_COMMON_KEYS: [KeyBytes; 3] = [
+    /* RVL_KEY_RETAIL */
+    [0xeb, 0xe4, 0x2a, 0x22, 0x5e, 0x85, 0x93, 0xe4, 0x48, 0xd9, 0xc5, 0x45, 0x73, 0x81, 0xaa, 0xf7],
+    /* RVL_KEY_KOREAN */
+    [0x63, 0xb8, 0x2b, 0xb4, 0xf4, 0x61, 0x4e, 0x2e, 0x13, 0xf2, 0xfe, 0xfb, 0xba, 0x4c, 0x9b, 0x7e],
+    /* vWii_KEY_RETAIL */
+    [0x30, 0xbf, 0xc7, 0x6e, 0x7c, 0x19, 0xaf, 0xbb, 0x23, 0x16, 0x33, 0x30, 0xce, 0xd7, 0xc2, 0x8d],
+];
+
+// dpki (Debug)
+const RVL_CERT_ISSUER_DPKI_TICKET: &str = "Root-CA00000002-XS00000006";
+#[rustfmt::skip]
+const DEBUG_COMMON_KEYS: [KeyBytes; 3] = [
+    /* RVL_KEY_DEBUG */
+    [0xa1, 0x60, 0x4a, 0x6a, 0x71, 0x23, 0xb5, 0x29, 0xae, 0x8b, 0xec, 0x32, 0xc8, 0x16, 0xfc, 0xaa],
+    /* RVL_KEY_KOREAN_DEBUG */
+    [0x67, 0x45, 0x8b, 0x6b, 0xc6, 0x23, 0x7b, 0x32, 0x69, 0x98, 0x3c, 0x64, 0x73, 0x48, 0x33, 0x66],
+    /* vWii_KEY_DEBUG */
+    [0x2f, 0x5c, 0x1b, 0x29, 0x44, 0xe7, 0xfd, 0x6f, 0xc3, 0x97, 0x96, 0x4b, 0x05, 0x76, 0x91, 0xfa],
+];
+
+#[derive(Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
+#[repr(C, align(4))]
+pub(crate) struct WiiPartEntry {
+    pub(crate) offset: U32,
+    pub(crate) kind: U32,
+}
+
+static_assert!(size_of::<WiiPartEntry>() == 8);
+
+impl WiiPartEntry {
+    pub(crate) fn offset(&self) -> u64 { (self.offset.get() as u64) << 2 }
+}
+
+#[derive(Debug, PartialEq)]
+pub(crate) struct WiiPartInfo {
+    pub(crate) group_idx: u32,
+    pub(crate) part_idx: u32,
+    pub(crate) offset: u64,
+    pub(crate) kind: PartitionKind,
+    pub(crate) header: WiiPartitionHeader,
+    pub(crate) junk_id: [u8; 4],
+    pub(crate) junk_start: u64,
+    pub(crate) title_key: KeyBytes,
+}
+
+pub(crate) const WII_PART_GROUP_OFF: u64 = 0x40000;
+
+#[derive(Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
+#[repr(C, align(4))]
+pub(crate) struct WiiPartGroup {
+    pub(crate) part_count: U32,
+    pub(crate) part_entry_off: U32,
+}
+
+static_assert!(size_of::<WiiPartGroup>() == 8);
+
+impl WiiPartGroup {
+    pub(crate) fn part_entry_off(&self) -> u64 { (self.part_entry_off.get() as u64) << 2 }
+}
+
+#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
+#[repr(C, align(4))]
+pub struct SignedHeader {
+    /// Signature type, always 0x00010001 (RSA-2048)
+    pub sig_type: U32,
+    /// RSA-2048 signature
+    pub sig: [u8; 256],
+    _pad: [u8; 60],
+}
+
+static_assert!(size_of::<SignedHeader>() == 0x140);
+
+#[derive(Debug, Clone, PartialEq, Default, FromBytes, FromZeroes, AsBytes)]
+#[repr(C, align(4))]
+pub struct TicketTimeLimit {
+    pub enable_time_limit: U32,
+    pub time_limit: U32,
+}
+
+static_assert!(size_of::<TicketTimeLimit>() == 8);
+
+#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
+#[repr(C, align(4))]
+pub struct Ticket {
+    pub header: SignedHeader,
+    pub sig_issuer: [u8; 64],
+    pub ecdh: [u8; 60],
+    pub version: u8,
+    _pad1: U16,
+    pub title_key: KeyBytes,
+    _pad2: u8,
+    pub ticket_id: [u8; 8],
+    pub console_id: [u8; 4],
+    pub title_id: [u8; 8],
+    _pad3: U16,
+    pub ticket_title_version: U16,
+    pub permitted_titles_mask: U32,
+    pub permit_mask: U32,
+    pub title_export_allowed: u8,
+    pub common_key_idx: u8,
+    _pad4: [u8; 48],
+    pub content_access_permissions: [u8; 64],
+    _pad5: [u8; 2],
+    pub time_limits: [TicketTimeLimit; 8],
+}
+
+static_assert!(size_of::<Ticket>() == 0x2A4);
+
+impl Ticket {
+    pub fn decrypt_title_key(&self) -> Result<KeyBytes> {
+        let mut iv: KeyBytes = [0; 16];
+        iv[..8].copy_from_slice(&self.title_id);
+        let cert_issuer_ticket =
+            CStr::from_bytes_until_nul(&self.sig_issuer).ok().and_then(|c| c.to_str().ok());
+        let common_keys = match cert_issuer_ticket {
+            Some(RVL_CERT_ISSUER_PPKI_TICKET) => &RETAIL_COMMON_KEYS,
+            Some(RVL_CERT_ISSUER_DPKI_TICKET) => &DEBUG_COMMON_KEYS,
+            Some(v) => {
+                return Err(Error::DiscFormat(format!("unknown certificate issuer {:?}", v)));
+            }
+            None => {
+                return Err(Error::DiscFormat("failed to parse certificate issuer".to_string()));
+            }
+        };
+        let common_key = common_keys.get(self.common_key_idx as usize).ok_or(Error::DiscFormat(
+            format!("unknown common key index {}", self.common_key_idx),
+        ))?;
+        let mut title_key = self.title_key;
+        aes_decrypt(common_key, iv, &mut title_key);
+        Ok(title_key)
+    }
+}
+
+#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
+#[repr(C, align(4))]
+pub struct TmdHeader {
+    pub header: SignedHeader,
+    pub sig_issuer: [u8; 64],
+    pub version: u8,
+    pub ca_crl_version: u8,
+    pub signer_crl_version: u8,
+    pub is_vwii: u8,
+    pub ios_id: [u8; 8],
+    pub title_id: [u8; 8],
+    pub title_type: u32,
+    pub group_id: U16,
+    _pad1: [u8; 2],
+    pub region: U16,
+    pub ratings: KeyBytes,
+    _pad2: [u8; 12],
+    pub ipc_mask: [u8; 12],
+    _pad3: [u8; 18],
+    pub access_flags: U32,
+    pub title_version: U16,
+    pub num_contents: U16,
+    pub boot_idx: U16,
+    pub minor_version: U16,
+}
+
+static_assert!(size_of::<TmdHeader>() == 0x1E4);
+
+pub const H3_TABLE_SIZE: usize = 0x18000;
+
+#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
+#[repr(C, align(4))]
+pub struct WiiPartitionHeader {
+    pub ticket: Ticket,
+    tmd_size: U32,
+    tmd_off: U32,
+    cert_chain_size: U32,
+    cert_chain_off: U32,
+    h3_table_off: U32,
+    data_off: U32,
+    data_size: U32,
+}
+
+static_assert!(size_of::<WiiPartitionHeader>() == 0x2C0);
+
+impl WiiPartitionHeader {
+    pub fn tmd_size(&self) -> u64 { self.tmd_size.get() as u64 }
+
+    pub fn tmd_off(&self) -> u64 { (self.tmd_off.get() as u64) << 2 }
+
+    pub fn cert_chain_size(&self) -> u64 { self.cert_chain_size.get() as u64 }
+
+    pub fn cert_chain_off(&self) -> u64 { (self.cert_chain_off.get() as u64) << 2 }
+
+    pub fn h3_table_off(&self) -> u64 { (self.h3_table_off.get() as u64) << 2 }
+
+    pub fn h3_table_size(&self) -> u64 { H3_TABLE_SIZE as u64 }
+
+    pub fn data_off(&self) -> u64 { (self.data_off.get() as u64) << 2 }
+
+    pub fn data_size(&self) -> u64 { (self.data_size.get() as u64) << 2 }
+}
+
+pub struct PartitionWii {
+    io: Box<dyn BlockIO>,
+    partition: PartitionInfo,
+    block: Option<Block>,
+    block_buf: Box<[u8]>,
+    block_idx: u32,
+    sector_buf: Box<[u8; SECTOR_SIZE]>,
+    sector: u32,
+    pos: u64,
+    verify: bool,
+    raw_tmd: Box<[u8]>,
+    raw_cert_chain: Box<[u8]>,
+    raw_h3_table: Box<[u8]>,
+}
+
+impl Clone for PartitionWii {
+    fn clone(&self) -> Self {
+        Self {
+            io: self.io.clone(),
+            partition: self.partition.clone(),
+            block: None,
+            block_buf: <u8>::new_box_slice_zeroed(self.block_buf.len()),
+            block_idx: u32::MAX,
+            sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed(),
+            sector: u32::MAX,
+            pos: 0,
+            verify: self.verify,
+            raw_tmd: self.raw_tmd.clone(),
+            raw_cert_chain: self.raw_cert_chain.clone(),
+            raw_h3_table: self.raw_h3_table.clone(),
+        }
+    }
+}
+
+impl PartitionWii {
+    pub fn new(
+        inner: Box<dyn BlockIO>,
+        disc_header: Box<DiscHeader>,
+        partition: &PartitionInfo,
+        options: &OpenOptions,
+    ) -> Result<Box<Self>> {
+        let block_size = inner.block_size();
+        let mut reader = PartitionGC::new(inner, disc_header)?;
+
+        // Read TMD, cert chain, and H3 table
+        let offset = partition.start_sector as u64 * SECTOR_SIZE as u64;
+        reader
+            .seek(SeekFrom::Start(offset + partition.header.tmd_off()))
+            .context("Seeking to TMD offset")?;
+        let raw_tmd: Box<[u8]> = read_box_slice(&mut reader, partition.header.tmd_size() as usize)
+            .context("Reading TMD")?;
+        reader
+            .seek(SeekFrom::Start(offset + partition.header.cert_chain_off()))
+            .context("Seeking to cert chain offset")?;
+        let raw_cert_chain: Box<[u8]> =
+            read_box_slice(&mut reader, partition.header.cert_chain_size() as usize)
+                .context("Reading cert chain")?;
+        reader
+            .seek(SeekFrom::Start(offset + partition.header.h3_table_off()))
+            .context("Seeking to H3 table offset")?;
+        let raw_h3_table: Box<[u8]> =
+            read_box_slice(&mut reader, H3_TABLE_SIZE).context("Reading H3 table")?;
+
+        Ok(Box::new(Self {
+            io: reader.into_inner(),
+            partition: partition.clone(),
+            block: None,
+            block_buf: <u8>::new_box_slice_zeroed(block_size as usize),
+            block_idx: u32::MAX,
+            sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed(),
+            sector: u32::MAX,
+            pos: 0,
+            verify: options.validate_hashes,
+            raw_tmd,
+            raw_cert_chain,
+            raw_h3_table,
+        }))
+    }
+}
+
+impl Read for PartitionWii {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        let partition_sector = (self.pos / SECTOR_DATA_SIZE as u64) as u32;
+        let sector = self.partition.data_start_sector + partition_sector;
+        if sector >= self.partition.data_end_sector {
+            return Ok(0);
+        }
+        let block_idx = (sector as u64 * SECTOR_SIZE as u64 / self.block_buf.len() as u64) as u32;
+
+        // Read new block if necessary
+        if block_idx != self.block_idx {
+            self.block =
+                self.io.read_block(self.block_buf.as_mut(), block_idx, Some(&self.partition))?;
+            self.block_idx = block_idx;
+        }
+
+        // Decrypt sector if necessary
+        if sector != self.sector {
+            let Some(block) = &self.block else {
+                return Ok(0);
+            };
+            block.decrypt(
+                self.sector_buf.as_mut(),
+                self.block_buf.as_ref(),
+                block_idx,
+                sector,
+                &self.partition,
+            )?;
+            if self.verify {
+                verify_hashes(&self.sector_buf, sector)?;
+            }
+            self.sector = sector;
+        }
+
+        let offset = (self.pos % SECTOR_DATA_SIZE as u64) as usize;
+        let len = min(buf.len(), SECTOR_DATA_SIZE - offset);
+        buf[..len]
+            .copy_from_slice(&self.sector_buf[HASHES_SIZE + offset..HASHES_SIZE + offset + len]);
+        self.pos += len as u64;
+        Ok(len)
+    }
+}
+
+impl Seek for PartitionWii {
+    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+        self.pos = match pos {
+            SeekFrom::Start(v) => v,
+            SeekFrom::End(_) => {
+                return Err(io::Error::new(
+                    io::ErrorKind::Unsupported,
+                    "WiiPartitionReader: SeekFrom::End is not supported".to_string(),
+                ));
+            }
+            SeekFrom::Current(v) => self.pos.saturating_add_signed(v),
+        };
+        Ok(self.pos)
+    }
+}
+
+#[inline(always)]
+pub(crate) fn as_digest(slice: &[u8; 20]) -> digest::Output<Sha1> { (*slice).into() }
+
+fn verify_hashes(buf: &[u8; SECTOR_SIZE], sector: u32) -> io::Result<()> {
+    let (mut group, sub_group) = div_rem(sector as usize, 8);
+    group %= 8;
+
+    // H0 hashes
+    for i in 0..31 {
+        let mut hash = Sha1::new();
+        hash.update(array_ref![buf, (i + 1) * 0x400, 0x400]);
+        let expected = as_digest(array_ref![buf, i * 20, 20]);
+        let output = hash.finalize();
+        if output != expected {
+            return Err(io::Error::new(
+                io::ErrorKind::InvalidData,
+                format!("Invalid H0 hash! (block {:?}) {:x}\n\texpected {:x}", i, output, expected),
+            ));
+        }
+    }
+
+    // H1 hash
+    {
+        let mut hash = Sha1::new();
+        hash.update(array_ref![buf, 0, 0x26C]);
+        let expected = as_digest(array_ref![buf, 0x280 + sub_group * 20, 20]);
+        let output = hash.finalize();
+        if output != expected {
+            return Err(io::Error::new(
+                io::ErrorKind::InvalidData,
+                format!(
+                    "Invalid H1 hash! (subgroup {:?}) {:x}\n\texpected {:x}",
+                    sub_group, output, expected
+                ),
+            ));
+        }
+    }
+
+    // H2 hash
+    {
+        let mut hash = Sha1::new();
+        hash.update(array_ref![buf, 0x280, 0xA0]);
+        let expected = as_digest(array_ref![buf, 0x340 + group * 20, 20]);
+        let output = hash.finalize();
+        if output != expected {
+            return Err(io::Error::new(
+                io::ErrorKind::InvalidData,
+                format!(
+                    "Invalid H2 hash! (group {:?}) {:x}\n\texpected {:x}",
+                    group, output, expected
+                ),
+            ));
+        }
+    }
+    // TODO H3 hash
+    Ok(())
+}
+
+impl PartitionBase for PartitionWii {
+    fn meta(&mut self) -> Result<Box<PartitionMeta>> {
+        self.seek(SeekFrom::Start(0)).context("Seeking to partition header")?;
+        let mut meta = read_part_meta(self, true)?;
+        meta.raw_ticket = Some(Box::from(self.partition.header.ticket.as_bytes()));
+        meta.raw_tmd = Some(self.raw_tmd.clone());
+        meta.raw_cert_chain = Some(self.raw_cert_chain.clone());
+        meta.raw_h3_table = Some(self.raw_h3_table.clone());
+        Ok(meta)
+    }
+
+    fn open_file(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream> {
+        assert_eq!(node.kind(), NodeKind::File);
+        self.new_window(node.offset(true), node.length(true))
+    }
+
+    fn ideal_buffer_size(&self) -> usize { SECTOR_DATA_SIZE }
+}
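verify_hashes above walks the in-sector hash levels: a Wii sector is 0x8000 bytes, beginning with a 0x400-byte hash block followed by 31 data blocks of 0x400 bytes each (SECTOR_DATA_SIZE = 0x7C00). The H0 hash of data block i sits at offset i * 20, while the H1 hashes start at 0x280 and the H2 hashes at 0x340, indexed by the sub_group and group values derived from the sector number at the top of the function. A small illustrative helper for the H0 arithmetic, using the constants from this file (the function itself is not part of the diff):

    /// Map an offset inside the 0x7C00-byte decrypted data area to the position of
    /// its 0x400-byte data block within the sector and the range of its H0 hash.
    fn h0_slot(data_offset: usize) -> (usize, core::ops::Range<usize>) {
        let block = data_offset / 0x400; // 0..31
        let data_pos = HASHES_SIZE + block * 0x400; // data blocks follow the hash block
        let hash_range = block * 20..block * 20 + 20; // 20-byte SHA-1 per data block
        (data_pos, hash_range)
    }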
@ -12,8 +12,7 @@ use crate::{
|
||||||
},
|
},
|
||||||
io::{aes_decrypt, aes_encrypt, ciso, iso, nfs, wbfs, wia, KeyBytes, MagicBytes},
|
io::{aes_decrypt, aes_encrypt, ciso, iso, nfs, wbfs, wia, KeyBytes, MagicBytes},
|
||||||
util::{lfg::LaggedFibonacci, read::read_from},
|
util::{lfg::LaggedFibonacci, read::read_from},
|
||||||
DiscHeader, DiscMeta, Error, OpenOptions, PartitionHeader, PartitionKind, Result,
|
DiscHeader, DiscMeta, Error, PartitionHeader, PartitionKind, Result, ResultContext,
|
||||||
ResultContext,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/// Block I/O trait for reading disc images.
|
/// Block I/O trait for reading disc images.
|
||||||
|
@ -23,20 +22,20 @@ pub trait BlockIO: DynClone + Send + Sync {
|
||||||
&mut self,
|
&mut self,
|
||||||
out: &mut [u8],
|
out: &mut [u8],
|
||||||
block: u32,
|
block: u32,
|
||||||
partition: Option<&BPartitionInfo>,
|
partition: Option<&PartitionInfo>,
|
||||||
) -> io::Result<Option<Block>>;
|
) -> io::Result<Option<Block>>;
|
||||||
|
|
||||||
/// The format's block size in bytes. Must be a multiple of the sector size (0x8000).
|
/// The format's block size in bytes. Must be a multiple of the sector size (0x8000).
|
||||||
fn block_size(&self) -> u32;
|
fn block_size(&self) -> u32;
|
||||||
|
|
||||||
/// Returns extra metadata included in the disc file format, if any.
|
/// Returns extra metadata included in the disc file format, if any.
|
||||||
fn meta(&self) -> Result<DiscMeta>;
|
fn meta(&self) -> DiscMeta;
|
||||||
}
|
}
|
||||||
|
|
||||||
dyn_clone::clone_trait_object!(BlockIO);
|
dyn_clone::clone_trait_object!(BlockIO);
|
||||||
|
|
||||||
/// Creates a new [`BlockIO`] instance.
|
/// Creates a new [`BlockIO`] instance.
|
||||||
pub fn open(filename: &Path, options: &OpenOptions) -> Result<Box<dyn BlockIO>> {
|
pub fn open(filename: &Path) -> Result<Box<dyn BlockIO>> {
|
||||||
let path_result = fs::canonicalize(filename);
|
let path_result = fs::canonicalize(filename);
|
||||||
if let Err(err) = path_result {
|
if let Err(err) = path_result {
|
||||||
return Err(Error::Io(format!("Failed to open {}", filename.display()), err));
|
return Err(Error::Io(format!("Failed to open {}", filename.display()), err));
|
||||||
|
@ -58,20 +57,18 @@ pub fn open(filename: &Path, options: &OpenOptions) -> Result<Box<dyn BlockIO>>
|
||||||
match magic {
|
match magic {
|
||||||
ciso::CISO_MAGIC => Ok(ciso::DiscIOCISO::new(path)?),
|
ciso::CISO_MAGIC => Ok(ciso::DiscIOCISO::new(path)?),
|
||||||
nfs::NFS_MAGIC => match path.parent() {
|
nfs::NFS_MAGIC => match path.parent() {
|
||||||
Some(parent) if parent.is_dir() => {
|
Some(parent) if parent.is_dir() => Ok(nfs::DiscIONFS::new(path.parent().unwrap())?),
|
||||||
Ok(nfs::DiscIONFS::new(path.parent().unwrap(), options)?)
|
|
||||||
}
|
|
||||||
_ => Err(Error::DiscFormat("Failed to locate NFS parent directory".to_string())),
|
_ => Err(Error::DiscFormat("Failed to locate NFS parent directory".to_string())),
|
||||||
},
|
},
|
||||||
wbfs::WBFS_MAGIC => Ok(wbfs::DiscIOWBFS::new(path)?),
|
wbfs::WBFS_MAGIC => Ok(wbfs::DiscIOWBFS::new(path)?),
|
||||||
wia::WIA_MAGIC | wia::RVZ_MAGIC => Ok(wia::DiscIOWIA::new(path, options)?),
|
wia::WIA_MAGIC | wia::RVZ_MAGIC => Ok(wia::DiscIOWIA::new(path)?),
|
||||||
_ => Ok(iso::DiscIOISO::new(path)?),
|
_ => Ok(iso::DiscIOISO::new(path)?),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub struct BPartitionInfo {
|
pub struct PartitionInfo {
|
||||||
pub index: u32,
|
pub index: usize,
|
||||||
pub kind: PartitionKind,
|
pub kind: PartitionKind,
|
||||||
pub start_sector: u32,
|
pub start_sector: u32,
|
||||||
pub data_start_sector: u32,
|
pub data_start_sector: u32,
|
||||||
|
@ -99,13 +96,14 @@ pub enum Block {
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Block {
|
impl Block {
|
||||||
|
/// Decrypts the block's data (if necessary) and writes it to the output buffer.
|
||||||
pub(crate) fn decrypt(
|
pub(crate) fn decrypt(
|
||||||
self,
|
self,
|
||||||
out: &mut [u8; SECTOR_SIZE],
|
out: &mut [u8; SECTOR_SIZE],
|
||||||
data: &[u8],
|
data: &[u8],
|
||||||
block_idx: u32,
|
block_idx: u32,
|
||||||
abs_sector: u32,
|
abs_sector: u32,
|
||||||
partition: &BPartitionInfo,
|
partition: &PartitionInfo,
|
||||||
) -> io::Result<()> {
|
) -> io::Result<()> {
|
||||||
let rel_sector = abs_sector - self.start_sector(block_idx, data.len());
|
let rel_sector = abs_sector - self.start_sector(block_idx, data.len());
|
||||||
match self {
|
match self {
|
||||||
|
@ -131,13 +129,14 @@ impl Block {
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Encrypts the block's data (if necessary) and writes it to the output buffer.
|
||||||
pub(crate) fn encrypt(
|
pub(crate) fn encrypt(
|
||||||
self,
|
self,
|
||||||
out: &mut [u8; SECTOR_SIZE],
|
out: &mut [u8; SECTOR_SIZE],
|
||||||
data: &[u8],
|
data: &[u8],
|
||||||
block_idx: u32,
|
block_idx: u32,
|
||||||
abs_sector: u32,
|
abs_sector: u32,
|
||||||
partition: &BPartitionInfo,
|
partition: &PartitionInfo,
|
||||||
) -> io::Result<()> {
|
) -> io::Result<()> {
|
||||||
let rel_sector = abs_sector - self.start_sector(block_idx, data.len());
|
let rel_sector = abs_sector - self.start_sector(block_idx, data.len());
|
||||||
match self {
|
match self {
|
||||||
|
@ -165,6 +164,7 @@ impl Block {
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Copies the block's raw data to the output buffer.
|
||||||
pub(crate) fn copy_raw(
|
pub(crate) fn copy_raw(
|
||||||
self,
|
self,
|
||||||
out: &mut [u8; SECTOR_SIZE],
|
out: &mut [u8; SECTOR_SIZE],
|
||||||
|
@ -225,7 +225,7 @@ fn block_sector<const N: usize>(data: &[u8], sector_idx: u32) -> io::Result<&[u8
|
||||||
fn generate_junk(
|
fn generate_junk(
|
||||||
out: &mut [u8; SECTOR_SIZE],
|
out: &mut [u8; SECTOR_SIZE],
|
||||||
sector: u32,
|
sector: u32,
|
||||||
partition: Option<&BPartitionInfo>,
|
partition: Option<&PartitionInfo>,
|
||||||
disc_header: &DiscHeader,
|
disc_header: &DiscHeader,
|
||||||
) {
|
) {
|
||||||
let mut pos = if let Some(partition) = partition {
|
let mut pos = if let Some(partition) = partition {
|
||||||
|
@ -248,7 +248,7 @@ fn generate_junk(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn rebuild_hash_block(out: &mut [u8; SECTOR_SIZE], sector: u32, partition: &BPartitionInfo) {
|
fn rebuild_hash_block(out: &mut [u8; SECTOR_SIZE], sector: u32, partition: &PartitionInfo) {
|
||||||
let Some(hash_table) = partition.hash_table.as_ref() else {
|
let Some(hash_table) = partition.hash_table.as_ref() else {
|
||||||
return;
|
return;
|
||||||
};
|
};
|
||||||
|
@ -264,14 +264,14 @@ fn rebuild_hash_block(out: &mut [u8; SECTOR_SIZE], sector: u32, partition: &BPar
|
||||||
out[0x340..0x3E0].copy_from_slice(h2_hashes);
|
out[0x340..0x3E0].copy_from_slice(h2_hashes);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn encrypt_sector(out: &mut [u8; SECTOR_SIZE], partition: &BPartitionInfo) {
|
fn encrypt_sector(out: &mut [u8; SECTOR_SIZE], partition: &PartitionInfo) {
|
||||||
aes_encrypt(&partition.key, [0u8; 16], &mut out[..HASHES_SIZE]);
|
aes_encrypt(&partition.key, [0u8; 16], &mut out[..HASHES_SIZE]);
|
||||||
// Data IV from encrypted hash block
|
// Data IV from encrypted hash block
|
||||||
let iv = *array_ref![out, 0x3D0, 16];
|
let iv = *array_ref![out, 0x3D0, 16];
|
||||||
aes_encrypt(&partition.key, iv, &mut out[HASHES_SIZE..]);
|
aes_encrypt(&partition.key, iv, &mut out[HASHES_SIZE..]);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn decrypt_sector(out: &mut [u8; SECTOR_SIZE], partition: &BPartitionInfo) {
|
fn decrypt_sector(out: &mut [u8; SECTOR_SIZE], partition: &PartitionInfo) {
|
||||||
// Data IV from encrypted hash block
|
// Data IV from encrypted hash block
|
||||||
let iv = *array_ref![out, 0x3D0, 16];
|
let iv = *array_ref![out, 0x3D0, 16];
|
||||||
aes_decrypt(&partition.key, [0u8; 16], &mut out[..HASHES_SIZE]);
|
aes_decrypt(&partition.key, [0u8; 16], &mut out[..HASHES_SIZE]);
|
|
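For reference alongside the encrypt_sector/decrypt_sector helpers above: a Wii partition sector is 0x8000 bytes, a 0x400-byte hash block followed by 0x7C00 bytes of user data; the hash block is encrypted with an all-zero IV and the data IV is taken from offset 0x3D0 of the encrypted hash block. A minimal sketch of that layout using the aes/cbc crates already in the dependency tree (the constants and CBC calls here are illustrative, not this crate's actual helpers):

```rust
// Sketch only: mirrors the sector layout used by encrypt_sector/decrypt_sector above.
use aes::cipher::{block_padding::NoPadding, BlockEncryptMut, KeyIvInit};

const SECTOR_SIZE: usize = 0x8000; // full Wii sector
const HASHES_SIZE: usize = 0x400;  // H0/H1/H2 hash block at the start of the sector

type Aes128CbcEnc = cbc::Encryptor<aes::Aes128>;

fn encrypt_sector_sketch(sector: &mut [u8; SECTOR_SIZE], key: &[u8; 16]) {
    // 1. Encrypt the hash block with an all-zero IV.
    Aes128CbcEnc::new(key.into(), &[0u8; 16].into())
        .encrypt_padded_mut::<NoPadding>(&mut sector[..HASHES_SIZE], HASHES_SIZE)
        .unwrap();
    // 2. The data IV is bytes 0x3D0..0x3E0 of the *encrypted* hash block.
    let iv: [u8; 16] = sector[0x3D0..0x3E0].try_into().unwrap();
    // 3. Encrypt the remaining 0x7C00 bytes of user data with that IV.
    let data_len = SECTOR_SIZE - HASHES_SIZE;
    Aes128CbcEnc::new(key.into(), &iv.into())
        .encrypt_padded_mut::<NoPadding>(&mut sector[HASHES_SIZE..], data_len)
        .unwrap();
}
```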
@ -10,10 +10,10 @@ use zerocopy::{little_endian::*, AsBytes, FromBytes, FromZeroes};
|
||||||
use crate::{
|
use crate::{
|
||||||
disc::SECTOR_SIZE,
|
disc::SECTOR_SIZE,
|
||||||
io::{
|
io::{
|
||||||
block::{BPartitionInfo, Block, BlockIO},
|
block::{Block, BlockIO, PartitionInfo},
|
||||||
nkit::NKitHeader,
|
nkit::NKitHeader,
|
||||||
split::SplitFileReader,
|
split::SplitFileReader,
|
||||||
MagicBytes,
|
Format, MagicBytes,
|
||||||
},
|
},
|
||||||
static_assert,
|
static_assert,
|
||||||
util::read::read_from,
|
util::read::read_from,
|
||||||
|
@ -101,7 +101,7 @@ impl BlockIO for DiscIOCISO {
|
||||||
&mut self,
|
&mut self,
|
||||||
out: &mut [u8],
|
out: &mut [u8],
|
||||||
block: u32,
|
block: u32,
|
||||||
_partition: Option<&BPartitionInfo>,
|
_partition: Option<&PartitionInfo>,
|
||||||
) -> io::Result<Option<Block>> {
|
) -> io::Result<Option<Block>> {
|
||||||
if block >= CISO_MAP_SIZE as u32 {
|
if block >= CISO_MAP_SIZE as u32 {
|
||||||
// Out of bounds
|
// Out of bounds
|
||||||
|
@ -130,7 +130,15 @@ impl BlockIO for DiscIOCISO {
|
||||||
|
|
||||||
fn block_size(&self) -> u32 { self.header.block_size.get() }
|
fn block_size(&self) -> u32 { self.header.block_size.get() }
|
||||||
|
|
||||||
fn meta(&self) -> Result<DiscMeta> {
|
fn meta(&self) -> DiscMeta {
|
||||||
Ok(self.nkit_header.as_ref().map(DiscMeta::from).unwrap_or_default())
|
let mut result = DiscMeta {
|
||||||
|
format: Format::Ciso,
|
||||||
|
block_size: Some(self.header.block_size.get()),
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
if let Some(nkit_header) = &self.nkit_header {
|
||||||
|
nkit_header.apply(&mut result);
|
||||||
|
}
|
||||||
|
result
|
||||||
}
|
}
|
||||||
}
|
}
|
|
@ -7,8 +7,9 @@ use std::{
|
||||||
use crate::{
|
use crate::{
|
||||||
disc::SECTOR_SIZE,
|
disc::SECTOR_SIZE,
|
||||||
io::{
|
io::{
|
||||||
block::{BPartitionInfo, Block, BlockIO},
|
block::{Block, BlockIO, PartitionInfo},
|
||||||
split::SplitFileReader,
|
split::SplitFileReader,
|
||||||
|
Format,
|
||||||
},
|
},
|
||||||
DiscMeta, Error, Result,
|
DiscMeta, Error, Result,
|
||||||
};
|
};
|
||||||
|
@ -35,7 +36,7 @@ impl BlockIO for DiscIOISO {
|
||||||
&mut self,
|
&mut self,
|
||||||
out: &mut [u8],
|
out: &mut [u8],
|
||||||
block: u32,
|
block: u32,
|
||||||
_partition: Option<&BPartitionInfo>,
|
_partition: Option<&PartitionInfo>,
|
||||||
) -> io::Result<Option<Block>> {
|
) -> io::Result<Option<Block>> {
|
||||||
let offset = block as u64 * SECTOR_SIZE as u64;
|
let offset = block as u64 * SECTOR_SIZE as u64;
|
||||||
if offset >= self.inner.len() {
|
if offset >= self.inner.len() {
|
||||||
|
@ -50,7 +51,12 @@ impl BlockIO for DiscIOISO {
|
||||||
|
|
||||||
fn block_size(&self) -> u32 { SECTOR_SIZE as u32 }
|
fn block_size(&self) -> u32 { SECTOR_SIZE as u32 }
|
||||||
|
|
||||||
fn meta(&self) -> Result<DiscMeta> {
|
fn meta(&self) -> DiscMeta {
|
||||||
Ok(DiscMeta { lossless: true, disc_size: Some(self.inner.len()), ..Default::default() })
|
DiscMeta {
|
||||||
|
format: Format::Iso,
|
||||||
|
lossless: true,
|
||||||
|
disc_size: Some(self.inner.len()),
|
||||||
|
..Default::default()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
|
@ -1,5 +1,7 @@
|
||||||
//! Disc file format related logic (CISO, NFS, WBFS, WIA, etc.)
|
//! Disc file format related logic (CISO, NFS, WBFS, WIA, etc.)
|
||||||
|
|
||||||
|
use std::fmt;
|
||||||
|
|
||||||
use crate::{streams::ReadStream, Result};
|
use crate::{streams::ReadStream, Result};
|
||||||
|
|
||||||
pub(crate) mod block;
|
pub(crate) mod block;
|
||||||
|
@ -33,9 +35,75 @@ pub trait DiscIO: Send + Sync {
|
||||||
fn disc_size(&self) -> Option<u64>;
|
fn disc_size(&self) -> Option<u64>;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
|
||||||
|
pub enum Format {
|
||||||
|
/// Raw ISO
|
||||||
|
#[default]
|
||||||
|
Iso,
|
||||||
|
/// CISO
|
||||||
|
Ciso,
|
||||||
|
/// NFS (Wii U VC)
|
||||||
|
Nfs,
|
||||||
|
/// RVZ
|
||||||
|
Rvz,
|
||||||
|
/// WBFS
|
||||||
|
Wbfs,
|
||||||
|
/// WIA
|
||||||
|
Wia,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Display for Format {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
match self {
|
||||||
|
Format::Iso => write!(f, "ISO"),
|
||||||
|
Format::Ciso => write!(f, "CISO"),
|
||||||
|
Format::Nfs => write!(f, "NFS"),
|
||||||
|
Format::Rvz => write!(f, "RVZ"),
|
||||||
|
Format::Wbfs => write!(f, "WBFS"),
|
||||||
|
Format::Wia => write!(f, "WIA"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
|
||||||
|
pub enum Compression {
|
||||||
|
/// No compression
|
||||||
|
#[default]
|
||||||
|
None,
|
||||||
|
/// Purge (WIA only)
|
||||||
|
Purge,
|
||||||
|
/// BZIP2
|
||||||
|
Bzip2,
|
||||||
|
/// LZMA
|
||||||
|
Lzma,
|
||||||
|
/// LZMA2
|
||||||
|
Lzma2,
|
||||||
|
/// Zstandard
|
||||||
|
Zstandard,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Display for Compression {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
match self {
|
||||||
|
Compression::None => write!(f, "None"),
|
||||||
|
Compression::Purge => write!(f, "Purge"),
|
||||||
|
Compression::Bzip2 => write!(f, "BZIP2"),
|
||||||
|
Compression::Lzma => write!(f, "LZMA"),
|
||||||
|
Compression::Lzma2 => write!(f, "LZMA2"),
|
||||||
|
Compression::Zstandard => write!(f, "Zstandard"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Extra metadata about the underlying disc file format.
|
/// Extra metadata about the underlying disc file format.
|
||||||
#[derive(Debug, Clone, Default)]
|
#[derive(Debug, Clone, Default)]
|
||||||
pub struct DiscMeta {
|
pub struct DiscMeta {
|
||||||
|
/// The disc file format.
|
||||||
|
pub format: Format,
|
||||||
|
/// The format's compression algorithm.
|
||||||
|
pub compression: Compression,
|
||||||
|
/// If the format uses blocks, the block size in bytes.
|
||||||
|
pub block_size: Option<u32>,
|
||||||
/// Whether Wii partitions are stored decrypted in the format.
|
/// Whether Wii partitions are stored decrypted in the format.
|
||||||
pub decrypted: bool,
|
pub decrypted: bool,
|
||||||
/// Whether the format omits Wii partition data hashes.
|
/// Whether the format omits Wii partition data hashes.
|
|
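The new DiscMeta fields above let each container report its format, compression, and block size alongside the existing verification data. A hypothetical block reader would fill in what it knows, fall back to Default for the rest, and let an NKit header overlay the hashes, mirroring the CISO/WBFS meta() implementations elsewhere in this commit (sketch only, using crate-internal types):

```rust
// Sketch of how a block reader might populate DiscMeta.
fn example_meta(block_size: u32, nkit: Option<&NKitHeader>) -> DiscMeta {
    let mut meta = DiscMeta {
        format: Format::Wbfs,
        compression: Compression::None,
        block_size: Some(block_size),
        ..Default::default()
    };
    // Overlay size/CRC32/MD5/SHA-1/XXH64 from an NKit header, if present.
    if let Some(nkit) = nkit {
        nkit.apply(&mut meta);
    }
    meta
}
```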
@ -12,13 +12,13 @@ use crate::{
|
||||||
disc::SECTOR_SIZE,
|
disc::SECTOR_SIZE,
|
||||||
io::{
|
io::{
|
||||||
aes_decrypt,
|
aes_decrypt,
|
||||||
block::{BPartitionInfo, Block, BlockIO},
|
block::{Block, BlockIO, PartitionInfo},
|
||||||
split::SplitFileReader,
|
split::SplitFileReader,
|
||||||
KeyBytes, MagicBytes,
|
Format, KeyBytes, MagicBytes,
|
||||||
},
|
},
|
||||||
static_assert,
|
static_assert,
|
||||||
util::read::read_from,
|
util::read::read_from,
|
||||||
DiscMeta, Error, OpenOptions, Result, ResultContext,
|
DiscMeta, Error, Result, ResultContext,
|
||||||
};
|
};
|
||||||
|
|
||||||
pub const NFS_MAGIC: MagicBytes = *b"EGGS";
|
pub const NFS_MAGIC: MagicBytes = *b"EGGS";
|
||||||
|
@ -89,7 +89,6 @@ pub struct DiscIONFS {
|
||||||
raw_size: u64,
|
raw_size: u64,
|
||||||
disc_size: u64,
|
disc_size: u64,
|
||||||
key: KeyBytes,
|
key: KeyBytes,
|
||||||
encrypt: bool,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Clone for DiscIONFS {
|
impl Clone for DiscIONFS {
|
||||||
|
@ -100,20 +99,18 @@ impl Clone for DiscIONFS {
|
||||||
raw_size: self.raw_size,
|
raw_size: self.raw_size,
|
||||||
disc_size: self.disc_size,
|
disc_size: self.disc_size,
|
||||||
key: self.key,
|
key: self.key,
|
||||||
encrypt: self.encrypt,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl DiscIONFS {
|
impl DiscIONFS {
|
||||||
pub fn new(directory: &Path, options: &OpenOptions) -> Result<Box<Self>> {
|
pub fn new(directory: &Path) -> Result<Box<Self>> {
|
||||||
let mut disc_io = Box::new(Self {
|
let mut disc_io = Box::new(Self {
|
||||||
inner: SplitFileReader::empty(),
|
inner: SplitFileReader::empty(),
|
||||||
header: NFSHeader::new_zeroed(),
|
header: NFSHeader::new_zeroed(),
|
||||||
raw_size: 0,
|
raw_size: 0,
|
||||||
disc_size: 0,
|
disc_size: 0,
|
||||||
key: [0; 16],
|
key: [0; 16],
|
||||||
encrypt: options.rebuild_encryption,
|
|
||||||
});
|
});
|
||||||
disc_io.load_files(directory)?;
|
disc_io.load_files(directory)?;
|
||||||
Ok(disc_io)
|
Ok(disc_io)
|
||||||
|
@ -125,7 +122,7 @@ impl BlockIO for DiscIONFS {
|
||||||
&mut self,
|
&mut self,
|
||||||
out: &mut [u8],
|
out: &mut [u8],
|
||||||
sector: u32,
|
sector: u32,
|
||||||
partition: Option<&BPartitionInfo>,
|
partition: Option<&PartitionInfo>,
|
||||||
) -> io::Result<Option<Block>> {
|
) -> io::Result<Option<Block>> {
|
||||||
// Calculate physical sector
|
// Calculate physical sector
|
||||||
let phys_sector = self.header.phys_sector(sector);
|
let phys_sector = self.header.phys_sector(sector);
|
||||||
|
@ -157,8 +154,8 @@ impl BlockIO for DiscIONFS {
|
||||||
|
|
||||||
fn block_size(&self) -> u32 { SECTOR_SIZE as u32 }
|
fn block_size(&self) -> u32 { SECTOR_SIZE as u32 }
|
||||||
|
|
||||||
fn meta(&self) -> Result<DiscMeta> {
|
fn meta(&self) -> DiscMeta {
|
||||||
Ok(DiscMeta { decrypted: true, lossless: true, ..Default::default() })
|
DiscMeta { format: Format::Nfs, decrypted: true, ..Default::default() }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -68,6 +68,7 @@ pub struct NKitHeader {
|
||||||
pub xxhash64: Option<u64>,
|
pub xxhash64: Option<u64>,
|
||||||
/// Bitstream of blocks that are junk data
|
/// Bitstream of blocks that are junk data
|
||||||
pub junk_bits: Option<Vec<u8>>,
|
pub junk_bits: Option<Vec<u8>>,
|
||||||
|
pub block_size: u32,
|
||||||
}
|
}
|
||||||
|
|
||||||
const VERSION_PREFIX: [u8; 7] = *b"NKIT v";
|
const VERSION_PREFIX: [u8; 7] = *b"NKIT v";
|
||||||
|
@ -146,7 +147,7 @@ impl NKitHeader {
|
||||||
None
|
None
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(Self { version, flags, size, crc32, md5, sha1, xxhash64, junk_bits })
|
Ok(Self { version, flags, size, crc32, md5, sha1, xxhash64, junk_bits, block_size })
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn is_junk_block(&self, block: u32) -> Option<bool> {
|
pub fn is_junk_block(&self, block: u32) -> Option<bool> {
|
||||||
|
@ -155,19 +156,14 @@ impl NKitHeader {
|
||||||
.and_then(|v| v.get((block / 8) as usize))
|
.and_then(|v| v.get((block / 8) as usize))
|
||||||
.map(|&b| b & (1 << (7 - (block & 7))) != 0)
|
.map(|&b| b & (1 << (7 - (block & 7))) != 0)
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
impl From<&NKitHeader> for DiscMeta {
|
pub fn apply(&self, meta: &mut DiscMeta) {
|
||||||
fn from(value: &NKitHeader) -> Self {
|
meta.needs_hash_recovery |= self.junk_bits.is_some();
|
||||||
Self {
|
meta.lossless |= self.size.is_some() && self.junk_bits.is_some();
|
||||||
needs_hash_recovery: value.junk_bits.is_some(),
|
meta.disc_size = meta.disc_size.or(self.size);
|
||||||
lossless: value.size.is_some() && value.junk_bits.is_some(),
|
meta.crc32 = self.crc32;
|
||||||
disc_size: value.size,
|
meta.md5 = self.md5;
|
||||||
crc32: value.crc32,
|
meta.sha1 = self.sha1;
|
||||||
md5: value.md5,
|
meta.xxhash64 = self.xxhash64;
|
||||||
sha1: value.sha1,
|
|
||||||
xxhash64: value.xxhash64,
|
|
||||||
..Default::default()
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
|
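The junk_bits field above is an MSB-first bitstream with one bit per block, which is exactly what is_junk_block indexes. A self-contained restatement with a small test to pin down the bit order:

```rust
// Sketch: MSB-first bit lookup, equivalent to NKitHeader::is_junk_block above.
// Block 0 is the most significant bit of byte 0, block 7 the least significant.
fn is_junk(junk_bits: &[u8], block: u32) -> Option<bool> {
    junk_bits
        .get((block / 8) as usize)
        .map(|&b| b & (1 << (7 - (block & 7))) != 0)
}

#[test]
fn junk_bit_indexing() {
    let bits = [0b1000_0001u8]; // blocks 0 and 7 are junk
    assert_eq!(is_junk(&bits, 0), Some(true));
    assert_eq!(is_junk(&bits, 3), Some(false));
    assert_eq!(is_junk(&bits, 7), Some(true));
    assert_eq!(is_junk(&bits, 8), None); // out of range
}
```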
@ -9,10 +9,10 @@ use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
io::{
|
io::{
|
||||||
block::{BPartitionInfo, Block, BlockIO},
|
block::{Block, BlockIO, PartitionInfo},
|
||||||
nkit::NKitHeader,
|
nkit::NKitHeader,
|
||||||
split::SplitFileReader,
|
split::SplitFileReader,
|
||||||
DiscMeta, MagicBytes,
|
DiscMeta, Format, MagicBytes,
|
||||||
},
|
},
|
||||||
util::read::{read_box_slice, read_from},
|
util::read::{read_box_slice, read_from},
|
||||||
Error, Result, ResultContext,
|
Error, Result, ResultContext,
|
||||||
|
@ -113,7 +113,7 @@ impl BlockIO for DiscIOWBFS {
|
||||||
&mut self,
|
&mut self,
|
||||||
out: &mut [u8],
|
out: &mut [u8],
|
||||||
block: u32,
|
block: u32,
|
||||||
_partition: Option<&BPartitionInfo>,
|
_partition: Option<&PartitionInfo>,
|
||||||
) -> io::Result<Option<Block>> {
|
) -> io::Result<Option<Block>> {
|
||||||
let block_size = self.header.block_size();
|
let block_size = self.header.block_size();
|
||||||
if block >= self.header.max_blocks() {
|
if block >= self.header.max_blocks() {
|
||||||
|
@ -134,7 +134,15 @@ impl BlockIO for DiscIOWBFS {
|
||||||
|
|
||||||
fn block_size(&self) -> u32 { self.header.block_size() }
|
fn block_size(&self) -> u32 { self.header.block_size() }
|
||||||
|
|
||||||
fn meta(&self) -> Result<DiscMeta> {
|
fn meta(&self) -> DiscMeta {
|
||||||
Ok(self.nkit_header.as_ref().map(DiscMeta::from).unwrap_or_default())
|
let mut result = DiscMeta {
|
||||||
|
format: Format::Wbfs,
|
||||||
|
block_size: Some(self.header.block_size()),
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
if let Some(nkit_header) = &self.nkit_header {
|
||||||
|
nkit_header.apply(&mut result);
|
||||||
|
}
|
||||||
|
result
|
||||||
}
|
}
|
||||||
}
|
}
|
|
@ -14,10 +14,10 @@ use crate::{
|
||||||
SECTOR_SIZE,
|
SECTOR_SIZE,
|
||||||
},
|
},
|
||||||
io::{
|
io::{
|
||||||
block::{BPartitionInfo, Block, BlockIO},
|
block::{Block, BlockIO, PartitionInfo},
|
||||||
nkit::NKitHeader,
|
nkit::NKitHeader,
|
||||||
split::SplitFileReader,
|
split::SplitFileReader,
|
||||||
HashBytes, KeyBytes, MagicBytes,
|
Compression, Format, HashBytes, KeyBytes, MagicBytes,
|
||||||
},
|
},
|
||||||
static_assert,
|
static_assert,
|
||||||
util::{
|
util::{
|
||||||
|
@ -26,7 +26,7 @@ use crate::{
|
||||||
read::{read_box_slice, read_from, read_u16_be, read_vec},
|
read::{read_box_slice, read_from, read_u16_be, read_vec},
|
||||||
take_seek::TakeSeekExt,
|
take_seek::TakeSeekExt,
|
||||||
},
|
},
|
||||||
DiscMeta, Error, OpenOptions, Result, ResultContext,
|
DiscMeta, Error, Result, ResultContext,
|
||||||
};
|
};
|
||||||
|
|
||||||
pub const WIA_MAGIC: MagicBytes = *b"WIA\x01";
|
pub const WIA_MAGIC: MagicBytes = *b"WIA\x01";
|
||||||
|
@ -114,7 +114,7 @@ impl TryFrom<u32> for DiscType {
|
||||||
|
|
||||||
/// Compression type
|
/// Compression type
|
||||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||||
pub enum Compression {
|
pub enum WIACompression {
|
||||||
/// No compression.
|
/// No compression.
|
||||||
None,
|
None,
|
||||||
/// (WIA only) See [WIASegment]
|
/// (WIA only) See [WIASegment]
|
||||||
|
@ -129,7 +129,7 @@ pub enum Compression {
|
||||||
Zstandard,
|
Zstandard,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl TryFrom<u32> for Compression {
|
impl TryFrom<u32> for WIACompression {
|
||||||
type Error = Error;
|
type Error = Error;
|
||||||
|
|
||||||
fn try_from(value: u32) -> Result<Self> {
|
fn try_from(value: u32) -> Result<Self> {
|
||||||
|
@ -161,7 +161,7 @@ pub struct WIADisc {
|
||||||
///
|
///
|
||||||
/// RVZ only:
|
/// RVZ only:
|
||||||
/// > This is signed (instead of unsigned) to support negative compression levels in
|
/// > This is signed (instead of unsigned) to support negative compression levels in
|
||||||
/// [Zstandard](Compression::Zstandard) (RVZ only).
|
/// [Zstandard](WIACompression::Zstandard) (RVZ only).
|
||||||
pub compression_level: I32,
|
pub compression_level: I32,
|
||||||
/// The size of the chunks that data is divided into.
|
/// The size of the chunks that data is divided into.
|
||||||
///
|
///
|
||||||
|
@ -208,14 +208,14 @@ pub struct WIADisc {
|
||||||
pub compr_data_len: u8,
|
pub compr_data_len: u8,
|
||||||
/// Compressor specific data.
|
/// Compressor specific data.
|
||||||
///
|
///
|
||||||
/// If the compression method is [None](Compression::None), [Purge](Compression::Purge),
|
/// If the compression method is [None](WIACompression::None), [Purge](WIACompression::Purge),
|
||||||
/// [Bzip2](Compression::Bzip2), or [Zstandard](Compression::Zstandard) (RVZ only),
|
/// [Bzip2](WIACompression::Bzip2), or [Zstandard](WIACompression::Zstandard) (RVZ only),
|
||||||
/// [compr_data_len](Self::compr_data_len) is 0. If the compression method is
|
/// [compr_data_len](Self::compr_data_len) is 0. If the compression method is
|
||||||
/// [Lzma](Compression::Lzma) or [Lzma2](Compression::Lzma2), the compressor specific data is
|
/// [Lzma](WIACompression::Lzma) or [Lzma2](WIACompression::Lzma2), the compressor specific data is
|
||||||
/// stored in the format used by the 7-Zip SDK. It needs to be converted if you are using e.g.
|
/// stored in the format used by the 7-Zip SDK. It needs to be converted if you are using e.g.
|
||||||
/// liblzma.
|
/// liblzma.
|
||||||
///
|
///
|
||||||
/// For [Lzma](Compression::Lzma), the data is 5 bytes long. The first byte encodes the `lc`,
|
/// For [Lzma](WIACompression::Lzma), the data is 5 bytes long. The first byte encodes the `lc`,
|
||||||
/// `pb`, and `lp` parameters, and the four other bytes encode the dictionary size in little
|
/// `pb`, and `lp` parameters, and the four other bytes encode the dictionary size in little
|
||||||
/// endian.
|
/// endian.
|
||||||
pub compr_data: [u8; 7],
|
pub compr_data: [u8; 7],
|
||||||
|
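As the doc comment above notes, for LZMA the compressor data is in the 7-Zip SDK layout: one properties byte encoding lc/lp/pb followed by a little-endian u32 dictionary size. A sketch of decoding it into the parameters liblzma expects, using the standard LZMA properties formula (not code from this crate):

```rust
// Decode the 7-Zip style LZMA properties: prop = (pb * 5 + lp) * 9 + lc,
// followed by the dictionary size in little-endian.
fn decode_lzma_props(data: &[u8; 5]) -> Option<(u32, u32, u32, u32)> {
    let prop = data[0] as u32;
    if prop >= 9 * 5 * 5 {
        return None; // invalid properties byte
    }
    let lc = prop % 9;
    let lp = (prop / 9) % 5;
    let pb = prop / 45;
    let dict_size = u32::from_le_bytes([data[1], data[2], data[3], data[4]]);
    Some((lc, lp, pb, dict_size))
}
```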
@ -226,7 +226,7 @@ static_assert!(size_of::<WIADisc>() == 0xDC);
|
||||||
impl WIADisc {
|
impl WIADisc {
|
||||||
pub fn validate(&self) -> Result<()> {
|
pub fn validate(&self) -> Result<()> {
|
||||||
DiscType::try_from(self.disc_type.get())?;
|
DiscType::try_from(self.disc_type.get())?;
|
||||||
Compression::try_from(self.compression.get())?;
|
WIACompression::try_from(self.compression.get())?;
|
||||||
if self.partition_type_size.get() != size_of::<WIAPartition>() as u32 {
|
if self.partition_type_size.get() != size_of::<WIAPartition>() as u32 {
|
||||||
return Err(Error::DiscFormat(format!(
|
return Err(Error::DiscFormat(format!(
|
||||||
"WIA partition type size is {}, expected {}",
|
"WIA partition type size is {}, expected {}",
|
||||||
|
@ -237,8 +237,8 @@ impl WIADisc {
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn compression(&self) -> Compression {
|
pub fn compression(&self) -> WIACompression {
|
||||||
Compression::try_from(self.compression.get()).unwrap()
|
WIACompression::try_from(self.compression.get()).unwrap()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -428,8 +428,8 @@ pub struct WIAException {
|
||||||
///
|
///
|
||||||
/// For memory management reasons, programs which read WIA files might place a limit on how many
|
/// For memory management reasons, programs which read WIA files might place a limit on how many
|
||||||
/// exceptions there can be in a [WIAExceptionList]. Dolphin's reading code has a limit of
|
/// exceptions there can be in a [WIAExceptionList]. Dolphin's reading code has a limit of
|
||||||
/// `52 × 64 = 3328` (unless the compression method is [None](Compression::None) or
|
/// `52 × 64 = 3328` (unless the compression method is [None](WIACompression::None) or
|
||||||
/// [Purge](Compression::Purge), in which case there is no limit), which is enough to cover all
|
/// [Purge](WIACompression::Purge), in which case there is no limit), which is enough to cover all
|
||||||
/// hashes and all padding. wit's reading code seems to be written as if `47 × 64 = 3008` is the
|
/// hashes and all padding. wit's reading code seems to be written as if `47 × 64 = 3008` is the
|
||||||
/// maximum it needs to be able to handle, which is enough to cover all hashes but not any padding.
|
/// maximum it needs to be able to handle, which is enough to cover all hashes but not any padding.
|
||||||
/// However, because wit allocates more memory than needed, it seems to be possible to exceed 3008
|
/// However, because wit allocates more memory than needed, it seems to be possible to exceed 3008
|
||||||
|
@ -438,12 +438,12 @@ pub struct WIAException {
|
||||||
///
|
///
|
||||||
/// Somewhat ironically, there are exceptions to how [WIAExceptionList] structs are handled:
|
/// Somewhat ironically, there are exceptions to how [WIAExceptionList] structs are handled:
|
||||||
///
|
///
|
||||||
/// For the compression method [Purge](Compression::Purge), the [WIAExceptionList] structs are
|
/// For the compression method [Purge](WIACompression::Purge), the [WIAExceptionList] structs are
|
||||||
/// stored uncompressed (in other words, before the first [WIASegment]). For
|
/// stored uncompressed (in other words, before the first [WIASegment]). For
|
||||||
/// [Bzip2](Compression::Bzip2), [Lzma](Compression::Lzma) and [Lzma2](Compression::Lzma2), they are
|
/// [Bzip2](WIACompression::Bzip2), [Lzma](WIACompression::Lzma) and [Lzma2](WIACompression::Lzma2), they are
|
||||||
/// compressed along with the rest of the data.
|
/// compressed along with the rest of the data.
|
||||||
///
|
///
|
||||||
/// For the compression methods [None](Compression::None) and [Purge](Compression::Purge), if the
|
/// For the compression methods [None](WIACompression::None) and [Purge](WIACompression::Purge), if the
|
||||||
/// end offset of the last [WIAExceptionList] is not evenly divisible by 4, padding is inserted
|
/// end offset of the last [WIAExceptionList] is not evenly divisible by 4, padding is inserted
|
||||||
/// after it so that the data afterwards will start at a 4 byte boundary. This padding is not
|
/// after it so that the data afterwards will start at a 4 byte boundary. This padding is not
|
||||||
/// inserted for the other compression methods.
|
/// inserted for the other compression methods.
|
||||||
|
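A small illustration of the 4-byte alignment rule described above for the None and Purge compression methods: the padding after the last [WIAExceptionList] is whatever brings its end offset up to the next multiple of 4 (a sketch, not code from this crate):

```rust
// Padding inserted after the exception lists for None/Purge so the
// following data starts on a 4-byte boundary.
fn exception_list_padding(end_offset: u64) -> u64 {
    (4 - (end_offset % 4)) % 4
}

#[test]
fn padding_examples() {
    assert_eq!(exception_list_padding(3008), 0); // already aligned
    assert_eq!(exception_list_padding(3010), 2); // pad up to 3012
}
```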
@ -466,15 +466,15 @@ impl Decompressor {
|
||||||
pub fn new(disc: &WIADisc) -> Result<Self> {
|
pub fn new(disc: &WIADisc) -> Result<Self> {
|
||||||
let data = &disc.compr_data[..disc.compr_data_len as usize];
|
let data = &disc.compr_data[..disc.compr_data_len as usize];
|
||||||
match disc.compression() {
|
match disc.compression() {
|
||||||
Compression::None => Ok(Self::None),
|
WIACompression::None => Ok(Self::None),
|
||||||
#[cfg(feature = "compress-bzip2")]
|
#[cfg(feature = "compress-bzip2")]
|
||||||
Compression::Bzip2 => Ok(Self::Bzip2),
|
WIACompression::Bzip2 => Ok(Self::Bzip2),
|
||||||
#[cfg(feature = "compress-lzma")]
|
#[cfg(feature = "compress-lzma")]
|
||||||
Compression::Lzma => Ok(Self::Lzma(Box::from(data))),
|
WIACompression::Lzma => Ok(Self::Lzma(Box::from(data))),
|
||||||
#[cfg(feature = "compress-lzma")]
|
#[cfg(feature = "compress-lzma")]
|
||||||
Compression::Lzma2 => Ok(Self::Lzma2(Box::from(data))),
|
WIACompression::Lzma2 => Ok(Self::Lzma2(Box::from(data))),
|
||||||
#[cfg(feature = "compress-zstd")]
|
#[cfg(feature = "compress-zstd")]
|
||||||
Compression::Zstandard => Ok(Self::Zstandard),
|
WIACompression::Zstandard => Ok(Self::Zstandard),
|
||||||
comp => Err(Error::DiscFormat(format!("Unsupported WIA/RVZ compression: {:?}", comp))),
|
comp => Err(Error::DiscFormat(format!("Unsupported WIA/RVZ compression: {:?}", comp))),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
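The feature-gated Decompressor above selects one codec per WIADisc. As a rough illustration of how such variants can wrap an underlying group reader, using the bzip2 and zstd crates already in the dependency tree (a sketch, not the crate's actual read path; the LZMA variants and error mapping are omitted):

```rust
use std::io::{self, Read};

// Sketch: wrap a compressed group stream in the matching decoder.
enum GroupReader<R: io::BufRead> {
    None(R),
    #[cfg(feature = "compress-bzip2")]
    Bzip2(bzip2::read::BzDecoder<R>),
    #[cfg(feature = "compress-zstd")]
    Zstandard(zstd::Decoder<'static, R>),
}

impl<R: io::BufRead> Read for GroupReader<R> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        match self {
            GroupReader::None(r) => r.read(buf),
            #[cfg(feature = "compress-bzip2")]
            GroupReader::Bzip2(r) => r.read(buf),
            #[cfg(feature = "compress-zstd")]
            GroupReader::Zstandard(r) => r.read(buf),
        }
    }
}
```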
@ -556,7 +556,7 @@ fn verify_hash(buf: &[u8], expected: &HashBytes) -> Result<()> {
|
||||||
}
|
}
|
||||||
|
|
||||||
impl DiscIOWIA {
|
impl DiscIOWIA {
|
||||||
pub fn new(filename: &Path, _options: &OpenOptions) -> Result<Box<Self>> {
|
pub fn new(filename: &Path) -> Result<Box<Self>> {
|
||||||
let mut inner = SplitFileReader::new(filename)?;
|
let mut inner = SplitFileReader::new(filename)?;
|
||||||
|
|
||||||
// Load & verify file header
|
// Load & verify file header
|
||||||
|
@ -690,7 +690,7 @@ impl BlockIO for DiscIOWIA {
|
||||||
&mut self,
|
&mut self,
|
||||||
out: &mut [u8],
|
out: &mut [u8],
|
||||||
sector: u32,
|
sector: u32,
|
||||||
partition: Option<&BPartitionInfo>,
|
partition: Option<&PartitionInfo>,
|
||||||
) -> io::Result<Option<Block>> {
|
) -> io::Result<Option<Block>> {
|
||||||
let mut chunk_size = self.disc.chunk_size.get();
|
let mut chunk_size = self.disc.chunk_size.get();
|
||||||
let sectors_per_chunk = chunk_size / SECTOR_SIZE as u32;
|
let sectors_per_chunk = chunk_size / SECTOR_SIZE as u32;
|
||||||
|
@ -705,7 +705,7 @@ impl BlockIO for DiscIOWIA {
|
||||||
|
|
||||||
let (group_index, group_sector) = if let Some(partition) = partition {
|
let (group_index, group_sector) = if let Some(partition) = partition {
|
||||||
// Find the partition
|
// Find the partition
|
||||||
let Some(wia_part) = self.partitions.get(partition.index as usize) else {
|
let Some(wia_part) = self.partitions.get(partition.index) else {
|
||||||
return Err(io::Error::new(
|
return Err(io::Error::new(
|
||||||
io::ErrorKind::InvalidInput,
|
io::ErrorKind::InvalidInput,
|
||||||
format!("Couldn't find WIA/RVZ partition index {}", partition.index),
|
format!("Couldn't find WIA/RVZ partition index {}", partition.index),
|
||||||
|
@ -803,7 +803,7 @@ impl BlockIO for DiscIOWIA {
|
||||||
|
|
||||||
let mut reader = (&mut self.inner).take_seek(group.data_size() as u64);
|
let mut reader = (&mut self.inner).take_seek(group.data_size() as u64);
|
||||||
let uncompressed_exception_lists =
|
let uncompressed_exception_lists =
|
||||||
matches!(self.disc.compression(), Compression::None | Compression::Purge)
|
matches!(self.disc.compression(), WIACompression::None | WIACompression::Purge)
|
||||||
|| !group.is_compressed();
|
|| !group.is_compressed();
|
||||||
if uncompressed_exception_lists {
|
if uncompressed_exception_lists {
|
||||||
self.exception_lists = read_exception_lists(
|
self.exception_lists = read_exception_lists(
|
||||||
|
@ -891,12 +891,27 @@ impl BlockIO for DiscIOWIA {
|
||||||
SECTOR_SIZE as u32
|
SECTOR_SIZE as u32
|
||||||
}
|
}
|
||||||
|
|
||||||
fn meta(&self) -> Result<DiscMeta> {
|
fn meta(&self) -> DiscMeta {
|
||||||
let mut meta = self.nkit_header.as_ref().map(DiscMeta::from).unwrap_or_default();
|
let mut result = DiscMeta {
|
||||||
meta.decrypted = true;
|
format: if self.header.is_rvz() { Format::Rvz } else { Format::Wia },
|
||||||
meta.needs_hash_recovery = true;
|
block_size: Some(self.disc.chunk_size.get()),
|
||||||
meta.lossless = true;
|
compression: match self.disc.compression() {
|
||||||
meta.disc_size = Some(self.header.iso_file_size.get());
|
WIACompression::None => Compression::None,
|
||||||
Ok(meta)
|
WIACompression::Purge => Compression::Purge,
|
||||||
|
WIACompression::Bzip2 => Compression::Bzip2,
|
||||||
|
WIACompression::Lzma => Compression::Lzma,
|
||||||
|
WIACompression::Lzma2 => Compression::Lzma2,
|
||||||
|
WIACompression::Zstandard => Compression::Zstandard,
|
||||||
|
},
|
||||||
|
decrypted: true,
|
||||||
|
needs_hash_recovery: true,
|
||||||
|
lossless: true,
|
||||||
|
disc_size: Some(self.header.iso_file_size.get()),
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
if let Some(nkit_header) = &self.nkit_header {
|
||||||
|
nkit_header.apply(&mut result);
|
||||||
|
}
|
||||||
|
result
|
||||||
}
|
}
|
||||||
}
|
}
|
|
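For the chunk arithmetic used in read_block above: WIA/RVZ data is divided into chunk_size-byte chunks, each covering chunk_size / SECTOR_SIZE sectors, and an absolute sector maps to a group index plus an offset within that group. A sketch of that mapping (illustrative only; partition data additionally subtracts a per-partition sector base, as the code above does):

```rust
const SECTOR_SIZE: u32 = 0x8000;

// Map an absolute sector to (group index, sector within the group)
// for a given WIA/RVZ chunk size.
fn group_for_sector(sector: u32, chunk_size: u32) -> (u32, u32) {
    let sectors_per_chunk = chunk_size / SECTOR_SIZE;
    (sector / sectors_per_chunk, sector % sectors_per_chunk)
}

#[test]
fn chunk_mapping() {
    // 2 MiB chunks hold 64 sectors of 0x8000 bytes each.
    assert_eq!(group_for_sector(0, 0x200000), (0, 0));
    assert_eq!(group_for_sector(65, 0x200000), (1, 1));
}
```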
@ -36,22 +36,22 @@
|
||||||
//! }
|
//! }
|
||||||
//! ```
|
//! ```
|
||||||
|
|
||||||
use std::path::Path;
|
use std::{
|
||||||
|
io::{Read, Seek},
|
||||||
|
path::Path,
|
||||||
|
};
|
||||||
|
|
||||||
pub use disc::{
|
pub use disc::{
|
||||||
AppLoaderHeader, DiscHeader, DolHeader, PartitionBase, PartitionHeader, PartitionInfo,
|
AppLoaderHeader, DiscHeader, DolHeader, PartitionBase, PartitionHeader, PartitionKind,
|
||||||
PartitionKind, PartitionMeta, BI2_SIZE, BOOT_SIZE, SECTOR_SIZE,
|
PartitionMeta, BI2_SIZE, BOOT_SIZE, SECTOR_SIZE,
|
||||||
};
|
};
|
||||||
pub use fst::{Fst, Node, NodeKind};
|
pub use fst::{Fst, Node, NodeKind};
|
||||||
pub use io::DiscMeta;
|
pub use io::{block::PartitionInfo, Compression, DiscMeta, Format};
|
||||||
use io::{block, block::BPartitionInfo};
|
|
||||||
pub use streams::ReadStream;
|
pub use streams::ReadStream;
|
||||||
|
|
||||||
use crate::disc::reader::{DiscReader, EncryptionMode};
|
|
||||||
|
|
||||||
mod disc;
|
mod disc;
|
||||||
mod fst;
|
mod fst;
|
||||||
pub mod io;
|
mod io;
|
||||||
mod streams;
|
mod streams;
|
||||||
mod util;
|
mod util;
|
||||||
|
|
||||||
|
@ -110,24 +110,15 @@ where E: ErrorContext
|
||||||
|
|
||||||
#[derive(Default, Debug, Clone)]
|
#[derive(Default, Debug, Clone)]
|
||||||
pub struct OpenOptions {
|
pub struct OpenOptions {
|
||||||
/// Wii: Validate partition data hashes while reading the disc image if present.
|
/// Wii: Rebuild partition data encryption and hashes if the underlying format stores data
|
||||||
pub validate_hashes: bool,
|
/// decrypted or with hashes removed. (e.g. WIA/RVZ, NFS)
|
||||||
/// Wii: Rebuild partition data hashes for the disc image if the underlying format
|
|
||||||
/// does not store them. (e.g. WIA/RVZ)
|
|
||||||
pub rebuild_hashes: bool,
|
|
||||||
/// Wii: Rebuild partition data encryption if the underlying format stores data decrypted.
|
|
||||||
/// (e.g. WIA/RVZ, NFS)
|
|
||||||
///
|
|
||||||
/// Unnecessary if only opening a disc partition stream, which will already provide a decrypted
|
|
||||||
/// stream. In this case, this will cause unnecessary processing.
|
|
||||||
///
|
|
||||||
/// Only valid in combination with `rebuild_hashes`, as the data encryption is derived from the
|
|
||||||
/// partition data hashes.
|
|
||||||
pub rebuild_encryption: bool,
|
pub rebuild_encryption: bool,
|
||||||
|
/// Wii: Validate partition data hashes while reading the disc image.
|
||||||
|
pub validate_hashes: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct Disc {
|
pub struct Disc {
|
||||||
pub reader: DiscReader,
|
reader: disc::reader::DiscReader,
|
||||||
options: OpenOptions,
|
options: OpenOptions,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -139,8 +130,8 @@ impl Disc {
|
||||||
|
|
||||||
/// Opens a disc image from a file path with custom options.
|
/// Opens a disc image from a file path with custom options.
|
||||||
pub fn new_with_options<P: AsRef<Path>>(path: P, options: &OpenOptions) -> Result<Disc> {
|
pub fn new_with_options<P: AsRef<Path>>(path: P, options: &OpenOptions) -> Result<Disc> {
|
||||||
let io = block::open(path.as_ref(), options)?;
|
let io = io::block::open(path.as_ref())?;
|
||||||
let reader = DiscReader::new(io, EncryptionMode::Encrypted)?;
|
let reader = disc::reader::DiscReader::new(io, options)?;
|
||||||
Ok(Disc { reader, options: options.clone() })
|
Ok(Disc { reader, options: options.clone() })
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -148,30 +139,36 @@ impl Disc {
|
||||||
pub fn header(&self) -> &DiscHeader { self.reader.header() }
|
pub fn header(&self) -> &DiscHeader { self.reader.header() }
|
||||||
|
|
||||||
/// Returns extra metadata included in the disc file format, if any.
|
/// Returns extra metadata included in the disc file format, if any.
|
||||||
pub fn meta(&self) -> Result<DiscMeta> { self.reader.io.meta() }
|
pub fn meta(&self) -> DiscMeta { self.reader.meta() }
|
||||||
|
|
||||||
/// The disc's size in bytes or an estimate if not stored by the format.
|
/// The disc's size in bytes, or an estimate if not stored by the format.
|
||||||
pub fn disc_size(&self) -> u64 { self.reader.disc_size() }
|
pub fn disc_size(&self) -> u64 { self.reader.disc_size() }
|
||||||
|
|
||||||
/// A list of partitions on the disc.
|
/// A list of Wii partitions on the disc.
|
||||||
///
|
///
|
||||||
/// For GameCube discs, this will return a single data partition spanning the entire disc.
|
/// For GameCube discs, this will return an empty slice.
|
||||||
pub fn partitions(&self) -> &[BPartitionInfo] { self.reader.partitions() }
|
pub fn partitions(&self) -> &[PartitionInfo] { self.reader.partitions() }
|
||||||
|
|
||||||
// /// Opens a new read stream for the base disc image.
|
/// Opens a new, decrypted partition read stream for the specified partition index.
|
||||||
// ///
|
///
|
||||||
// /// Generally does _not_ need to be used directly. Opening a partition will provide a
|
/// For GameCube discs, the index must always be 0.
|
||||||
// /// decrypted stream instead.
|
pub fn open_partition(&self, index: usize) -> Result<Box<dyn PartitionBase>> {
|
||||||
// pub fn open(&self) -> Result<Box<dyn ReadStream + '_>> { self.io.open() }
|
self.reader.open_partition(index, &self.options)
|
||||||
//
|
}
|
||||||
// /// Opens a new, decrypted partition read stream for the specified partition index.
|
|
||||||
// pub fn open_partition(&self, index: usize) -> Result<Box<dyn PartitionBase + '_>> {
|
/// Opens a new partition read stream for the first partition matching
|
||||||
// self.base.open_partition(self.io.as_ref(), index, &self.options)
|
/// the specified type.
|
||||||
// }
|
///
|
||||||
//
|
/// For GameCube discs, the kind must always be `PartitionKind::Data`.
|
||||||
// /// Opens a new partition read stream for the first partition matching
|
pub fn open_partition_kind(&self, kind: PartitionKind) -> Result<Box<dyn PartitionBase>> {
|
||||||
// /// the specified type.
|
self.reader.open_partition_kind(kind, &self.options)
|
||||||
// pub fn open_partition_kind(&self, kind: PartitionKind) -> Result<Box<dyn PartitionBase + '_>> {
|
}
|
||||||
// self.base.open_partition_kind(self.io.as_ref(), kind, &self.options)
|
}
|
||||||
// }
|
|
||||||
|
impl Read for Disc {
|
||||||
|
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { self.reader.read(buf) }
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Seek for Disc {
|
||||||
|
fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result<u64> { self.reader.seek(pos) }
|
||||||
}
|
}
|
|
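Taken together, the reworked public API above is used roughly like this, adapted from the nodtool code later in this commit (the path and printed fields are placeholders):

```rust
use std::io::Read;

use nod::{Disc, Fst, OpenOptions, PartitionKind};

fn dump_info(path: &str) -> nod::Result<()> {
    let mut disc = Disc::new_with_options(path, &OpenOptions {
        rebuild_encryption: true,
        validate_hashes: false,
    })?;
    println!("Game ID: {}", disc.header().game_id_str());
    println!("Format:  {}", disc.meta().format);

    // Read the raw (re-encrypted) disc image through the Read impl.
    let mut buf = [0u8; 0x8000];
    disc.read_exact(&mut buf).expect("read failed");

    // Or open the decrypted data partition and walk its filesystem.
    let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
    let meta = partition.meta()?;
    let fst = Fst::new(&meta.raw_fst)?;
    println!("FST entries: {}", fst.iter().count());
    Ok(())
}
```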
@ -0,0 +1,46 @@
|
||||||
|
[package]
|
||||||
|
name = "nodtool"
|
||||||
|
version = "0.2.0"
|
||||||
|
edition = "2021"
|
||||||
|
rust-version = "1.59.0"
|
||||||
|
authors = ["Luke Street <luke@street.dev>"]
|
||||||
|
license = "MIT OR Apache-2.0"
|
||||||
|
repository = "https://github.com/encounter/nod-rs"
|
||||||
|
documentation = "https://docs.rs/nod"
|
||||||
|
readme = "../README.md"
|
||||||
|
description = """
|
||||||
|
CLI tool for verifying and converting GameCube and Wii disc images.
|
||||||
|
"""
|
||||||
|
keywords = ["gamecube", "wii", "iso", "wbfs", "rvz"]
|
||||||
|
categories = ["command-line-utilities", "parser-implementations"]
|
||||||
|
build = "build.rs"
|
||||||
|
|
||||||
|
[features]
|
||||||
|
asm = ["md-5/asm", "nod/asm", "sha1/asm"]
|
||||||
|
nightly = ["crc32fast/nightly"]
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
argp = "0.3.0"
|
||||||
|
base16ct = "0.2.0"
|
||||||
|
crc32fast = "1.4.0"
|
||||||
|
digest = "0.10.7"
|
||||||
|
enable-ansi-support = "0.2.1"
|
||||||
|
indicatif = "0.17.8"
|
||||||
|
itertools = "0.12.1"
|
||||||
|
log = "0.4.20"
|
||||||
|
md-5 = "0.10.6"
|
||||||
|
nod = { path = "../nod" }
|
||||||
|
sha1 = "0.10.6"
|
||||||
|
size = "0.4.1"
|
||||||
|
supports-color = "3.0.0"
|
||||||
|
tracing = "0.1.40"
|
||||||
|
tracing-attributes = "0.1.27"
|
||||||
|
tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
|
||||||
|
xxhash-rust = { version = "0.8.10", features = ["xxh64"] }
|
||||||
|
zerocopy = { version = "0.7.32", features = ["alloc", "derive"] }
|
||||||
|
|
||||||
|
[build-dependencies]
|
||||||
|
hex = { version = "0.4.3", features = ["serde"] }
|
||||||
|
quick-xml = { version = "0.31.0", features = ["serialize"] }
|
||||||
|
serde = { version = "1.0.197", features = ["derive"] }
|
||||||
|
zerocopy = { version = "0.7.32", features = ["alloc", "derive"] }
|
File diff suppressed because it is too large
File diff suppressed because it is too large
|
@ -0,0 +1,109 @@
|
||||||
|
use std::{
|
||||||
|
env,
|
||||||
|
fs::File,
|
||||||
|
io::{BufReader, BufWriter, Write},
|
||||||
|
mem::size_of,
|
||||||
|
path::Path,
|
||||||
|
};
|
||||||
|
|
||||||
|
use hex::deserialize as deserialize_hex;
|
||||||
|
use serde::Deserialize;
|
||||||
|
use zerocopy::AsBytes;
|
||||||
|
|
||||||
|
// Keep in sync with build.rs
|
||||||
|
#[derive(Clone, Debug, AsBytes)]
|
||||||
|
#[repr(C, align(4))]
|
||||||
|
struct Header {
|
||||||
|
entry_count: u32,
|
||||||
|
entry_size: u32,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Keep in sync with redump.rs
|
||||||
|
#[derive(Clone, Debug, AsBytes)]
|
||||||
|
#[repr(C, align(4))]
|
||||||
|
struct GameEntry {
|
||||||
|
crc32: u32,
|
||||||
|
string_table_offset: u32,
|
||||||
|
sectors: u32,
|
||||||
|
md5: [u8; 16],
|
||||||
|
sha1: [u8; 20],
|
||||||
|
}
|
||||||
|
|
||||||
|
fn main() {
|
||||||
|
let output = std::process::Command::new("git")
|
||||||
|
.args(["rev-parse", "HEAD"])
|
||||||
|
.output()
|
||||||
|
.expect("Failed to execute git");
|
||||||
|
let rev = String::from_utf8(output.stdout).expect("Failed to parse git output");
|
||||||
|
println!("cargo:rustc-env=GIT_COMMIT_SHA={rev}");
|
||||||
|
println!("cargo:rustc-rerun-if-changed=.git/HEAD");
|
||||||
|
|
||||||
|
let out_dir = env::var("OUT_DIR").unwrap();
|
||||||
|
let dest_path = Path::new(&out_dir).join("parsed-dats.bin");
|
||||||
|
let mut f = BufWriter::new(File::create(&dest_path).unwrap());
|
||||||
|
|
||||||
|
// Parse dat files
|
||||||
|
let mut entries = Vec::<(GameEntry, String)>::new();
|
||||||
|
for path in ["assets/redump-gc.dat", "assets/redump-wii.dat"] {
|
||||||
|
let file = BufReader::new(File::open(path).expect("Failed to open dat file"));
|
||||||
|
let dat: DatFile = quick_xml::de::from_reader(file).expect("Failed to parse dat file");
|
||||||
|
entries.extend(dat.games.into_iter().map(|game| {
|
||||||
|
(
|
||||||
|
GameEntry {
|
||||||
|
string_table_offset: 0,
|
||||||
|
crc32: u32::from_be_bytes(game.rom.crc32),
|
||||||
|
md5: game.rom.md5,
|
||||||
|
sha1: game.rom.sha1,
|
||||||
|
sectors: game.rom.size.div_ceil(0x8000) as u32,
|
||||||
|
},
|
||||||
|
game.name,
|
||||||
|
)
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort by CRC32
|
||||||
|
entries.sort_by_key(|(entry, _)| entry.crc32);
|
||||||
|
|
||||||
|
// Write game entries
|
||||||
|
let header =
|
||||||
|
Header { entry_count: entries.len() as u32, entry_size: size_of::<GameEntry>() as u32 };
|
||||||
|
f.write_all(header.as_bytes()).unwrap();
|
||||||
|
let mut string_table_offset = 0u32;
|
||||||
|
for (entry, name) in &mut entries {
|
||||||
|
entry.string_table_offset = string_table_offset;
|
||||||
|
f.write_all(entry.as_bytes()).unwrap();
|
||||||
|
string_table_offset += name.as_bytes().len() as u32 + 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write string table
|
||||||
|
for (_, name) in &entries {
|
||||||
|
f.write_all(&(name.len() as u32).to_le_bytes()).unwrap();
|
||||||
|
f.write_all(name.as_bytes()).unwrap();
|
||||||
|
}
|
||||||
|
f.flush().unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, Deserialize)]
|
||||||
|
struct DatFile {
|
||||||
|
#[serde(rename = "game")]
|
||||||
|
games: Vec<DatGame>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, Deserialize)]
|
||||||
|
struct DatGame {
|
||||||
|
#[serde(rename = "@name")]
|
||||||
|
name: String,
|
||||||
|
rom: DatGameRom,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, Deserialize)]
|
||||||
|
struct DatGameRom {
|
||||||
|
#[serde(rename = "@size")]
|
||||||
|
size: u64,
|
||||||
|
#[serde(rename = "@crc", deserialize_with = "deserialize_hex")]
|
||||||
|
crc32: [u8; 4],
|
||||||
|
#[serde(rename = "@md5", deserialize_with = "deserialize_hex")]
|
||||||
|
md5: [u8; 16],
|
||||||
|
#[serde(rename = "@sha1", deserialize_with = "deserialize_hex")]
|
||||||
|
sha1: [u8; 20],
|
||||||
|
}
|
|
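The build script above embeds the parsed redump .dat files as a sorted array of fixed-size GameEntry records followed by a length-prefixed string table. The redump module (its diff is suppressed above) presumably locates an entry by binary-searching on CRC32 and then reading the name at string_table_offset; a sketch of such a lookup against the layout written here:

```rust
// Sketch of reading back the table written by this build script.
// `entries` is the GameEntry array and `string_table` the bytes after it.
fn find_by_crc32<'a>(
    entries: &'a [GameEntry],
    string_table: &'a [u8],
    crc32: u32,
) -> Option<(&'a GameEntry, &'a str)> {
    // Entries were sorted by CRC32 above, so binary search works.
    let idx = entries.binary_search_by_key(&crc32, |e| e.crc32).ok()?;
    let entry = &entries[idx];
    // Each name is stored as a little-endian u32 length followed by UTF-8 bytes.
    let off = entry.string_table_offset as usize;
    let len = u32::from_le_bytes(string_table[off..off + 4].try_into().ok()?) as usize;
    let name = std::str::from_utf8(&string_table[off + 4..off + 4 + len]).ok()?;
    Some((entry, name))
}
```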
@ -0,0 +1,96 @@
|
||||||
|
use std::{
|
||||||
|
fmt,
|
||||||
|
sync::{
|
||||||
|
mpsc::{sync_channel, SyncSender},
|
||||||
|
Arc,
|
||||||
|
},
|
||||||
|
thread,
|
||||||
|
thread::JoinHandle,
|
||||||
|
};
|
||||||
|
|
||||||
|
use digest::{Digest, Output};
|
||||||
|
|
||||||
|
pub type DigestThread = (SyncSender<Arc<[u8]>>, JoinHandle<DigestResult>);
|
||||||
|
|
||||||
|
pub fn digest_thread<H>() -> DigestThread
|
||||||
|
where H: Hasher + Send + 'static {
|
||||||
|
let (tx, rx) = sync_channel::<Arc<[u8]>>(1);
|
||||||
|
let handle = thread::spawn(move || {
|
||||||
|
let mut hasher = H::new();
|
||||||
|
while let Ok(data) = rx.recv() {
|
||||||
|
hasher.update(data.as_ref());
|
||||||
|
}
|
||||||
|
hasher.finalize()
|
||||||
|
});
|
||||||
|
(tx, handle)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||||
|
pub enum DigestResult {
|
||||||
|
Crc32(u32),
|
||||||
|
Md5([u8; 16]),
|
||||||
|
Sha1([u8; 20]),
|
||||||
|
Xxh64(u64),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl DigestResult {
|
||||||
|
pub fn name(&self) -> &'static str {
|
||||||
|
match self {
|
||||||
|
DigestResult::Crc32(_) => "CRC32",
|
||||||
|
DigestResult::Md5(_) => "MD5",
|
||||||
|
DigestResult::Sha1(_) => "SHA-1",
|
||||||
|
DigestResult::Xxh64(_) => "XXH64",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Display for DigestResult {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
match self {
|
||||||
|
DigestResult::Crc32(crc) => write!(f, "{:08x}", crc),
|
||||||
|
DigestResult::Md5(md5) => write!(f, "{:032x}", <Output<md5::Md5>>::from(*md5)),
|
||||||
|
DigestResult::Sha1(sha1) => write!(f, "{:040x}", <Output<sha1::Sha1>>::from(*sha1)),
|
||||||
|
DigestResult::Xxh64(xxh64) => write!(f, "{:016x}", xxh64),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub trait Hasher {
|
||||||
|
fn new() -> Self;
|
||||||
|
fn finalize(self) -> DigestResult;
|
||||||
|
fn update(&mut self, data: &[u8]);
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Hasher for md5::Md5 {
|
||||||
|
fn new() -> Self { Digest::new() }
|
||||||
|
|
||||||
|
fn finalize(self) -> DigestResult { DigestResult::Md5(Digest::finalize(self).into()) }
|
||||||
|
|
||||||
|
fn update(&mut self, data: &[u8]) { Digest::update(self, data) }
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Hasher for sha1::Sha1 {
|
||||||
|
fn new() -> Self { Digest::new() }
|
||||||
|
|
||||||
|
fn finalize(self) -> DigestResult { DigestResult::Sha1(Digest::finalize(self).into()) }
|
||||||
|
|
||||||
|
fn update(&mut self, data: &[u8]) { Digest::update(self, data) }
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Hasher for crc32fast::Hasher {
|
||||||
|
fn new() -> Self { crc32fast::Hasher::new() }
|
||||||
|
|
||||||
|
fn finalize(self) -> DigestResult { DigestResult::Crc32(crc32fast::Hasher::finalize(self)) }
|
||||||
|
|
||||||
|
fn update(&mut self, data: &[u8]) { crc32fast::Hasher::update(self, data) }
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Hasher for xxhash_rust::xxh64::Xxh64 {
|
||||||
|
fn new() -> Self { xxhash_rust::xxh64::Xxh64::new(0) }
|
||||||
|
|
||||||
|
fn finalize(self) -> DigestResult {
|
||||||
|
DigestResult::Xxh64(xxhash_rust::xxh64::Xxh64::digest(&self))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn update(&mut self, data: &[u8]) { xxhash_rust::xxh64::Xxh64::update(self, data) }
|
||||||
|
}
|
|
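The digest helpers above move hashing off the read path: each enabled algorithm gets its own thread fed through a bounded channel, and dropping the sender lets the thread finalize. A condensed sketch of the usage pattern found in the convert/verify code later in this commit:

```rust
use std::sync::Arc;

// Spawn one hashing thread per algorithm, stream data to all of them,
// then close the channels and collect the results.
fn hash_chunks(chunks: impl Iterator<Item = Vec<u8>>) -> Vec<DigestResult> {
    let threads: Vec<DigestThread> =
        vec![digest_thread::<crc32fast::Hasher>(), digest_thread::<sha1::Sha1>()];
    for chunk in chunks {
        let arc = Arc::<[u8]>::from(chunk.into_boxed_slice());
        for (tx, _) in &threads {
            tx.send(arc.clone()).expect("digest thread exited");
        }
    }
    threads
        .into_iter()
        .map(|(tx, handle)| {
            drop(tx); // closing the channel lets the thread finalize
            handle.join().unwrap()
        })
        .collect()
}
```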
@ -1,4 +1,6 @@
|
||||||
mod argp_version;
|
mod argp_version;
|
||||||
|
mod digest;
|
||||||
|
mod redump;
|
||||||
|
|
||||||
use std::{
|
use std::{
|
||||||
borrow::Cow,
|
borrow::Cow,
|
||||||
|
@ -12,23 +14,20 @@ use std::{
|
||||||
io::{BufWriter, Read, Write},
|
io::{BufWriter, Read, Write},
|
||||||
path::{Path, PathBuf},
|
path::{Path, PathBuf},
|
||||||
str::FromStr,
|
str::FromStr,
|
||||||
sync::{
|
sync::{mpsc::sync_channel, Arc},
|
||||||
mpsc::{sync_channel, SyncSender},
|
|
||||||
Arc,
|
|
||||||
},
|
|
||||||
thread,
|
thread,
|
||||||
thread::JoinHandle,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
use argp::{FromArgValue, FromArgs};
|
use argp::{FromArgValue, FromArgs};
|
||||||
use digest::{Digest, Output};
|
use digest::{digest_thread, DigestResult};
|
||||||
use enable_ansi_support::enable_ansi_support;
|
use enable_ansi_support::enable_ansi_support;
|
||||||
use indicatif::{ProgressBar, ProgressState, ProgressStyle};
|
use indicatif::{ProgressBar, ProgressState, ProgressStyle};
|
||||||
use itertools::Itertools;
|
use itertools::Itertools;
|
||||||
use nod::{
|
use nod::{
|
||||||
Disc, DiscHeader, Fst, Node, OpenOptions, PartitionBase, PartitionKind, PartitionMeta, Result,
|
Compression, Disc, DiscHeader, DiscMeta, Fst, Node, OpenOptions, PartitionBase, PartitionKind,
|
||||||
ResultContext, SECTOR_SIZE,
|
PartitionMeta, Result, ResultContext, SECTOR_SIZE,
|
||||||
};
|
};
|
||||||
|
use size::Size;
|
||||||
use supports_color::Stream;
|
use supports_color::Stream;
|
||||||
use tracing::level_filters::LevelFilter;
|
use tracing::level_filters::LevelFilter;
|
||||||
use tracing_subscriber::EnvFilter;
|
use tracing_subscriber::EnvFilter;
|
||||||
|
@ -107,12 +106,12 @@ struct ConvertArgs {
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(FromArgs, Debug)]
|
#[derive(FromArgs, Debug)]
|
||||||
/// Verifies a disc image.
|
/// Verifies disc images.
|
||||||
#[argp(subcommand, name = "verify")]
|
#[argp(subcommand, name = "verify")]
|
||||||
struct VerifyArgs {
|
struct VerifyArgs {
|
||||||
#[argp(positional)]
|
#[argp(positional)]
|
||||||
/// path to disc image
|
/// path to disc image(s)
|
||||||
file: PathBuf,
|
file: Vec<PathBuf>,
|
||||||
#[argp(switch)]
|
#[argp(switch)]
|
||||||
/// enable MD5 hashing (slower)
|
/// enable MD5 hashing (slower)
|
||||||
md5: bool,
|
md5: bool,
|
||||||
|
@ -226,8 +225,24 @@ fn main() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn print_header(header: &DiscHeader) {
|
fn print_header(header: &DiscHeader, meta: &DiscMeta) {
|
||||||
println!("Name: {}", header.game_title_str());
|
println!("Format: {}", meta.format);
|
||||||
|
if meta.compression != Compression::None {
|
||||||
|
println!("Compression: {}", meta.compression);
|
||||||
|
}
|
||||||
|
if let Some(block_size) = meta.block_size {
|
||||||
|
println!("Block size: {}", Size::from_bytes(block_size));
|
||||||
|
}
|
||||||
|
println!("Lossless: {}", meta.lossless);
|
||||||
|
println!(
|
||||||
|
"Verification data: {}",
|
||||||
|
meta.crc32.is_some()
|
||||||
|
|| meta.md5.is_some()
|
||||||
|
|| meta.sha1.is_some()
|
||||||
|
|| meta.xxhash64.is_some()
|
||||||
|
);
|
||||||
|
println!();
|
||||||
|
println!("Title: {}", header.game_title_str());
|
||||||
println!("Game ID: {}", header.game_id_str());
|
println!("Game ID: {}", header.game_id_str());
|
||||||
println!("Disc {}, Revision {}", header.disc_num + 1, header.disc_version);
|
println!("Disc {}, Revision {}", header.disc_num + 1, header.disc_version);
|
||||||
if header.no_partition_hashes != 0 {
|
if header.no_partition_hashes != 0 {
|
||||||
|
@ -240,12 +255,12 @@ fn print_header(header: &DiscHeader) {
|
||||||
|
|
||||||
fn info(args: InfoArgs) -> Result<()> {
|
fn info(args: InfoArgs) -> Result<()> {
|
||||||
let disc = Disc::new_with_options(args.file, &OpenOptions {
|
let disc = Disc::new_with_options(args.file, &OpenOptions {
|
||||||
rebuild_hashes: false,
|
|
||||||
validate_hashes: false,
|
|
||||||
rebuild_encryption: false,
|
rebuild_encryption: false,
|
||||||
|
validate_hashes: false,
|
||||||
})?;
|
})?;
|
||||||
let header = disc.header();
|
let header = disc.header();
|
||||||
print_header(header);
|
let meta = disc.meta();
|
||||||
|
print_header(header, &meta);
|
||||||
|
|
||||||
if header.is_wii() {
|
if header.is_wii() {
|
||||||
for (idx, info) in disc.partitions().iter().enumerate() {
|
for (idx, info) in disc.partitions().iter().enumerate() {
|
||||||
|
@ -260,7 +275,7 @@ fn info(args: InfoArgs) -> Result<()> {
|
||||||
"\tData offset / size: {:#X} / {:#X} ({})",
|
"\tData offset / size: {:#X} / {:#X} ({})",
|
||||||
info.data_start_sector as u64 * SECTOR_SIZE as u64,
|
info.data_start_sector as u64 * SECTOR_SIZE as u64,
|
||||||
data_size,
|
data_size,
|
||||||
file_size::fit_4(data_size)
|
Size::from_bytes(data_size)
|
||||||
);
|
);
|
||||||
println!(
|
println!(
|
||||||
"\tTMD offset / size: {:#X} / {:#X}",
|
"\tTMD offset / size: {:#X} / {:#X}",
|
||||||
|
@ -278,26 +293,25 @@ fn info(args: InfoArgs) -> Result<()> {
|
||||||
info.header.h3_table_size()
|
info.header.h3_table_size()
|
||||||
);
|
);
|
||||||
|
|
||||||
// let mut partition = disc.open_partition(idx)?;
|
let mut partition = disc.open_partition(idx)?;
|
||||||
// let meta = partition.meta()?;
|
let meta = partition.meta()?;
|
||||||
// let header = meta.header();
|
let tmd = meta.tmd_header();
|
||||||
// let tmd = meta.tmd_header();
|
let title_id_str = if let Some(tmd) = tmd {
|
||||||
// let title_id_str = if let Some(tmd) = tmd {
|
format!(
|
||||||
// format!(
|
"{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}",
|
||||||
// "{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}",
|
tmd.title_id[0],
|
||||||
// tmd.title_id[0],
|
tmd.title_id[1],
|
||||||
// tmd.title_id[1],
|
tmd.title_id[2],
|
||||||
// tmd.title_id[2],
|
tmd.title_id[3],
|
||||||
// tmd.title_id[3],
|
tmd.title_id[4],
|
||||||
// tmd.title_id[4],
|
tmd.title_id[5],
|
||||||
// tmd.title_id[5],
|
tmd.title_id[6],
|
||||||
// tmd.title_id[6],
|
tmd.title_id[7]
|
||||||
// tmd.title_id[7]
|
)
|
||||||
// )
|
} else {
|
||||||
// } else {
|
"N/A".to_string()
|
||||||
let title_id_str = "N/A".to_string();
|
};
|
||||||
// };
|
println!("\tTitle: {}", info.disc_header.game_title_str());
|
||||||
println!("\tName: {}", info.disc_header.game_title_str());
|
|
||||||
println!("\tGame ID: {} ({})", info.disc_header.game_id_str(), title_id_str);
|
println!("\tGame ID: {} ({})", info.disc_header.game_id_str(), title_id_str);
|
||||||
println!(
|
println!(
|
||||||
"\tDisc {}, Revision {}",
|
"\tDisc {}, Revision {}",
|
||||||
|
@ -321,19 +335,24 @@ fn convert(args: ConvertArgs) -> Result<()> {
|
||||||
convert_and_verify(&args.file, Some(&args.out), args.md5)
|
convert_and_verify(&args.file, Some(&args.out), args.md5)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn verify(args: VerifyArgs) -> Result<()> { convert_and_verify(&args.file, None, args.md5) }
|
fn verify(args: VerifyArgs) -> Result<()> {
|
||||||
|
for file in &args.file {
|
||||||
|
convert_and_verify(file, None, args.md5)?;
|
||||||
|
println!();
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
fn convert_and_verify(in_file: &Path, out_file: Option<&Path>, md5: bool) -> Result<()> {
|
fn convert_and_verify(in_file: &Path, out_file: Option<&Path>, md5: bool) -> Result<()> {
|
||||||
println!("Loading {}", in_file.display());
|
println!("Loading {}", in_file.display());
|
||||||
let mut disc = Disc::new_with_options(in_file, &OpenOptions {
|
let mut disc = Disc::new_with_options(in_file, &OpenOptions {
|
||||||
rebuild_hashes: true,
|
|
||||||
validate_hashes: false,
|
|
||||||
rebuild_encryption: true,
|
rebuild_encryption: true,
|
||||||
|
validate_hashes: false,
|
||||||
})?;
|
})?;
|
||||||
let header = disc.header();
|
let header = disc.header();
|
||||||
print_header(header);
|
let meta = disc.meta();
|
||||||
|
print_header(header, &meta);
|
||||||
|
|
||||||
let meta = disc.meta()?;
|
|
||||||
let disc_size = disc.disc_size();
|
let disc_size = disc.disc_size();
|
||||||
|
|
||||||
let mut file = if let Some(out_file) = out_file {
|
let mut file = if let Some(out_file) = out_file {
|
||||||
|
@ -345,7 +364,11 @@ fn convert_and_verify(in_file: &Path, out_file: Option<&Path>, md5: bool) -> Res
|
||||||
None
|
None
|
||||||
};
|
};
|
||||||
|
|
||||||
println!("\nHashing...");
|
if out_file.is_some() {
|
||||||
|
println!("\nConverting...");
|
||||||
|
} else {
|
||||||
|
println!("\nVerifying...");
|
||||||
|
}
|
||||||
let pb = ProgressBar::new(disc_size);
|
let pb = ProgressBar::new(disc_size);
|
||||||
pb.set_style(ProgressStyle::with_template("{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {bytes}/{total_bytes} ({bytes_per_sec}, {eta})")
|
pb.set_style(ProgressStyle::with_template("{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {bytes}/{total_bytes} ({bytes_per_sec}, {eta})")
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -394,7 +417,7 @@ fn convert_and_verify(in_file: &Path, out_file: Option<&Path>, md5: bool) -> Res
|
||||||
let mut buf = <u8>::new_box_slice_zeroed(BUFFER_SIZE);
|
let mut buf = <u8>::new_box_slice_zeroed(BUFFER_SIZE);
|
||||||
while total_read < disc_size {
|
while total_read < disc_size {
|
||||||
let read = min(BUFFER_SIZE as u64, disc_size - total_read) as usize;
|
let read = min(BUFFER_SIZE as u64, disc_size - total_read) as usize;
|
||||||
disc.reader.read_exact(&mut buf[..read]).with_context(|| {
|
disc.read_exact(&mut buf[..read]).with_context(|| {
|
||||||
format!("Reading {} bytes at disc offset {}", BUFFER_SIZE, total_read)
|
format!("Reading {} bytes at disc offset {}", BUFFER_SIZE, total_read)
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
|
@@ -410,61 +433,65 @@ fn convert_and_verify(in_file: &Path, out_file: Option<&Path>, md5: bool) -> Res
     println!();
     if let Some(path) = out_file {
-        println!("Wrote {} to {}", file_size::fit_4(total_read), path.display());
+        println!("Wrote {} to {}", Size::from_bytes(total_read), path.display());
     }
 
     println!();
+    let mut crc32 = None;
+    let mut md5 = None;
+    let mut sha1 = None;
+    let mut xxh64 = None;
     for (tx, handle) in digest_threads {
         drop(tx); // Close channel
         match handle.join().unwrap() {
-            DigestResult::Crc32(crc) => {
-                print!("CRC32: {:08x}", crc);
-                if let Some(expected_crc) = meta.crc32 {
-                    if expected_crc != crc {
-                        print!(" ❌ (expected: {:08x})", expected_crc);
-                    } else {
-                        print!(" ✅");
-                    }
-                }
-                println!();
-            }
-            DigestResult::Md5(md5) => {
-                print!("MD5: {:032x}", md5);
-                if let Some(expected_md5) = meta.md5 {
-                    let expected_md5 = <Output<md5::Md5>>::from(expected_md5);
-                    if expected_md5 != md5 {
-                        print!(" ❌ (expected: {:032x})", expected_md5);
-                    } else {
-                        print!(" ✅");
-                    }
-                }
-                println!();
-            }
-            DigestResult::Sha1(sha1) => {
-                print!("SHA-1: {:040x}", sha1);
-                if let Some(expected_sha1) = meta.sha1 {
-                    let expected_sha1 = <Output<sha1::Sha1>>::from(expected_sha1);
-                    if expected_sha1 != sha1 {
-                        print!(" ❌ (expected: {:040x})", expected_sha1);
-                    } else {
-                        print!(" ✅");
-                    }
-                }
-                println!();
-            }
-            DigestResult::Xxh64(xxh64) => {
-                print!("XXH64: {:016x}", xxh64);
-                if let Some(expected_xxh64) = meta.xxhash64 {
-                    if expected_xxh64 != xxh64 {
-                        print!(" ❌ (expected: {:016x})", expected_xxh64);
-                    } else {
-                        print!(" ✅");
-                    }
-                }
-                println!();
-            }
+            DigestResult::Crc32(v) => crc32 = Some(v),
+            DigestResult::Md5(v) => md5 = Some(v),
+            DigestResult::Sha1(v) => sha1 = Some(v),
+            DigestResult::Xxh64(v) => xxh64 = Some(v),
         }
     }
 
+    let redump_entry = if let (Some(crc32), Some(sha1)) = (crc32, sha1) {
+        redump::find_by_hashes(crc32, sha1.into())
+    } else {
+        None
+    };
+    let expected_crc32 = meta.crc32.or(redump_entry.as_ref().map(|e| e.crc32));
+    let expected_md5 = meta.md5.or(redump_entry.as_ref().map(|e| e.md5));
+    let expected_sha1 = meta.sha1.or(redump_entry.as_ref().map(|e| e.sha1));
+    let expected_xxh64 = meta.xxhash64;
+
+    fn print_digest(value: DigestResult, expected: Option<DigestResult>) {
+        print!("{:<6}: ", value.name());
+        if let Some(expected) = expected {
+            if expected != value {
+                print!("{} ❌ (expected: {})", value, expected);
+            } else {
+                print!("{} ✅", value);
+            }
+        } else {
+            print!("{}", value);
+        }
+        println!();
+    }
+
+    if let Some(entry) = &redump_entry {
+        println!("Redump: {} ✅", entry.name);
+    } else {
+        println!("Redump: Not found ❌");
+    }
+    if let Some(crc32) = crc32 {
+        print_digest(DigestResult::Crc32(crc32), expected_crc32.map(DigestResult::Crc32));
+    }
+    if let Some(md5) = md5 {
+        print_digest(DigestResult::Md5(md5), expected_md5.map(DigestResult::Md5));
+    }
+    if let Some(sha1) = sha1 {
+        print_digest(DigestResult::Sha1(sha1), expected_sha1.map(DigestResult::Sha1));
+    }
+    if let Some(xxh64) = xxh64 {
+        print_digest(DigestResult::Xxh64(xxh64), expected_xxh64.map(DigestResult::Xxh64));
+    }
     Ok(())
 }
 
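Note: the reworked hunk above relies on DigestResult exposing name(), Display, and PartialEq so that print_digest can render and compare values. Those impls are not part of this hunk; the following is a hedged sketch of what they would look like, with the format widths taken from the removed per-algorithm print statements.

// Hedged sketch, not part of this commit: assumed helper impls for DigestResult.
impl DigestResult {
    fn name(&self) -> &'static str {
        match self {
            DigestResult::Crc32(_) => "CRC32",
            DigestResult::Md5(_) => "MD5",
            DigestResult::Sha1(_) => "SHA-1",
            DigestResult::Xxh64(_) => "XXH64",
        }
    }
}

impl std::fmt::Display for DigestResult {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Widths match the removed print! calls (8/32/40/16 hex digits).
        match self {
            DigestResult::Crc32(v) => write!(f, "{:08x}", v),
            DigestResult::Md5(v) => write!(f, "{:032x}", v),
            DigestResult::Sha1(v) => write!(f, "{:040x}", v),
            DigestResult::Xxh64(v) => write!(f, "{:016x}", v),
        }
    }
}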
@@ -490,42 +517,41 @@ fn extract(args: ExtractArgs) -> Result<()> {
         output_dir = args.file.with_extension("");
     }
     let disc = Disc::new_with_options(&args.file, &OpenOptions {
-        rebuild_hashes: args.validate,
-        validate_hashes: args.validate,
         rebuild_encryption: false,
+        validate_hashes: args.validate,
     })?;
     let is_wii = disc.header().is_wii();
-    // let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
-    // let meta = partition.meta()?;
-    // extract_sys_files(meta.as_ref(), &output_dir.join("sys"), args.quiet)?;
-    //
-    // // Extract FST
-    // let files_dir = output_dir.join("files");
-    // let fst = Fst::new(&meta.raw_fst)?;
-    // let mut path_segments = Vec::<(Cow<str>, usize)>::new();
-    // for (idx, node, name) in fst.iter() {
-    //     // Remove ended path segments
-    //     let mut new_size = 0;
-    //     for (_, end) in path_segments.iter() {
-    //         if *end == idx {
-    //             break;
-    //         }
-    //         new_size += 1;
-    //     }
-    //     path_segments.truncate(new_size);
-    //
-    //     // Add the new path segment
-    //     let end = if node.is_dir() { node.length(false) as usize } else { idx + 1 };
-    //     path_segments.push((name?, end));
-    //
-    //     let path = path_segments.iter().map(|(name, _)| name.as_ref()).join("/");
-    //     if node.is_dir() {
-    //         fs::create_dir_all(files_dir.join(&path))
-    //             .with_context(|| format!("Creating directory {}", path))?;
-    //     } else {
-    //         extract_node(node, partition.as_mut(), &files_dir, &path, is_wii, args.quiet)?;
-    //     }
-    // }
+    let mut partition = disc.open_partition_kind(PartitionKind::Data)?;
+    let meta = partition.meta()?;
+    extract_sys_files(meta.as_ref(), &output_dir.join("sys"), args.quiet)?;
+
+    // Extract FST
+    let files_dir = output_dir.join("files");
+    let fst = Fst::new(&meta.raw_fst)?;
+    let mut path_segments = Vec::<(Cow<str>, usize)>::new();
+    for (idx, node, name) in fst.iter() {
+        // Remove ended path segments
+        let mut new_size = 0;
+        for (_, end) in path_segments.iter() {
+            if *end == idx {
+                break;
+            }
+            new_size += 1;
+        }
+        path_segments.truncate(new_size);
+
+        // Add the new path segment
+        let end = if node.is_dir() { node.length(false) as usize } else { idx + 1 };
+        path_segments.push((name?, end));
+
+        let path = path_segments.iter().map(|(name, _)| name.as_ref()).join("/");
+        if node.is_dir() {
+            fs::create_dir_all(files_dir.join(&path))
+                .with_context(|| format!("Creating directory {}", path))?;
+        } else {
+            extract_node(node, partition.as_mut(), &files_dir, &path, is_wii, args.quiet)?;
+        }
+    }
     Ok(())
 }
 
@@ -542,11 +568,7 @@ fn extract_sys_files(data: &PartitionMeta, out_dir: &Path, quiet: bool) -> Resul
 
 fn extract_file(bytes: &[u8], out_path: &Path, quiet: bool) -> Result<()> {
     if !quiet {
-        println!(
-            "Extracting {} (size: {})",
-            out_path.display(),
-            file_size::fit_4(bytes.len() as u64)
-        );
+        println!("Extracting {} (size: {})", out_path.display(), Size::from_bytes(bytes.len()));
     }
     fs::write(out_path, bytes).with_context(|| format!("Writing file {}", out_path.display()))?;
     Ok(())
@@ -565,7 +587,7 @@ fn extract_node(
         println!(
             "Extracting {} (size: {})",
             file_path.display(),
-            file_size::fit_4(node.length(is_wii))
+            Size::from_bytes(node.length(is_wii))
         );
     }
     let file = File::create(&file_path)
@@ -583,65 +605,3 @@ fn extract_node(
     w.flush().with_context(|| format!("Flushing file {}", file_path.display()))?;
     Ok(())
 }
-
-type DigestThread = (SyncSender<Arc<[u8]>>, JoinHandle<DigestResult>);
-
-fn digest_thread<H>() -> DigestThread
-where H: Hasher + Send + 'static {
-    let (tx, rx) = sync_channel::<Arc<[u8]>>(1);
-    let handle = thread::spawn(move || {
-        let mut hasher = H::new();
-        while let Ok(data) = rx.recv() {
-            hasher.update(data.as_ref());
-        }
-        hasher.finalize()
-    });
-    (tx, handle)
-}
-
-enum DigestResult {
-    Crc32(u32),
-    Md5(Output<md5::Md5>),
-    Sha1(Output<sha1::Sha1>),
-    Xxh64(u64),
-}
-
-trait Hasher {
-    fn new() -> Self;
-    fn finalize(self) -> DigestResult;
-    fn update(&mut self, data: &[u8]);
-}
-
-impl Hasher for md5::Md5 {
-    fn new() -> Self { Digest::new() }
-
-    fn finalize(self) -> DigestResult { DigestResult::Md5(Digest::finalize(self)) }
-
-    fn update(&mut self, data: &[u8]) { Digest::update(self, data) }
-}
-
-impl Hasher for sha1::Sha1 {
-    fn new() -> Self { Digest::new() }
-
-    fn finalize(self) -> DigestResult { DigestResult::Sha1(Digest::finalize(self)) }
-
-    fn update(&mut self, data: &[u8]) { Digest::update(self, data) }
-}
-
-impl Hasher for crc32fast::Hasher {
-    fn new() -> Self { crc32fast::Hasher::new() }
-
-    fn finalize(self) -> DigestResult { DigestResult::Crc32(crc32fast::Hasher::finalize(self)) }
-
-    fn update(&mut self, data: &[u8]) { crc32fast::Hasher::update(self, data) }
-}
-
-impl Hasher for xxhash_rust::xxh64::Xxh64 {
-    fn new() -> Self { xxhash_rust::xxh64::Xxh64::new(0) }
-
-    fn finalize(self) -> DigestResult {
-        DigestResult::Xxh64(xxhash_rust::xxh64::Xxh64::digest(&self))
-    }
-
-    fn update(&mut self, data: &[u8]) { xxhash_rust::xxh64::Xxh64::update(self, data) }
-}
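The helpers removed above implement a channel-based fan-out: one thread per hash algorithm, each fed the same Arc-shared chunks, then drained by dropping the sender and joining. Below is a hedged, standalone sketch of that driving pattern; it assumes the DigestThread, DigestResult, and Hasher definitions from this hunk are in scope, and the chunk data is a hypothetical stand-in for disc sectors.

// Hedged sketch of how the removed digest_thread helpers are driven.
use std::sync::Arc;

fn hash_chunks(chunks: &[Vec<u8>]) -> Vec<DigestResult> {
    // One worker per algorithm; each receives every chunk over its own channel.
    let digest_threads: Vec<DigestThread> = vec![
        digest_thread::<crc32fast::Hasher>(),
        digest_thread::<md5::Md5>(),
        digest_thread::<sha1::Sha1>(),
        digest_thread::<xxhash_rust::xxh64::Xxh64>(),
    ];
    for chunk in chunks {
        let data: Arc<[u8]> = Arc::from(chunk.as_slice());
        for (tx, _) in &digest_threads {
            // Cloning the Arc shares the buffer; no per-thread copy is made.
            tx.send(data.clone()).expect("digest thread exited early");
        }
    }
    digest_threads
        .into_iter()
        .map(|(tx, handle)| {
            drop(tx); // Closing the channel lets the worker finish hashing
            handle.join().expect("digest thread panicked")
        })
        .collect()
}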
@@ -0,0 +1,70 @@
+use std::{mem::size_of, str};
+
+use nod::{array_ref, SECTOR_SIZE};
+use zerocopy::{FromBytes, FromZeroes};
+
+#[derive(Clone, Debug)]
+pub struct GameResult {
+    pub name: &'static str,
+    pub crc32: u32,
+    pub md5: [u8; 16],
+    pub sha1: [u8; 20],
+    pub size: u64,
+}
+
+pub fn find_by_hashes(crc32: u32, sha1: [u8; 20]) -> Option<GameResult> {
+    let header: &Header = Header::ref_from_prefix(&DATA.0).unwrap();
+    assert_eq!(header.entry_size as usize, size_of::<GameEntry>());
+
+    let entries_size = header.entry_count as usize * size_of::<GameEntry>();
+    let entries: &[GameEntry] =
+        GameEntry::slice_from(&DATA.0[size_of::<Header>()..size_of::<Header>() + entries_size])
+            .unwrap();
+    let string_table: &[u8] = &DATA.0[size_of::<Header>() + entries_size..];
+
+    // Binary search by CRC32
+    let index = entries.binary_search_by_key(&crc32, |entry| entry.crc32).ok()?;
+
+    // Verify SHA-1
+    let entry = &entries[index];
+    if entry.sha1 != sha1 {
+        return None;
+    }
+
+    // Parse the entry
+    let offset = entry.string_table_offset as usize;
+    let name_size = u32::from_ne_bytes(*array_ref![string_table, offset, 4]) as usize;
+    let name = str::from_utf8(&string_table[offset + 4..offset + 4 + name_size]).unwrap();
+    Some(GameResult {
+        name,
+        crc32: entry.crc32,
+        md5: entry.md5,
+        sha1: entry.sha1,
+        size: entry.sectors as u64 * SECTOR_SIZE as u64,
+    })
+}
+
+#[repr(C, align(4))]
+struct Aligned<T: ?Sized>(T);
+
+const DATA: &'static Aligned<[u8]> =
+    &Aligned(*include_bytes!(concat!(env!("OUT_DIR"), "/parsed-dats.bin")));
+
+// Keep in sync with build.rs
+#[derive(Clone, Debug, FromBytes, FromZeroes)]
+#[repr(C, align(4))]
+struct Header {
+    entry_count: u32,
+    entry_size: u32,
+}
+
+// Keep in sync with build.rs
+#[derive(Clone, Debug, FromBytes, FromZeroes)]
+#[repr(C, align(4))]
+struct GameEntry {
+    crc32: u32,
+    string_table_offset: u32,
+    sectors: u32,
+    md5: [u8; 16],
+    sha1: [u8; 20],
+}
src/disc/gcn.rs (242 lines deleted)
@@ -1,242 +0,0 @@
|
||||||
use std::{
|
|
||||||
io,
|
|
||||||
io::{Read, Seek, SeekFrom},
|
|
||||||
mem::size_of,
|
|
||||||
};
|
|
||||||
|
|
||||||
use zerocopy::FromBytes;
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
array_ref,
|
|
||||||
disc::{
|
|
||||||
AppLoaderHeader, DiscBase, DiscHeader, DiscIO, DolHeader, PartitionBase, PartitionHeader,
|
|
||||||
PartitionInfo, PartitionKind, PartitionMeta, BI2_SIZE, BOOT_SIZE, MINI_DVD_SIZE,
|
|
||||||
SECTOR_SIZE,
|
|
||||||
},
|
|
||||||
fst::{Node, NodeKind},
|
|
||||||
streams::{ReadStream, SharedWindowedReadStream},
|
|
||||||
util::{
|
|
||||||
div_rem,
|
|
||||||
read::{read_box, read_box_slice, read_vec},
|
|
||||||
},
|
|
||||||
Error, OpenOptions, Result, ResultContext,
|
|
||||||
};
|
|
||||||
|
|
||||||
pub(crate) struct DiscGCN {
|
|
||||||
pub(crate) header: DiscHeader,
|
|
||||||
pub(crate) disc_size: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl DiscGCN {
|
|
||||||
pub(crate) fn new(
|
|
||||||
_stream: &mut dyn ReadStream,
|
|
||||||
header: DiscHeader,
|
|
||||||
disc_size: Option<u64>,
|
|
||||||
) -> Result<DiscGCN> {
|
|
||||||
Ok(DiscGCN { header, disc_size: disc_size.unwrap_or(MINI_DVD_SIZE) })
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn open_partition<'a>(disc_io: &'a dyn DiscIO) -> Result<Box<dyn PartitionBase + 'a>> {
|
|
||||||
let stream = disc_io.open()?;
|
|
||||||
Ok(Box::new(PartitionGC { stream, offset: 0, cur_block: u32::MAX, buf: [0; SECTOR_SIZE] }))
|
|
||||||
}
|
|
||||||
|
|
||||||
impl DiscBase for DiscGCN {
|
|
||||||
fn header(&self) -> &DiscHeader { &self.header }
|
|
||||||
|
|
||||||
fn partitions(&self) -> Vec<PartitionInfo> {
|
|
||||||
vec![PartitionInfo {
|
|
||||||
group_index: 0,
|
|
||||||
part_index: 0,
|
|
||||||
part_offset: 0,
|
|
||||||
kind: PartitionKind::Data,
|
|
||||||
data_offset: 0,
|
|
||||||
data_size: self.disc_size,
|
|
||||||
header: None,
|
|
||||||
lfg_seed: *array_ref!(self.header.game_id, 0, 4),
|
|
||||||
// junk_start: self.junk_start,
|
|
||||||
}]
|
|
||||||
}
|
|
||||||
|
|
||||||
fn open_partition<'a>(
|
|
||||||
&self,
|
|
||||||
disc_io: &'a dyn DiscIO,
|
|
||||||
index: usize,
|
|
||||||
_options: &OpenOptions,
|
|
||||||
) -> Result<Box<dyn PartitionBase + 'a>> {
|
|
||||||
if index != 0 {
|
|
||||||
return Err(Error::DiscFormat(format!(
|
|
||||||
"Invalid partition index {} for GameCube disc",
|
|
||||||
index
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
open_partition(disc_io)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn open_partition_kind<'a>(
|
|
||||||
&self,
|
|
||||||
disc_io: &'a dyn DiscIO,
|
|
||||||
part_type: PartitionKind,
|
|
||||||
_options: &OpenOptions,
|
|
||||||
) -> Result<Box<dyn PartitionBase + 'a>> {
|
|
||||||
if part_type != PartitionKind::Data {
|
|
||||||
return Err(Error::DiscFormat(format!(
|
|
||||||
"Invalid partition type {:?} for GameCube disc",
|
|
||||||
part_type
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
open_partition(disc_io)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn disc_size(&self) -> u64 { self.disc_size }
|
|
||||||
}
|
|
||||||
|
|
||||||
struct PartitionGC<'a> {
|
|
||||||
stream: Box<dyn ReadStream + 'a>,
|
|
||||||
offset: u64,
|
|
||||||
cur_block: u32,
|
|
||||||
buf: [u8; SECTOR_SIZE],
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a> Read for PartitionGC<'a> {
|
|
||||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
|
||||||
let (block, block_offset) = div_rem(self.offset, SECTOR_SIZE as u64);
|
|
||||||
let mut block = block as u32;
|
|
||||||
let mut block_offset = block_offset as usize;
|
|
||||||
|
|
||||||
let mut rem = buf.len();
|
|
||||||
let mut read: usize = 0;
|
|
||||||
|
|
||||||
while rem > 0 {
|
|
||||||
if block != self.cur_block {
|
|
||||||
self.stream.read_exact(&mut self.buf)?;
|
|
||||||
self.cur_block = block;
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut cache_size = rem;
|
|
||||||
if cache_size + block_offset > SECTOR_SIZE {
|
|
||||||
cache_size = SECTOR_SIZE - block_offset;
|
|
||||||
}
|
|
||||||
|
|
||||||
buf[read..read + cache_size]
|
|
||||||
.copy_from_slice(&self.buf[block_offset..block_offset + cache_size]);
|
|
||||||
read += cache_size;
|
|
||||||
rem -= cache_size;
|
|
||||||
block_offset = 0;
|
|
||||||
block += 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
self.offset += buf.len() as u64;
|
|
||||||
Ok(buf.len())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a> Seek for PartitionGC<'a> {
|
|
||||||
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
|
|
||||||
self.offset = match pos {
|
|
||||||
SeekFrom::Start(v) => v,
|
|
||||||
SeekFrom::End(_) => {
|
|
||||||
return Err(io::Error::new(
|
|
||||||
io::ErrorKind::Unsupported,
|
|
||||||
"PartitionGC: SeekFrom::End is not supported",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
SeekFrom::Current(v) => self.offset.saturating_add_signed(v),
|
|
||||||
};
|
|
||||||
let block = self.offset / SECTOR_SIZE as u64;
|
|
||||||
if block as u32 != self.cur_block {
|
|
||||||
self.stream.seek(SeekFrom::Start(block * SECTOR_SIZE as u64))?;
|
|
||||||
self.cur_block = u32::MAX;
|
|
||||||
}
|
|
||||||
Ok(self.offset)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn stream_position(&mut self) -> io::Result<u64> { Ok(self.offset) }
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a> PartitionBase for PartitionGC<'a> {
|
|
||||||
fn meta(&mut self) -> Result<Box<PartitionMeta>> {
|
|
||||||
self.seek(SeekFrom::Start(0)).context("Seeking to partition header")?;
|
|
||||||
read_part_header(self, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn open_file(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream> {
|
|
||||||
assert_eq!(node.kind(), NodeKind::File);
|
|
||||||
self.new_window(node.offset(false), node.length(false))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn ideal_buffer_size(&self) -> usize { SECTOR_SIZE }
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn read_part_header<R>(reader: &mut R, is_wii: bool) -> Result<Box<PartitionMeta>>
|
|
||||||
where R: Read + Seek + ?Sized {
|
|
||||||
// boot.bin
|
|
||||||
let raw_boot: Box<[u8; BOOT_SIZE]> = read_box(reader).context("Reading boot.bin")?;
|
|
||||||
let partition_header = PartitionHeader::ref_from(&raw_boot[size_of::<DiscHeader>()..]).unwrap();
|
|
||||||
|
|
||||||
// bi2.bin
|
|
||||||
let raw_bi2: Box<[u8; BI2_SIZE]> = read_box(reader).context("Reading bi2.bin")?;
|
|
||||||
|
|
||||||
// apploader.bin
|
|
||||||
let mut raw_apploader: Vec<u8> =
|
|
||||||
read_vec(reader, size_of::<AppLoaderHeader>()).context("Reading apploader header")?;
|
|
||||||
let apploader_header = AppLoaderHeader::ref_from(raw_apploader.as_slice()).unwrap();
|
|
||||||
raw_apploader.resize(
|
|
||||||
size_of::<AppLoaderHeader>()
|
|
||||||
+ apploader_header.size.get() as usize
|
|
||||||
+ apploader_header.trailer_size.get() as usize,
|
|
||||||
0,
|
|
||||||
);
|
|
||||||
reader
|
|
||||||
.read_exact(&mut raw_apploader[size_of::<AppLoaderHeader>()..])
|
|
||||||
.context("Reading apploader")?;
|
|
||||||
|
|
||||||
// fst.bin
|
|
||||||
reader
|
|
||||||
.seek(SeekFrom::Start(partition_header.fst_off(is_wii)))
|
|
||||||
.context("Seeking to FST offset")?;
|
|
||||||
let raw_fst: Box<[u8]> = read_box_slice(reader, partition_header.fst_sz(is_wii) as usize)
|
|
||||||
.with_context(|| {
|
|
||||||
format!(
|
|
||||||
"Reading partition FST (offset {}, size {})",
|
|
||||||
partition_header.fst_off, partition_header.fst_sz
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
|
|
||||||
// main.dol
|
|
||||||
reader
|
|
||||||
.seek(SeekFrom::Start(partition_header.dol_off(is_wii)))
|
|
||||||
.context("Seeking to DOL offset")?;
|
|
||||||
let mut raw_dol: Vec<u8> =
|
|
||||||
read_vec(reader, size_of::<DolHeader>()).context("Reading DOL header")?;
|
|
||||||
let dol_header = DolHeader::ref_from(raw_dol.as_slice()).unwrap();
|
|
||||||
let dol_size = dol_header
|
|
||||||
.text_offs
|
|
||||||
.iter()
|
|
||||||
.zip(&dol_header.text_sizes)
|
|
||||||
.map(|(offs, size)| offs.get() + size.get())
|
|
||||||
.chain(
|
|
||||||
dol_header
|
|
||||||
.data_offs
|
|
||||||
.iter()
|
|
||||||
.zip(&dol_header.data_sizes)
|
|
||||||
.map(|(offs, size)| offs.get() + size.get()),
|
|
||||||
)
|
|
||||||
.max()
|
|
||||||
.unwrap_or(size_of::<DolHeader>() as u32);
|
|
||||||
raw_dol.resize(dol_size as usize, 0);
|
|
||||||
reader.read_exact(&mut raw_dol[size_of::<DolHeader>()..]).context("Reading DOL")?;
|
|
||||||
|
|
||||||
Ok(Box::new(PartitionMeta {
|
|
||||||
raw_boot,
|
|
||||||
raw_bi2,
|
|
||||||
raw_apploader: raw_apploader.into_boxed_slice(),
|
|
||||||
raw_fst,
|
|
||||||
raw_dol: raw_dol.into_boxed_slice(),
|
|
||||||
raw_ticket: None,
|
|
||||||
raw_tmd: None,
|
|
||||||
raw_cert_chain: None,
|
|
||||||
raw_h3_table: None,
|
|
||||||
}))
|
|
||||||
}
|
|
|
@ -1,177 +0,0 @@
|
||||||
use std::{
|
|
||||||
cmp::min,
|
|
||||||
io,
|
|
||||||
io::{Read, Seek, SeekFrom},
|
|
||||||
};
|
|
||||||
|
|
||||||
use sha1::{Digest, Sha1};
|
|
||||||
use zerocopy::FromZeroes;
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
array_ref,
|
|
||||||
disc::wii::{as_digest, HASHES_SIZE, SECTOR_DATA_SIZE},
|
|
||||||
io::block::{BPartitionInfo, Block, BlockIO},
|
|
||||||
util::div_rem,
|
|
||||||
Result, SECTOR_SIZE,
|
|
||||||
};
|
|
||||||
|
|
||||||
pub struct PartitionReader {
|
|
||||||
io: Box<dyn BlockIO>,
|
|
||||||
partition: BPartitionInfo,
|
|
||||||
block: Option<Block>,
|
|
||||||
block_buf: Box<[u8]>,
|
|
||||||
block_idx: u32,
|
|
||||||
sector_buf: Box<[u8; SECTOR_SIZE]>,
|
|
||||||
sector: u32,
|
|
||||||
pos: u64,
|
|
||||||
verify: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Clone for PartitionReader {
|
|
||||||
fn clone(&self) -> Self {
|
|
||||||
Self {
|
|
||||||
io: self.io.clone(),
|
|
||||||
partition: self.partition.clone(),
|
|
||||||
block: None,
|
|
||||||
block_buf: <u8>::new_box_slice_zeroed(self.block_buf.len()),
|
|
||||||
block_idx: u32::MAX,
|
|
||||||
sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed(),
|
|
||||||
sector: u32::MAX,
|
|
||||||
pos: 0,
|
|
||||||
verify: self.verify,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl PartitionReader {
|
|
||||||
pub fn new(inner: Box<dyn BlockIO>, partition: &BPartitionInfo) -> Result<Self> {
|
|
||||||
let block_size = inner.block_size();
|
|
||||||
Ok(Self {
|
|
||||||
io: inner,
|
|
||||||
partition: partition.clone(),
|
|
||||||
block: None,
|
|
||||||
block_buf: <u8>::new_box_slice_zeroed(block_size as usize),
|
|
||||||
block_idx: u32::MAX,
|
|
||||||
sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed(),
|
|
||||||
sector: u32::MAX,
|
|
||||||
pos: 0,
|
|
||||||
verify: false,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Read for PartitionReader {
|
|
||||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
|
||||||
let partition_sector = (self.pos / SECTOR_DATA_SIZE as u64) as u32;
|
|
||||||
let sector = self.partition.data_start_sector + partition_sector;
|
|
||||||
if sector >= self.partition.data_end_sector {
|
|
||||||
return Ok(0);
|
|
||||||
}
|
|
||||||
let block_idx = (sector as u64 * SECTOR_SIZE as u64 / self.block_buf.len() as u64) as u32;
|
|
||||||
|
|
||||||
// Read new block if necessary
|
|
||||||
if block_idx != self.block_idx {
|
|
||||||
self.block =
|
|
||||||
self.io.read_block(self.block_buf.as_mut(), block_idx, Some(&self.partition))?;
|
|
||||||
self.block_idx = block_idx;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decrypt sector if necessary
|
|
||||||
if sector != self.sector {
|
|
||||||
let Some(block) = &self.block else {
|
|
||||||
return Ok(0);
|
|
||||||
};
|
|
||||||
block.decrypt(
|
|
||||||
&mut self.sector_buf,
|
|
||||||
self.block_buf.as_ref(),
|
|
||||||
block_idx,
|
|
||||||
sector,
|
|
||||||
&self.partition,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
if self.verify {
|
|
||||||
verify_hashes(&self.sector_buf, sector)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
self.sector = sector;
|
|
||||||
}
|
|
||||||
|
|
||||||
let offset = (self.pos % SECTOR_DATA_SIZE as u64) as usize;
|
|
||||||
let len = min(buf.len(), SECTOR_DATA_SIZE - offset);
|
|
||||||
buf[..len]
|
|
||||||
.copy_from_slice(&self.sector_buf[HASHES_SIZE + offset..HASHES_SIZE + offset + len]);
|
|
||||||
self.pos += len as u64;
|
|
||||||
Ok(len)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Seek for PartitionReader {
|
|
||||||
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
|
|
||||||
self.pos = match pos {
|
|
||||||
SeekFrom::Start(v) => v,
|
|
||||||
SeekFrom::End(_) => {
|
|
||||||
return Err(io::Error::new(
|
|
||||||
io::ErrorKind::Unsupported,
|
|
||||||
"PartitionReader: SeekFrom::End is not supported".to_string(),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
SeekFrom::Current(v) => self.pos.saturating_add_signed(v),
|
|
||||||
};
|
|
||||||
Ok(self.pos)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn verify_hashes(buf: &[u8; SECTOR_SIZE], sector: u32) -> io::Result<()> {
|
|
||||||
let (mut group, sub_group) = div_rem(sector as usize, 8);
|
|
||||||
group %= 8;
|
|
||||||
|
|
||||||
// H0 hashes
|
|
||||||
for i in 0..31 {
|
|
||||||
let mut hash = Sha1::new();
|
|
||||||
hash.update(array_ref![buf, (i + 1) * 0x400, 0x400]);
|
|
||||||
let expected = as_digest(array_ref![buf, i * 20, 20]);
|
|
||||||
let output = hash.finalize();
|
|
||||||
if output != expected {
|
|
||||||
return Err(io::Error::new(
|
|
||||||
io::ErrorKind::InvalidData,
|
|
||||||
format!("Invalid H0 hash! (block {:?}) {:x}\n\texpected {:x}", i, output, expected),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// H1 hash
|
|
||||||
{
|
|
||||||
let mut hash = Sha1::new();
|
|
||||||
hash.update(array_ref![buf, 0, 0x26C]);
|
|
||||||
let expected = as_digest(array_ref![buf, 0x280 + sub_group * 20, 20]);
|
|
||||||
let output = hash.finalize();
|
|
||||||
if output != expected {
|
|
||||||
return Err(io::Error::new(
|
|
||||||
io::ErrorKind::InvalidData,
|
|
||||||
format!(
|
|
||||||
"Invalid H1 hash! (subgroup {:?}) {:x}\n\texpected {:x}",
|
|
||||||
sub_group, output, expected
|
|
||||||
),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// H2 hash
|
|
||||||
{
|
|
||||||
let mut hash = Sha1::new();
|
|
||||||
hash.update(array_ref![buf, 0x280, 0xA0]);
|
|
||||||
let expected = as_digest(array_ref![buf, 0x340 + group * 20, 20]);
|
|
||||||
let output = hash.finalize();
|
|
||||||
if output != expected {
|
|
||||||
return Err(io::Error::new(
|
|
||||||
io::ErrorKind::InvalidData,
|
|
||||||
format!(
|
|
||||||
"Invalid H2 hash! (group {:?}) {:x}\n\texpected {:x}",
|
|
||||||
group, output, expected
|
|
||||||
),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// TODO H3 hash
|
|
||||||
Ok(())
|
|
||||||
}
|
|
src/disc/wii.rs (638 lines deleted)
@@ -1,638 +0,0 @@
|
||||||
use std::{
|
|
||||||
cmp::min,
|
|
||||||
ffi::CStr,
|
|
||||||
io,
|
|
||||||
io::{Read, Seek, SeekFrom},
|
|
||||||
mem::size_of,
|
|
||||||
};
|
|
||||||
|
|
||||||
use sha1::{digest, Digest, Sha1};
|
|
||||||
use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes};
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
array_ref,
|
|
||||||
disc::{
|
|
||||||
gcn::read_part_header, DiscBase, DiscHeader, DiscIO, PartitionBase, PartitionInfo,
|
|
||||||
PartitionKind, PartitionMeta, DL_DVD_SIZE, MINI_DVD_SIZE, SECTOR_SIZE, SL_DVD_SIZE,
|
|
||||||
},
|
|
||||||
fst::{Node, NodeKind},
|
|
||||||
io::{aes_decrypt, KeyBytes},
|
|
||||||
static_assert,
|
|
||||||
streams::{ReadStream, SharedWindowedReadStream},
|
|
||||||
util::{
|
|
||||||
div_rem,
|
|
||||||
read::{read_from, read_vec},
|
|
||||||
},
|
|
||||||
Error, OpenOptions, PartitionHeader, Result, ResultContext,
|
|
||||||
};
|
|
||||||
|
|
||||||
pub(crate) const HASHES_SIZE: usize = 0x400;
|
|
||||||
pub(crate) const SECTOR_DATA_SIZE: usize = SECTOR_SIZE - HASHES_SIZE; // 0x7C00
|
|
||||||
|
|
||||||
// ppki (Retail)
|
|
||||||
const RVL_CERT_ISSUER_PPKI_TICKET: &str = "Root-CA00000001-XS00000003";
|
|
||||||
#[rustfmt::skip]
|
|
||||||
const RETAIL_COMMON_KEYS: [KeyBytes; 3] = [
|
|
||||||
/* RVL_KEY_RETAIL */
|
|
||||||
[0xeb, 0xe4, 0x2a, 0x22, 0x5e, 0x85, 0x93, 0xe4, 0x48, 0xd9, 0xc5, 0x45, 0x73, 0x81, 0xaa, 0xf7],
|
|
||||||
/* RVL_KEY_KOREAN */
|
|
||||||
[0x63, 0xb8, 0x2b, 0xb4, 0xf4, 0x61, 0x4e, 0x2e, 0x13, 0xf2, 0xfe, 0xfb, 0xba, 0x4c, 0x9b, 0x7e],
|
|
||||||
/* vWii_KEY_RETAIL */
|
|
||||||
[0x30, 0xbf, 0xc7, 0x6e, 0x7c, 0x19, 0xaf, 0xbb, 0x23, 0x16, 0x33, 0x30, 0xce, 0xd7, 0xc2, 0x8d],
|
|
||||||
];
|
|
||||||
|
|
||||||
// dpki (Debug)
|
|
||||||
const RVL_CERT_ISSUER_DPKI_TICKET: &str = "Root-CA00000002-XS00000006";
|
|
||||||
#[rustfmt::skip]
|
|
||||||
const DEBUG_COMMON_KEYS: [KeyBytes; 3] = [
|
|
||||||
/* RVL_KEY_DEBUG */
|
|
||||||
[0xa1, 0x60, 0x4a, 0x6a, 0x71, 0x23, 0xb5, 0x29, 0xae, 0x8b, 0xec, 0x32, 0xc8, 0x16, 0xfc, 0xaa],
|
|
||||||
/* RVL_KEY_KOREAN_DEBUG */
|
|
||||||
[0x67, 0x45, 0x8b, 0x6b, 0xc6, 0x23, 0x7b, 0x32, 0x69, 0x98, 0x3c, 0x64, 0x73, 0x48, 0x33, 0x66],
|
|
||||||
/* vWii_KEY_DEBUG */
|
|
||||||
[0x2f, 0x5c, 0x1b, 0x29, 0x44, 0xe7, 0xfd, 0x6f, 0xc3, 0x97, 0x96, 0x4b, 0x05, 0x76, 0x91, 0xfa],
|
|
||||||
];
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
|
|
||||||
#[repr(C, align(4))]
|
|
||||||
pub(crate) struct WiiPartEntry {
|
|
||||||
pub(crate) offset: U32,
|
|
||||||
pub(crate) kind: U32,
|
|
||||||
}
|
|
||||||
|
|
||||||
static_assert!(size_of::<WiiPartEntry>() == 8);
|
|
||||||
|
|
||||||
impl WiiPartEntry {
|
|
||||||
pub(crate) fn offset(&self) -> u64 { (self.offset.get() as u64) << 2 }
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq)]
|
|
||||||
pub(crate) struct WiiPartInfo {
|
|
||||||
pub(crate) group_idx: u32,
|
|
||||||
pub(crate) part_idx: u32,
|
|
||||||
pub(crate) offset: u64,
|
|
||||||
pub(crate) kind: PartitionKind,
|
|
||||||
pub(crate) header: WiiPartitionHeader,
|
|
||||||
pub(crate) junk_id: [u8; 4],
|
|
||||||
pub(crate) junk_start: u64,
|
|
||||||
pub(crate) title_key: KeyBytes,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) const WII_PART_GROUP_OFF: u64 = 0x40000;
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
|
|
||||||
#[repr(C, align(4))]
|
|
||||||
pub(crate) struct WiiPartGroup {
|
|
||||||
pub(crate) part_count: U32,
|
|
||||||
pub(crate) part_entry_off: U32,
|
|
||||||
}
|
|
||||||
|
|
||||||
static_assert!(size_of::<WiiPartGroup>() == 8);
|
|
||||||
|
|
||||||
impl WiiPartGroup {
|
|
||||||
pub(crate) fn part_entry_off(&self) -> u64 { (self.part_entry_off.get() as u64) << 2 }
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
|
|
||||||
#[repr(C, align(4))]
|
|
||||||
pub struct SignedHeader {
|
|
||||||
/// Signature type, always 0x00010001 (RSA-2048)
|
|
||||||
pub sig_type: U32,
|
|
||||||
/// RSA-2048 signature
|
|
||||||
pub sig: [u8; 256],
|
|
||||||
_pad: [u8; 60],
|
|
||||||
}
|
|
||||||
|
|
||||||
static_assert!(size_of::<SignedHeader>() == 0x140);
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, PartialEq, Default, FromBytes, FromZeroes, AsBytes)]
|
|
||||||
#[repr(C, align(4))]
|
|
||||||
pub struct TicketTimeLimit {
|
|
||||||
pub enable_time_limit: U32,
|
|
||||||
pub time_limit: U32,
|
|
||||||
}
|
|
||||||
|
|
||||||
static_assert!(size_of::<TicketTimeLimit>() == 8);
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
|
|
||||||
#[repr(C, align(4))]
|
|
||||||
pub struct Ticket {
|
|
||||||
pub header: SignedHeader,
|
|
||||||
pub sig_issuer: [u8; 64],
|
|
||||||
pub ecdh: [u8; 60],
|
|
||||||
pub version: u8,
|
|
||||||
_pad1: U16,
|
|
||||||
pub title_key: KeyBytes,
|
|
||||||
_pad2: u8,
|
|
||||||
pub ticket_id: [u8; 8],
|
|
||||||
pub console_id: [u8; 4],
|
|
||||||
pub title_id: [u8; 8],
|
|
||||||
_pad3: U16,
|
|
||||||
pub ticket_title_version: U16,
|
|
||||||
pub permitted_titles_mask: U32,
|
|
||||||
pub permit_mask: U32,
|
|
||||||
pub title_export_allowed: u8,
|
|
||||||
pub common_key_idx: u8,
|
|
||||||
_pad4: [u8; 48],
|
|
||||||
pub content_access_permissions: [u8; 64],
|
|
||||||
_pad5: [u8; 2],
|
|
||||||
pub time_limits: [TicketTimeLimit; 8],
|
|
||||||
}
|
|
||||||
|
|
||||||
static_assert!(size_of::<Ticket>() == 0x2A4);
|
|
||||||
|
|
||||||
impl Ticket {
|
|
||||||
pub fn decrypt_title_key(&self) -> Result<KeyBytes> {
|
|
||||||
let mut iv: KeyBytes = [0; 16];
|
|
||||||
iv[..8].copy_from_slice(&self.title_id);
|
|
||||||
let cert_issuer_ticket =
|
|
||||||
CStr::from_bytes_until_nul(&self.sig_issuer).ok().and_then(|c| c.to_str().ok());
|
|
||||||
let common_keys = match cert_issuer_ticket {
|
|
||||||
Some(RVL_CERT_ISSUER_PPKI_TICKET) => &RETAIL_COMMON_KEYS,
|
|
||||||
Some(RVL_CERT_ISSUER_DPKI_TICKET) => &DEBUG_COMMON_KEYS,
|
|
||||||
Some(v) => {
|
|
||||||
return Err(Error::DiscFormat(format!("unknown certificate issuer {:?}", v)));
|
|
||||||
}
|
|
||||||
None => {
|
|
||||||
return Err(Error::DiscFormat("failed to parse certificate issuer".to_string()));
|
|
||||||
}
|
|
||||||
};
|
|
||||||
let common_key = common_keys.get(self.common_key_idx as usize).ok_or(Error::DiscFormat(
|
|
||||||
format!("unknown common key index {}", self.common_key_idx),
|
|
||||||
))?;
|
|
||||||
let mut title_key = self.title_key;
|
|
||||||
aes_decrypt(common_key, iv, &mut title_key);
|
|
||||||
Ok(title_key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
|
|
||||||
#[repr(C, align(4))]
|
|
||||||
pub struct TmdHeader {
|
|
||||||
pub header: SignedHeader,
|
|
||||||
pub sig_issuer: [u8; 64],
|
|
||||||
pub version: u8,
|
|
||||||
pub ca_crl_version: u8,
|
|
||||||
pub signer_crl_version: u8,
|
|
||||||
pub is_vwii: u8,
|
|
||||||
pub ios_id: [u8; 8],
|
|
||||||
pub title_id: [u8; 8],
|
|
||||||
pub title_type: u32,
|
|
||||||
pub group_id: U16,
|
|
||||||
_pad1: [u8; 2],
|
|
||||||
pub region: U16,
|
|
||||||
pub ratings: KeyBytes,
|
|
||||||
_pad2: [u8; 12],
|
|
||||||
pub ipc_mask: [u8; 12],
|
|
||||||
_pad3: [u8; 18],
|
|
||||||
pub access_flags: U32,
|
|
||||||
pub title_version: U16,
|
|
||||||
pub num_contents: U16,
|
|
||||||
pub boot_idx: U16,
|
|
||||||
pub minor_version: U16,
|
|
||||||
}
|
|
||||||
|
|
||||||
static_assert!(size_of::<TmdHeader>() == 0x1E4);
|
|
||||||
|
|
||||||
pub const H3_TABLE_SIZE: usize = 0x18000;
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, PartialEq, FromBytes, FromZeroes, AsBytes)]
|
|
||||||
#[repr(C, align(4))]
|
|
||||||
pub struct WiiPartitionHeader {
|
|
||||||
pub ticket: Ticket,
|
|
||||||
tmd_size: U32,
|
|
||||||
tmd_off: U32,
|
|
||||||
cert_chain_size: U32,
|
|
||||||
cert_chain_off: U32,
|
|
||||||
h3_table_off: U32,
|
|
||||||
data_off: U32,
|
|
||||||
data_size: U32,
|
|
||||||
}
|
|
||||||
|
|
||||||
static_assert!(size_of::<WiiPartitionHeader>() == 0x2C0);
|
|
||||||
|
|
||||||
impl WiiPartitionHeader {
|
|
||||||
pub fn tmd_size(&self) -> u64 { self.tmd_size.get() as u64 }
|
|
||||||
|
|
||||||
pub fn tmd_off(&self) -> u64 { (self.tmd_off.get() as u64) << 2 }
|
|
||||||
|
|
||||||
pub fn cert_chain_size(&self) -> u64 { self.cert_chain_size.get() as u64 }
|
|
||||||
|
|
||||||
pub fn cert_chain_off(&self) -> u64 { (self.cert_chain_off.get() as u64) << 2 }
|
|
||||||
|
|
||||||
pub fn h3_table_off(&self) -> u64 { (self.h3_table_off.get() as u64) << 2 }
|
|
||||||
|
|
||||||
pub fn h3_table_size(&self) -> u64 { H3_TABLE_SIZE as u64 }
|
|
||||||
|
|
||||||
pub fn data_off(&self) -> u64 { (self.data_off.get() as u64) << 2 }
|
|
||||||
|
|
||||||
pub fn data_size(&self) -> u64 { (self.data_size.get() as u64) << 2 }
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) struct DiscWii {
|
|
||||||
header: DiscHeader,
|
|
||||||
part_info: Vec<WiiPartInfo>,
|
|
||||||
disc_size: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl DiscWii {
|
|
||||||
pub(crate) fn new(
|
|
||||||
stream: &mut dyn ReadStream,
|
|
||||||
header: DiscHeader,
|
|
||||||
disc_size: Option<u64>,
|
|
||||||
) -> Result<Self> {
|
|
||||||
let part_info = read_partition_info(stream, &header)?;
|
|
||||||
// Guess disc size if not provided
|
|
||||||
let disc_size = disc_size.unwrap_or_else(|| guess_disc_size(&part_info));
|
|
||||||
Ok(Self { header, part_info, disc_size })
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn read_partition_info(
|
|
||||||
stream: &mut dyn ReadStream,
|
|
||||||
disc_header: &DiscHeader,
|
|
||||||
) -> Result<Vec<WiiPartInfo>> {
|
|
||||||
stream.seek(SeekFrom::Start(WII_PART_GROUP_OFF)).context("Seeking to partition groups")?;
|
|
||||||
let part_groups: [WiiPartGroup; 4] = read_from(stream).context("Reading partition groups")?;
|
|
||||||
let mut part_info = Vec::new();
|
|
||||||
for (group_idx, group) in part_groups.iter().enumerate() {
|
|
||||||
let part_count = group.part_count.get();
|
|
||||||
if part_count == 0 {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
stream
|
|
||||||
.seek(SeekFrom::Start(group.part_entry_off()))
|
|
||||||
.with_context(|| format!("Seeking to partition group {group_idx}"))?;
|
|
||||||
let entries: Vec<WiiPartEntry> = read_vec(stream, part_count as usize)
|
|
||||||
.with_context(|| format!("Reading partition group {group_idx}"))?;
|
|
||||||
for (part_idx, entry) in entries.iter().enumerate() {
|
|
||||||
let offset = entry.offset();
|
|
||||||
stream
|
|
||||||
.seek(SeekFrom::Start(offset))
|
|
||||||
.with_context(|| format!("Seeking to partition data {group_idx}:{part_idx}"))?;
|
|
||||||
let header: WiiPartitionHeader = read_from(stream)
|
|
||||||
.with_context(|| format!("Reading partition header {group_idx}:{part_idx}"))?;
|
|
||||||
|
|
||||||
// Open partition stream and read junk data seed
|
|
||||||
// let inner = stream
|
|
||||||
// .new_window(offset + header.data_off(), DL_DVD_SIZE) // header.data_size()
|
|
||||||
// .context("Wrapping partition stream")?;
|
|
||||||
let title_key = header.ticket.decrypt_title_key()?;
|
|
||||||
let part_offset = entry.offset() + header.data_off();
|
|
||||||
if part_offset % SECTOR_SIZE as u64 != 0 {
|
|
||||||
return Err(Error::DiscFormat(format!(
|
|
||||||
"Partition {group_idx}:{part_idx} offset is not sector aligned",
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
let start_sector = (part_offset / SECTOR_SIZE as u64) as u32;
|
|
||||||
let mut stream = PartitionWii {
|
|
||||||
start_sector,
|
|
||||||
header: header.clone(),
|
|
||||||
tmd: vec![],
|
|
||||||
cert_chain: vec![],
|
|
||||||
h3_table: vec![],
|
|
||||||
stream: Box::new(stream.as_dyn()),
|
|
||||||
key: Some(title_key),
|
|
||||||
offset: 0,
|
|
||||||
cur_block: u32::MAX,
|
|
||||||
buf: [0; SECTOR_SIZE],
|
|
||||||
has_hashes: disc_header.no_partition_hashes == 0,
|
|
||||||
validate_hashes: false,
|
|
||||||
};
|
|
||||||
let junk_id: [u8; 4] = read_from(&mut stream).context("Reading junk seed bytes")?;
|
|
||||||
stream
|
|
||||||
.seek(SeekFrom::Start(size_of::<DiscHeader>() as u64))
|
|
||||||
.context("Seeking to partition header")?;
|
|
||||||
let part_header: PartitionHeader =
|
|
||||||
read_from(&mut stream).context("Reading partition header")?;
|
|
||||||
let junk_start = part_header.fst_off(true) + part_header.fst_sz(true);
|
|
||||||
|
|
||||||
log::debug!("Header: {:?}", header);
|
|
||||||
log::debug!(
|
|
||||||
"Partition: {:?} - {:?}: {:?}",
|
|
||||||
offset + header.data_off(),
|
|
||||||
header.data_size(),
|
|
||||||
header.ticket.title_key
|
|
||||||
);
|
|
||||||
|
|
||||||
part_info.push(WiiPartInfo {
|
|
||||||
group_idx: group_idx as u32,
|
|
||||||
part_idx: part_idx as u32,
|
|
||||||
offset,
|
|
||||||
kind: entry.kind.get().into(),
|
|
||||||
header,
|
|
||||||
junk_id,
|
|
||||||
junk_start,
|
|
||||||
title_key,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(part_info)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn guess_disc_size(part_info: &[WiiPartInfo]) -> u64 {
|
|
||||||
let max_offset = part_info
|
|
||||||
.iter()
|
|
||||||
.flat_map(|v| {
|
|
||||||
[
|
|
||||||
v.offset + v.header.tmd_off() + v.header.tmd_size(),
|
|
||||||
v.offset + v.header.cert_chain_off() + v.header.cert_chain_size(),
|
|
||||||
v.offset + v.header.h3_table_off() + v.header.h3_table_size(),
|
|
||||||
v.offset + v.header.data_off() + v.header.data_size(),
|
|
||||||
]
|
|
||||||
})
|
|
||||||
.max()
|
|
||||||
.unwrap_or(0x50000);
|
|
||||||
if max_offset <= MINI_DVD_SIZE && !part_info.iter().any(|v| v.kind == PartitionKind::Data) {
|
|
||||||
// Datel disc
|
|
||||||
MINI_DVD_SIZE
|
|
||||||
} else if max_offset < SL_DVD_SIZE {
|
|
||||||
SL_DVD_SIZE
|
|
||||||
} else {
|
|
||||||
DL_DVD_SIZE
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn open_partition<'a>(
|
|
||||||
part: &WiiPartInfo,
|
|
||||||
disc_io: &'a dyn DiscIO,
|
|
||||||
options: &OpenOptions,
|
|
||||||
header: &DiscHeader,
|
|
||||||
) -> Result<Box<dyn PartitionBase + 'a>> {
|
|
||||||
let mut base = disc_io.open()?;
|
|
||||||
|
|
||||||
base.seek(SeekFrom::Start(part.offset + part.header.tmd_off()))
|
|
||||||
.context("Seeking to TMD offset")?;
|
|
||||||
let tmd: Vec<u8> =
|
|
||||||
read_vec(&mut base, part.header.tmd_size() as usize).context("Reading TMD")?;
|
|
||||||
|
|
||||||
base.seek(SeekFrom::Start(part.offset + part.header.cert_chain_off()))
|
|
||||||
.context("Seeking to cert chain offset")?;
|
|
||||||
let cert_chain: Vec<u8> = read_vec(&mut base, part.header.cert_chain_size() as usize)
|
|
||||||
.context("Reading cert chain")?;
|
|
||||||
|
|
||||||
base.seek(SeekFrom::Start(part.offset + part.header.h3_table_off()))
|
|
||||||
.context("Seeking to H3 table offset")?;
|
|
||||||
let h3_table: Vec<u8> = read_vec(&mut base, H3_TABLE_SIZE).context("Reading H3 table")?;
|
|
||||||
|
|
||||||
let key = if header.no_partition_encryption == 0 {
|
|
||||||
Some(part.header.ticket.decrypt_title_key()?)
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
|
||||||
let data_off = part.offset + part.header.data_off();
|
|
||||||
if data_off % SECTOR_SIZE as u64 != 0 {
|
|
||||||
return Err(Error::DiscFormat(format!(
|
|
||||||
"Partition {}:{} offset is not sector aligned",
|
|
||||||
part.group_idx, part.part_idx
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
let start_sector = (data_off / SECTOR_SIZE as u64) as u32;
|
|
||||||
Ok(Box::new(PartitionWii {
|
|
||||||
start_sector,
|
|
||||||
header: part.header.clone(),
|
|
||||||
tmd,
|
|
||||||
cert_chain,
|
|
||||||
h3_table,
|
|
||||||
stream: base,
|
|
||||||
key,
|
|
||||||
offset: 0,
|
|
||||||
cur_block: u32::MAX,
|
|
||||||
buf: [0; SECTOR_SIZE],
|
|
||||||
has_hashes: header.no_partition_hashes == 0,
|
|
||||||
validate_hashes: options.validate_hashes && header.no_partition_hashes == 0,
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
impl DiscBase for DiscWii {
|
|
||||||
fn header(&self) -> &DiscHeader { &self.header }
|
|
||||||
|
|
||||||
fn partitions(&self) -> Vec<PartitionInfo> {
|
|
||||||
self.part_info
|
|
||||||
.iter()
|
|
||||||
.map(|v| PartitionInfo {
|
|
||||||
group_index: v.group_idx,
|
|
||||||
part_index: v.part_idx,
|
|
||||||
part_offset: v.offset,
|
|
||||||
kind: v.kind,
|
|
||||||
data_offset: v.header.data_off(),
|
|
||||||
data_size: v.header.data_size(),
|
|
||||||
header: Some(v.header.clone()),
|
|
||||||
lfg_seed: v.junk_id,
|
|
||||||
// junk_start: v.junk_start,
|
|
||||||
})
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn open_partition<'a>(
|
|
||||||
&self,
|
|
||||||
disc_io: &'a dyn DiscIO,
|
|
||||||
index: usize,
|
|
||||||
options: &OpenOptions,
|
|
||||||
) -> Result<Box<dyn PartitionBase + 'a>> {
|
|
||||||
let part = self.part_info.get(index).ok_or_else(|| {
|
|
||||||
Error::DiscFormat(format!("Failed to locate partition index {}", index))
|
|
||||||
})?;
|
|
||||||
open_partition(part, disc_io, options, &self.header)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn open_partition_kind<'a>(
|
|
||||||
&self,
|
|
||||||
disc_io: &'a dyn DiscIO,
|
|
||||||
part_type: PartitionKind,
|
|
||||||
options: &OpenOptions,
|
|
||||||
) -> Result<Box<dyn PartitionBase + 'a>> {
|
|
||||||
let part = self.part_info.iter().find(|&v| v.kind == part_type).ok_or_else(|| {
|
|
||||||
Error::DiscFormat(format!("Failed to locate {:?} partition", part_type))
|
|
||||||
})?;
|
|
||||||
open_partition(part, disc_io, options, &self.header)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn disc_size(&self) -> u64 { self.disc_size }
|
|
||||||
}
|
|
||||||
|
|
||||||
struct PartitionWii<'a> {
|
|
||||||
start_sector: u32,
|
|
||||||
header: WiiPartitionHeader,
|
|
||||||
tmd: Vec<u8>,
|
|
||||||
cert_chain: Vec<u8>,
|
|
||||||
h3_table: Vec<u8>,
|
|
||||||
|
|
||||||
stream: Box<dyn ReadStream + 'a>,
|
|
||||||
key: Option<KeyBytes>,
|
|
||||||
offset: u64,
|
|
||||||
cur_block: u32,
|
|
||||||
buf: [u8; SECTOR_SIZE],
|
|
||||||
has_hashes: bool,
|
|
||||||
validate_hashes: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a> PartitionBase for PartitionWii<'a> {
|
|
||||||
fn meta(&mut self) -> Result<Box<PartitionMeta>> {
|
|
||||||
self.seek(SeekFrom::Start(0)).context("Seeking to partition header")?;
|
|
||||||
let mut meta = read_part_header(self, true)?;
|
|
||||||
meta.raw_ticket = Some(Box::from(self.header.ticket.as_bytes()));
|
|
||||||
meta.raw_tmd = Some(Box::from(self.tmd.as_slice()));
|
|
||||||
meta.raw_cert_chain = Some(Box::from(self.cert_chain.as_slice()));
|
|
||||||
meta.raw_h3_table = Some(Box::from(self.h3_table.as_slice()));
|
|
||||||
Ok(meta)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn open_file(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream> {
|
|
||||||
assert_eq!(node.kind(), NodeKind::File);
|
|
||||||
self.new_window(node.offset(true), node.length(true))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn ideal_buffer_size(&self) -> usize {
|
|
||||||
if self.has_hashes {
|
|
||||||
SECTOR_DATA_SIZE
|
|
||||||
} else {
|
|
||||||
SECTOR_SIZE
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline(always)]
|
|
||||||
pub(crate) fn as_digest(slice: &[u8; 20]) -> digest::Output<Sha1> { (*slice).into() }
|
|
||||||
|
|
||||||
fn decrypt_block(part: &mut PartitionWii, cluster: u32) -> io::Result<()> {
|
|
||||||
part.stream.read_exact(&mut part.buf)?;
|
|
||||||
if let Some(key) = &part.key {
|
|
||||||
// Fetch IV before decrypting header
|
|
||||||
let iv = *array_ref![part.buf, 0x3d0, 16];
|
|
||||||
// Don't need to decrypt header if we're not validating hashes
|
|
||||||
if part.validate_hashes {
|
|
||||||
aes_decrypt(key, [0; 16], &mut part.buf[..HASHES_SIZE]);
|
|
||||||
}
|
|
||||||
aes_decrypt(key, iv, &mut part.buf[HASHES_SIZE..]);
|
|
||||||
}
|
|
||||||
if part.validate_hashes {
|
|
||||||
let (mut group, sub_group) = div_rem(cluster as usize, 8);
|
|
||||||
group %= 8;
|
|
||||||
// H0 hashes
|
|
||||||
for i in 0..31 {
|
|
||||||
let mut hash = Sha1::new();
|
|
||||||
hash.update(array_ref![part.buf, (i + 1) * 0x400, 0x400]);
|
|
||||||
let expected = as_digest(array_ref![part.buf, i * 20, 20]);
|
|
||||||
let output = hash.finalize();
|
|
||||||
if output != expected {
|
|
||||||
return Err(io::Error::new(
|
|
||||||
io::ErrorKind::InvalidData,
|
|
||||||
format!(
|
|
||||||
"Invalid H0 hash! (block {:?}) {:x}\n\texpected {:x}",
|
|
||||||
i, output, expected
|
|
||||||
),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// H1 hash
|
|
||||||
{
|
|
||||||
let mut hash = Sha1::new();
|
|
||||||
hash.update(array_ref![part.buf, 0, 0x26C]);
|
|
||||||
let expected = as_digest(array_ref![part.buf, 0x280 + sub_group * 20, 20]);
|
|
||||||
let output = hash.finalize();
|
|
||||||
if output != expected {
|
|
||||||
return Err(io::Error::new(
|
|
||||||
io::ErrorKind::InvalidData,
|
|
||||||
format!(
|
|
||||||
"Invalid H1 hash! (subgroup {:?}) {:x}\n\texpected {:x}",
|
|
||||||
sub_group, output, expected
|
|
||||||
),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// H2 hash
|
|
||||||
{
|
|
||||||
let mut hash = Sha1::new();
|
|
||||||
hash.update(array_ref![part.buf, 0x280, 0xA0]);
|
|
||||||
let expected = as_digest(array_ref![part.buf, 0x340 + group * 20, 20]);
|
|
||||||
let output = hash.finalize();
|
|
||||||
if output != expected {
|
|
||||||
return Err(io::Error::new(
|
|
||||||
io::ErrorKind::InvalidData,
|
|
||||||
format!(
|
|
||||||
"Invalid H2 hash! (group {:?}) {:x}\n\texpected {:x}",
|
|
||||||
group, output, expected
|
|
||||||
),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a> Read for PartitionWii<'a> {
|
|
||||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
|
||||||
let block_size = self.ideal_buffer_size() as u64;
|
|
||||||
let (block, block_offset) = div_rem(self.offset, block_size);
|
|
||||||
let block = block as u32;
|
|
||||||
if block != self.cur_block {
|
|
||||||
self.stream
|
|
||||||
.seek(SeekFrom::Start((self.start_sector + block) as u64 * SECTOR_SIZE as u64))?;
|
|
||||||
decrypt_block(self, block)?;
|
|
||||||
self.cur_block = block;
|
|
||||||
}
|
|
||||||
|
|
||||||
let offset = (SECTOR_SIZE - block_size as usize) + block_offset as usize;
|
|
||||||
let read = min(buf.len(), block_size as usize - block_offset as usize);
|
|
||||||
buf[..read].copy_from_slice(&self.buf[offset..offset + read]);
|
|
||||||
self.offset += read as u64;
|
|
||||||
Ok(read)
|
|
||||||
|
|
||||||
// let mut block = block as u32;
|
|
||||||
//
|
|
||||||
// let mut rem = buf.len();
|
|
||||||
// let mut read: usize = 0;
|
|
||||||
//
|
|
||||||
// while rem > 0 {
|
|
||||||
// if block != self.cur_block {
|
|
||||||
// decrypt_block(self, block)?;
|
|
||||||
// self.cur_block = block;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// let mut cache_size = rem;
|
|
||||||
// if cache_size as u64 + block_offset > block_size {
|
|
||||||
// cache_size = (block_size - block_offset) as usize;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// let hashes_size = SECTOR_SIZE - block_size as usize;
|
|
||||||
// let start = hashes_size + block_offset as usize;
|
|
||||||
// buf[read..read + cache_size].copy_from_slice(&self.buf[start..start + cache_size]);
|
|
||||||
// read += cache_size;
|
|
||||||
// rem -= cache_size;
|
|
||||||
// block_offset = 0;
|
|
||||||
// block += 1;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// self.offset += buf.len() as u64;
|
|
||||||
// Ok(buf.len())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[inline(always)]
|
|
||||||
fn to_block_size(v: u64) -> u64 {
|
|
||||||
(v / SECTOR_SIZE as u64) * SECTOR_DATA_SIZE as u64 + (v % SECTOR_SIZE as u64)
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a> Seek for PartitionWii<'a> {
|
|
||||||
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
|
|
||||||
self.offset = match pos {
|
|
||||||
SeekFrom::Start(v) => v,
|
|
||||||
SeekFrom::End(_) => {
|
|
||||||
return Err(io::Error::new(
|
|
||||||
io::ErrorKind::Unsupported,
|
|
||||||
"PartitionWii: SeekFrom::End is not supported",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
SeekFrom::Current(v) => self.offset.saturating_add_signed(v),
|
|
||||||
};
|
|
||||||
// let block = self.offset / self.ideal_buffer_size() as u64;
|
|
||||||
// if block as u32 != self.cur_block {
|
|
||||||
// self.stream.seek(SeekFrom::Start((self.start_sector + block) * SECTOR_SIZE as u64))?;
|
|
||||||
// self.cur_block = u32::MAX;
|
|
||||||
// }
|
|
||||||
Ok(self.offset)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn stream_position(&mut self) -> io::Result<u64> { Ok(self.offset) }
|
|
||||||
}
|
|