mirror of https://github.com/encounter/nod-rs.git
1113 lines
45 KiB
Rust
1113 lines
45 KiB
Rust
use std::{
|
||
cmp::min,
|
||
fs::File,
|
||
io,
|
||
io::{BufReader, Read, Seek, SeekFrom},
|
||
mem::size_of,
|
||
path::{Path, PathBuf},
|
||
sync::{Arc, Mutex},
|
||
time::Instant,
|
||
};
|
||
|
||
use rayon::iter::{IntoParallelIterator, ParallelIterator};
|
||
use sha1::{Digest, Sha1};
|
||
use zerocopy::{big_endian::*, AsBytes, FromBytes, FromZeroes};
|
||
|
||
use crate::{
|
||
array_ref, array_ref_mut,
|
||
disc::{
|
||
wii::{BLOCK_SIZE, HASHES_SIZE},
|
||
SECTOR_SIZE,
|
||
},
|
||
io::{aes_encrypt, nkit::NKitHeader, DiscIO, DiscMeta, HashBytes, KeyBytes, MagicBytes},
|
||
static_assert,
|
||
streams::ReadStream,
|
||
util::{
|
||
compress::{lzma2_props_decode, lzma_props_decode, new_lzma2_decoder, new_lzma_decoder},
|
||
lfg::LaggedFibonacci,
|
||
reader::{read_from, read_u16_be, read_vec},
|
||
take_seek::TakeSeekExt,
|
||
},
|
||
Error, OpenOptions, Result, ResultContext,
|
||
};
|
||
|
||
/// Magic bytes identifying a WIA file (format version 1).
pub const WIA_MAGIC: MagicBytes = *b"WIA\x01";
/// Magic bytes identifying an RVZ file (format version 1).
pub const RVZ_MAGIC: MagicBytes = *b"RVZ\x01";
|
||
|
||
/// This struct is stored at offset 0x0 and is 0x48 bytes long. The wit source code says its format
/// will never be changed.
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct WIAFileHeader {
    /// Magic bytes: [WIA_MAGIC] or [RVZ_MAGIC].
    pub magic: MagicBytes,
    /// The WIA format version.
    ///
    /// A short note from the wit source code about how version numbers are encoded:
    ///
    /// ```c
    /// //-----------------------------------------------------
    /// // Format of version number: AABBCCDD = A.BB | A.BB.CC
    /// // If D != 0x00 && D != 0xff => append: 'beta' D
    /// //-----------------------------------------------------
    /// ```
    pub version: U32,
    /// If the reading program supports the version of WIA indicated here, it can read the file.
    ///
    /// [version](Self::version) can be higher than `version_compatible`.
    pub version_compatible: U32,
    /// The size of the [WIADisc] struct.
    pub disc_size: U32,
    /// The SHA-1 hash of the [WIADisc] struct.
    ///
    /// The number of bytes to hash is determined by [disc_size](Self::disc_size).
    pub disc_hash: HashBytes,
    /// The original size of the ISO.
    pub iso_file_size: U64,
    /// The size of this file.
    pub wia_file_size: U64,
    /// The SHA-1 hash of this struct, up to but not including `file_head_hash` itself.
    pub file_head_hash: HashBytes,
}

// The header is read directly from disk via zerocopy, so its size must match the format spec.
static_assert!(size_of::<WIAFileHeader>() == 0x48);
|
||
|
||
impl WIAFileHeader {
|
||
pub fn validate(&self) -> Result<()> {
|
||
// Check magic
|
||
if self.magic != WIA_MAGIC && self.magic != RVZ_MAGIC {
|
||
return Err(Error::DiscFormat(format!("Invalid WIA/RVZ magic: {:#X?}", self.magic)));
|
||
}
|
||
// Check file head hash
|
||
let bytes = self.as_bytes();
|
||
verify_hash(&bytes[..bytes.len() - size_of::<HashBytes>()], &self.file_head_hash)?;
|
||
// Check version compatibility
|
||
if self.version_compatible.get() < 0x30000 {
|
||
return Err(Error::DiscFormat(format!(
|
||
"WIA/RVZ version {:#X} is not supported",
|
||
self.version_compatible
|
||
)));
|
||
}
|
||
Ok(())
|
||
}
|
||
|
||
pub fn is_rvz(&self) -> bool { self.magic == RVZ_MAGIC }
|
||
}
|
||
|
||
/// Disc type
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DiscType {
    /// GameCube disc
    GameCube,
    /// Wii disc
    Wii,
}
|
||
|
||
impl TryFrom<u32> for DiscType {
|
||
type Error = Error;
|
||
|
||
fn try_from(value: u32) -> Result<Self> {
|
||
match value {
|
||
1 => Ok(Self::GameCube),
|
||
2 => Ok(Self::Wii),
|
||
v => Err(Error::DiscFormat(format!("Invalid disc type {}", v))),
|
||
}
|
||
}
|
||
}
|
||
|
||
/// Compression type
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Compression {
    /// No compression.
    None,
    /// (WIA only) See [WIASegment]
    Purge,
    /// BZIP2 compression
    Bzip2,
    /// LZMA compression
    Lzma,
    /// LZMA2 compression
    Lzma2,
    /// (RVZ only) Zstandard compression
    Zstandard,
}
|
||
|
||
impl TryFrom<u32> for Compression {
|
||
type Error = Error;
|
||
|
||
fn try_from(value: u32) -> Result<Self> {
|
||
match value {
|
||
0 => Ok(Self::None),
|
||
1 => Ok(Self::Purge),
|
||
2 => Ok(Self::Bzip2),
|
||
3 => Ok(Self::Lzma),
|
||
4 => Ok(Self::Lzma2),
|
||
5 => Ok(Self::Zstandard),
|
||
v => Err(Error::DiscFormat(format!("Invalid compression type {}", v))),
|
||
}
|
||
}
|
||
}
|
||
|
||
/// Size of the disc image prefix stored directly in [WIADisc::disc_head].
const DISC_HEAD_SIZE: usize = 0x80;

/// This struct is stored at offset 0x48, immediately after [WIAFileHeader].
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct WIADisc {
    /// The disc type. (1 = GameCube, 2 = Wii)
    pub disc_type: U32,
    /// The compression type.
    pub compression: U32,
    /// The compression level used by the compressor.
    ///
    /// The possible values are compressor-specific.
    ///
    /// RVZ only:
    /// > This is signed (instead of unsigned) to support negative compression levels in
    /// [Zstandard](Compression::Zstandard) (RVZ only).
    pub compression_level: I32,
    /// The size of the chunks that data is divided into.
    ///
    /// WIA only:
    /// > Must be a multiple of 2 MiB.
    ///
    /// RVZ only:
    /// > Chunk sizes smaller than 2 MiB are supported. The following applies when using a chunk size
    /// smaller than 2 MiB:
    /// > - The chunk size must be at least 32 KiB and must be a power of two. (Just like with WIA,
    /// sizes larger than 2 MiB do not have to be a power of two, they just have to be an integer
    /// multiple of 2 MiB.)
    /// > - For Wii partition data, each chunk contains one [WIAExceptionList] which contains
    /// exceptions for that chunk (and no other chunks). Offset 0 refers to the first hash of the
    /// current chunk, not the first hash of the full 2 MiB of data.
    pub chunk_size: U32,
    /// The first 0x80 bytes of the disc image.
    pub disc_head: [u8; DISC_HEAD_SIZE],
    /// The number of [WIAPartition] structs.
    pub num_partitions: U32,
    /// The size of one [WIAPartition] struct.
    ///
    /// If this is smaller than the size of [WIAPartition], fill the missing bytes with 0x00.
    pub partition_type_size: U32,
    /// The offset in the file where the [WIAPartition] structs are stored (uncompressed).
    pub partition_offset: U64,
    /// The SHA-1 hash of the [WIAPartition] structs.
    ///
    /// The number of bytes to hash is determined by `num_partitions * partition_type_size`.
    pub partition_hash: HashBytes,
    /// The number of [WIARawData] structs.
    pub num_raw_data: U32,
    /// The offset in the file where the [WIARawData] structs are stored (compressed).
    pub raw_data_offset: U64,
    /// The total compressed size of the [WIARawData] structs.
    pub raw_data_size: U32,
    /// The number of [WIAGroup] structs.
    pub num_groups: U32,
    /// The offset in the file where the [WIAGroup] structs are stored (compressed).
    pub group_offset: U64,
    /// The total compressed size of the [WIAGroup] structs.
    pub group_size: U32,
    /// The number of used bytes in the [compr_data](Self::compr_data) array.
    pub compr_data_len: u8,
    /// Compressor specific data.
    ///
    /// If the compression method is [None](Compression::None), [Purge](Compression::Purge),
    /// [Bzip2](Compression::Bzip2), or [Zstandard](Compression::Zstandard) (RVZ only),
    /// [compr_data_len](Self::compr_data_len) is 0. If the compression method is
    /// [Lzma](Compression::Lzma) or [Lzma2](Compression::Lzma2), the compressor specific data is
    /// stored in the format used by the 7-Zip SDK. It needs to be converted if you are using e.g.
    /// liblzma.
    ///
    /// For [Lzma](Compression::Lzma), the data is 5 bytes long. The first byte encodes the `lc`,
    /// `pb`, and `lp` parameters, and the four other bytes encode the dictionary size in little
    /// endian.
    pub compr_data: [u8; 7],
}

// Read directly from disk via zerocopy, so the size must match the format spec.
static_assert!(size_of::<WIADisc>() == 0xDC);
|
||
|
||
impl WIADisc {
|
||
pub fn validate(&self) -> Result<()> {
|
||
DiscType::try_from(self.disc_type.get())?;
|
||
Compression::try_from(self.compression.get())?;
|
||
if self.partition_type_size.get() != size_of::<WIAPartition>() as u32 {
|
||
return Err(Error::DiscFormat(format!(
|
||
"WIA partition type size is {}, expected {}",
|
||
self.partition_type_size.get(),
|
||
size_of::<WIAPartition>()
|
||
)));
|
||
}
|
||
Ok(())
|
||
}
|
||
|
||
pub fn compression(&self) -> Compression {
|
||
Compression::try_from(self.compression.get()).unwrap()
|
||
}
|
||
}
|
||
|
||
/// Describes one contiguous run of sectors within a Wii partition.
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct WIAPartitionData {
    /// The sector on the disc at which this data starts.
    /// One sector is 32 KiB (or 31 KiB excluding hashes).
    pub first_sector: U32,
    /// The number of sectors on the disc covered by this struct.
    /// One sector is 32 KiB (or 31 KiB excluding hashes).
    pub num_sectors: U32,
    /// The index of the first [WIAGroup] struct that points to the data covered by this struct.
    /// The other [WIAGroup] indices follow sequentially.
    pub group_index: U32,
    /// The number of [WIAGroup] structs used for this data.
    pub num_groups: U32,
}

// Read directly from disk via zerocopy, so the size must match the format spec.
static_assert!(size_of::<WIAPartitionData>() == 0x10);
|
||
|
||
/// This struct is used for keeping track of Wii partition data that on the actual disc is encrypted
/// and hashed. This does not include the unencrypted area at the beginning of partitions that
/// contains the ticket, TMD, certificate chain, and H3 table. So for a typical game partition,
/// `pd[0].first_sector * 0x8000` would be 0x0F820000, not 0x0F800000.
///
/// Wii partition data is stored decrypted and with hashes removed. For each 0x8000 bytes on the
/// disc, 0x7C00 bytes are stored in the WIA file (prior to compression). If the hashes are desired,
/// the reading program must first recalculate the hashes as done when creating a Wii disc image
/// from scratch (see <https://wiibrew.org/wiki/Wii_Disc>), and must then apply the hash exceptions
/// which are stored along with the data (see the [WIAExceptionList] section).
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct WIAPartition {
    /// The title key for this partition (128-bit AES), which can be used for re-encrypting the
    /// partition data.
    ///
    /// This key can be used directly, without decrypting it using the Wii common key.
    pub partition_key: KeyBytes,
    /// To quote the wit source code: `segment 0 is small and defined for management data (boot ..
    /// fst). segment 1 takes the remaining data.`
    ///
    /// The point at which wit splits the two segments is the FST end offset rounded up to the next
    /// 2 MiB. Giving the first segment a size which is not a multiple of 2 MiB is likely a bad idea
    /// (unless the second segment has a size of 0).
    pub partition_data: [WIAPartitionData; 2],
}

// Read directly from disk via zerocopy, so the size must match the format spec.
static_assert!(size_of::<WIAPartition>() == 0x30);
|
||
|
||
/// This struct is used for keeping track of disc data that is not stored as [WIAPartition].
/// The data is stored as is (other than compression being applied).
///
/// The first [WIARawData] has `raw_data_offset` set to 0x80 and `raw_data_size` set to 0x4FF80,
/// but despite this, it actually contains 0x50000 bytes of data. (However, the first 0x80 bytes
/// should be read from [WIADisc] instead.) This should be handled by rounding the offset down to
/// the previous multiple of 0x8000 (and adding the equivalent amount to the size so that the end
/// offset stays the same), not by special casing the first [WIARawData].
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct WIARawData {
    /// The offset on the disc at which this data starts.
    pub raw_data_offset: U64,
    /// The number of bytes on the disc covered by this struct.
    pub raw_data_size: U64,
    /// The index of the first [WIAGroup] struct that points to the data covered by this struct.
    /// The other [WIAGroup] indices follow sequentially.
    pub group_index: U32,
    /// The number of [WIAGroup] structs used for this data.
    pub num_groups: U32,
}
|
||
|
||
/// This struct points directly to the actual disc data, stored compressed.
///
/// The data is interpreted differently depending on whether the [WIAGroup] is referenced by a
/// [WIAPartitionData] or a [WIARawData] (see the [WIAPartition] section for details).
///
/// A [WIAGroup] normally contains chunk_size bytes of decompressed data
/// (or `chunk_size / 0x8000 * 0x7C00` for Wii partition data when not counting hashes), not
/// counting any [WIAExceptionList] structs. However, the last [WIAGroup] of a [WIAPartitionData]
/// or [WIARawData] contains less data than that if `num_sectors * 0x8000` (for [WIAPartitionData])
/// or `raw_data_size` (for [WIARawData]) is not evenly divisible by `chunk_size`.
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct WIAGroup {
    /// The offset in the file where the compressed data is stored.
    ///
    /// Stored as a `u32`, divided by 4.
    pub data_offset: U32,
    /// The size of the compressed data, including any [WIAExceptionList] structs. 0 is a special
    /// case meaning that every byte of the decompressed data is 0x00 and the [WIAExceptionList]
    /// structs (if there are supposed to be any) contain 0 exceptions.
    pub data_size: U32,
}
|
||
|
||
/// Compared to [WIAGroup], [RVZGroup] changes the meaning of the most significant bit of
/// [data_size](Self::data_size) and adds one additional attribute.
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(4))]
pub struct RVZGroup {
    /// The offset in the file where the compressed data is stored, divided by 4.
    pub data_offset: U32,
    /// The most significant bit is 1 if the data is compressed using the compression method
    /// indicated in [WIADisc], and 0 if it is not compressed. The lower 31 bits are the size of
    /// the compressed data, including any [WIAExceptionList] structs. The lower 31 bits being 0 is
    /// a special case meaning that every byte of the decompressed and unpacked data is 0x00 and
    /// the [WIAExceptionList] structs (if there are supposed to be any) contain 0 exceptions.
    pub data_size_and_flag: U32,
    /// The size after decompressing but before decoding the RVZ packing.
    /// If this is 0, RVZ packing is not used for this group.
    pub rvz_packed_size: U32,
}
|
||
|
||
impl RVZGroup {
|
||
pub fn data_size(&self) -> u32 { self.data_size_and_flag.get() & 0x7FFFFFFF }
|
||
|
||
pub fn is_compressed(&self) -> bool { self.data_size_and_flag.get() & 0x80000000 != 0 }
|
||
}
|
||
|
||
impl From<WIAGroup> for RVZGroup {
|
||
fn from(value: WIAGroup) -> Self {
|
||
Self {
|
||
data_offset: value.data_offset,
|
||
data_size_and_flag: U32::new(value.data_size.get() | 0x80000000),
|
||
rvz_packed_size: U32::new(0),
|
||
}
|
||
}
|
||
}
|
||
|
||
/// This struct represents a 20-byte difference between the recalculated hash data and the original
/// hash data. (See also [WIAExceptionList])
///
/// When recalculating hashes for a [WIAGroup] with a size which is not evenly divisible by 2 MiB
/// (with the size of the hashes included), the missing bytes should be treated as zeroes for the
/// purpose of hashing. (wit's writing code seems to act as if the reading code does not assume that
/// these missing bytes are zero, but both wit's and Dolphin's reading code treat them as zero.
/// Dolphin's writing code assumes that the reading code treats them as zero.)
///
/// wit's writing code only outputs [WIAException] structs for mismatches in the actual hash
/// data, not in the padding data (which normally only contains zeroes). Dolphin's writing code
/// outputs [WIAException] structs for both hash data and padding data. When Dolphin needs to
/// write [WIAException] structs for a padding area which is 32 bytes long, it writes one which
/// covers the first 20 bytes of the padding area and one which covers the last 20 bytes of the
/// padding area, generating 12 bytes of overlap between the [WIAException] structs.
#[derive(Clone, Debug, PartialEq, FromBytes, FromZeroes, AsBytes)]
#[repr(C, align(2))]
pub struct WIAException {
    /// The offset among the hashes. The offsets 0x0000-0x0400 here map to the offsets 0x0000-0x0400
    /// in the full 2 MiB of data, the offsets 0x0400-0x0800 here map to the offsets 0x8000-0x8400
    /// in the full 2 MiB of data, and so on.
    ///
    /// The offsets start over at 0 for each new [WIAExceptionList].
    pub offset: U16,
    /// The hash that the automatically generated hash at the given offset needs to be replaced
    /// with.
    ///
    /// The replacement should happen after calculating all hashes for the current 2 MiB of data
    /// but before encrypting the hashes.
    pub hash: HashBytes,
}
|
||
|
||
/// Each [WIAGroup] of Wii partition data contains one or more [WIAExceptionList] structs before
/// the actual data, one for each 2 MiB of data in the [WIAGroup]. The number of [WIAExceptionList]
/// structs per [WIAGroup] is always `chunk_size / 0x200000`, even for a [WIAGroup] which contains
/// less data than normal due to it being at the end of a partition.
///
/// For memory management reasons, programs which read WIA files might place a limit on how many
/// exceptions there can be in a [WIAExceptionList]. Dolphin's reading code has a limit of
/// `52 × 64 = 3328` (unless the compression method is [None](Compression::None) or
/// [Purge](Compression::Purge), in which case there is no limit), which is enough to cover all
/// hashes and all padding. wit's reading code seems to be written as if `47 × 64 = 3008` is the
/// maximum it needs to be able to handle, which is enough to cover all hashes but not any padding.
/// However, because wit allocates more memory than needed, it seems to be possible to exceed 3008
/// by some amount without problems. It should be safe for writing code to assume that reading code
/// can handle at least 3328 exceptions per [WIAExceptionList].
///
/// Somewhat ironically, there are exceptions to how [WIAExceptionList] structs are handled:
///
/// For the compression method [Purge](Compression::Purge), the [WIAExceptionList] structs are
/// stored uncompressed (in other words, before the first [WIASegment]). For
/// [Bzip2](Compression::Bzip2), [Lzma](Compression::Lzma) and [Lzma2](Compression::Lzma2), they are
/// compressed along with the rest of the data.
///
/// For the compression methods [None](Compression::None) and [Purge](Compression::Purge), if the
/// end offset of the last [WIAExceptionList] is not evenly divisible by 4, padding is inserted
/// after it so that the data afterwards will start at a 4 byte boundary. This padding is not
/// inserted for the other compression methods.
type WIAExceptionList = Vec<WIAException>;
|
||
|
||
/// A configured decompressor for the compression method declared in [WIADisc].
///
/// Variants for optional codecs only exist when the corresponding cargo feature
/// is enabled.
pub enum Decompressor {
    /// Pass-through: the data is stored uncompressed.
    None,
    #[cfg(feature = "compress-bzip2")]
    Bzip2,
    /// LZMA with the options decoded from the file's compressor-specific data.
    #[cfg(feature = "compress-lzma")]
    Lzma(liblzma::stream::LzmaOptions),
    /// LZMA2 with the options decoded from the file's compressor-specific data.
    #[cfg(feature = "compress-lzma")]
    Lzma2(liblzma::stream::LzmaOptions),
    #[cfg(feature = "compress-zstd")]
    Zstandard,
}
|
||
|
||
impl Decompressor {
    /// Builds a decompressor for the compression method declared in `disc`.
    ///
    /// For LZMA/LZMA2, the codec options are decoded from the 7-Zip-style
    /// compressor-specific data in [WIADisc::compr_data].
    ///
    /// # Errors
    /// Returns [`Error::DiscFormat`] if the compression method is not supported
    /// (either unknown, or its cargo feature is disabled), or if the
    /// compressor-specific data cannot be decoded.
    pub fn new(disc: &WIADisc) -> Result<Self> {
        // Only the first `compr_data_len` bytes of the fixed-size array are meaningful.
        let compr_data = &disc.compr_data[..disc.compr_data_len as usize];
        match disc.compression() {
            Compression::None => Ok(Self::None),
            #[cfg(feature = "compress-bzip2")]
            Compression::Bzip2 => Ok(Self::Bzip2),
            #[cfg(feature = "compress-lzma")]
            Compression::Lzma => Ok(Self::Lzma(lzma_props_decode(compr_data)?)),
            #[cfg(feature = "compress-lzma")]
            Compression::Lzma2 => Ok(Self::Lzma2(lzma2_props_decode(compr_data)?)),
            #[cfg(feature = "compress-zstd")]
            Compression::Zstandard => Ok(Self::Zstandard),
            // Catch-all covers Purge plus any method whose feature is disabled.
            comp => Err(Error::DiscFormat(format!("Unsupported WIA/RVZ compression: {:?}", comp))),
        }
    }

    /// Wraps `reader` in a decoding stream for this compression method.
    ///
    /// The returned reader yields decompressed bytes; for [`Decompressor::None`]
    /// it is the input reader unchanged.
    pub fn wrap<'a, R>(&mut self, reader: R) -> io::Result<Box<dyn Read + 'a>>
    where R: Read + 'a {
        Ok(match self {
            Decompressor::None => Box::new(reader),
            #[cfg(feature = "compress-bzip2")]
            Decompressor::Bzip2 => Box::new(bzip2::read::BzDecoder::new(reader)),
            #[cfg(feature = "compress-lzma")]
            Decompressor::Lzma(options) => Box::new(new_lzma_decoder(reader, options)?),
            #[cfg(feature = "compress-lzma")]
            Decompressor::Lzma2(options) => Box::new(new_lzma2_decoder(reader, options)?),
            #[cfg(feature = "compress-zstd")]
            Decompressor::Zstandard => Box::new(zstd::stream::Decoder::new(reader)?),
        })
    }
}
|
||
|
||
/// In a sector, following the 0x400 byte block of hashes, each 0x400 bytes of decrypted data is
/// hashed, yielding 31 H0 hashes.
/// Then, 8 sectors are aggregated into a subgroup, and the 31 H0 hashes for each sector are hashed,
/// yielding 8 H1 hashes.
/// Then, 8 subgroups are aggregated into a group, and the 8 H1 hashes for each subgroup are hashed,
/// yielding 8 H2 hashes.
/// Finally, the 8 H2 hashes for each group are hashed, yielding 1 H3 hash.
/// The H3 hashes for each group are stored in the partition's H3 table.
pub struct HashTable {
    /// SHA-1 hash of the 31 H0 hashes for each sector.
    pub h1_hashes: Vec<HashBytes>,
    /// SHA-1 hash of the 8 H1 hashes for each subgroup.
    pub h2_hashes: Vec<HashBytes>,
    /// SHA-1 hash of the 8 H2 hashes for each group.
    pub h3_hashes: Vec<HashBytes>,
}
|
||
|
||
/// Hashes computed for a single group (64 sectors), produced by one worker in
/// [DiscIOWIA::rebuild_hashes] and merged into [HashTable].
struct HashResult {
    /// One H1 hash per sector in the group (8 subgroups × 8 sectors).
    h1_hashes: [HashBytes; 64],
    /// One H2 hash per subgroup in the group.
    h2_hashes: [HashBytes; 8],
    /// The single H3 hash for the group.
    h3_hash: HashBytes,
}
|
||
|
||
impl HashTable {
|
||
fn new(num_sectors: u32) -> Self {
|
||
let num_sectors = num_sectors.next_multiple_of(64) as usize;
|
||
let num_subgroups = num_sectors / 8;
|
||
let num_groups = num_subgroups / 8;
|
||
Self {
|
||
h1_hashes: HashBytes::new_vec_zeroed(num_sectors),
|
||
h2_hashes: HashBytes::new_vec_zeroed(num_subgroups),
|
||
h3_hashes: HashBytes::new_vec_zeroed(num_groups),
|
||
}
|
||
}
|
||
|
||
fn extend(&mut self, group_index: usize, result: &HashResult) {
|
||
let h1_start = group_index * 64;
|
||
self.h1_hashes[h1_start..h1_start + 64].copy_from_slice(&result.h1_hashes);
|
||
let h2_start = group_index * 8;
|
||
self.h2_hashes[h2_start..h2_start + 8].copy_from_slice(&result.h2_hashes);
|
||
self.h3_hashes[group_index] = result.h3_hash;
|
||
}
|
||
}
|
||
|
||
/// Disc I/O backend for WIA/RVZ images: parsed headers plus state needed to
/// locate and decode groups on demand.
pub struct DiscIOWIA {
    /// The parsed file header.
    pub header: WIAFileHeader,
    /// The parsed disc header.
    pub disc: WIADisc,
    /// Wii partition descriptors (empty for GameCube discs).
    pub partitions: Vec<WIAPartition>,
    /// Descriptors for disc data not covered by a partition.
    pub raw_data: Vec<WIARawData>,
    /// All group headers; WIA groups are converted to [RVZGroup] on load.
    pub groups: Vec<RVZGroup>,
    /// Path the image was opened from (used to open additional read streams).
    pub filename: PathBuf,
    /// Whether partition data should be re-encrypted when read.
    pub encrypt: bool,
    /// Rebuilt hash tables, one per partition (empty unless rebuild was requested).
    pub hash_tables: Vec<HashTable>,
    /// Optional NKit metadata found after the disc header.
    pub nkit_header: Option<NKitHeader>,
}
|
||
|
||
/// The result of resolving a disc offset to the group that stores it.
#[derive(Debug)]
struct GroupResult {
    /// Offset of the group in the raw disc image.
    disc_offset: u64,
    /// Data offset of the group within a partition, excluding hashes.
    /// Same as `disc_offset` for raw data or GameCube discs.
    partition_offset: u64,
    /// The group.
    group: RVZGroup,
    /// The index of the Wii partition that this group belongs to.
    partition_index: Option<usize>,
    /// Chunk size, differs between Wii and raw data.
    chunk_size: u32,
    /// End offset for the partition or raw data.
    partition_end: u64,
}
|
||
|
||
#[inline]
|
||
fn hash_bytes(buf: &[u8]) -> HashBytes {
|
||
let mut hasher = Sha1::new();
|
||
hasher.update(buf);
|
||
hasher.finalize().into()
|
||
}
|
||
|
||
fn verify_hash(buf: &[u8], expected: &HashBytes) -> Result<()> {
|
||
let out = hash_bytes(buf);
|
||
if out != *expected {
|
||
let mut got_bytes = [0u8; 40];
|
||
let got = base16ct::lower::encode_str(&out, &mut got_bytes).unwrap(); // Safe: fixed buffer size
|
||
let mut expected_bytes = [0u8; 40];
|
||
let expected = base16ct::lower::encode_str(expected, &mut expected_bytes).unwrap(); // Safe: fixed buffer size
|
||
return Err(Error::DiscFormat(format!(
|
||
"WIA hash mismatch: {}, expected {}",
|
||
got, expected
|
||
)));
|
||
}
|
||
Ok(())
|
||
}
|
||
|
||
impl DiscIOWIA {
    /// Opens and parses a WIA/RVZ image at `filename`.
    ///
    /// Reads and verifies the file header, disc header, and partition table,
    /// then loads the (possibly compressed) raw-data and group header tables.
    /// If `options.rebuild_hashes` is set, the Wii hash tables are rebuilt
    /// immediately.
    ///
    /// # Errors
    /// Fails on I/O errors, hash mismatches, unsupported versions, or
    /// unsupported compression methods.
    pub fn new(filename: &Path, options: &OpenOptions) -> Result<Self> {
        let mut file = BufReader::new(
            File::open(filename).with_context(|| format!("Opening file {}", filename.display()))?,
        );

        // Load & verify file header
        let header: WIAFileHeader = read_from(&mut file).context("Reading WIA/RVZ file header")?;
        header.validate()?;
        let is_rvz = header.is_rvz();
        // log::debug!("Header: {:?}", header);

        // Load & verify disc header. The stored header may be shorter than
        // WIADisc (see partition_type_size docs); pad with zeroes before parsing.
        let mut disc_buf: Vec<u8> = read_vec(&mut file, header.disc_size.get() as usize)
            .context("Reading WIA/RVZ disc header")?;
        verify_hash(&disc_buf, &header.disc_hash)?;
        disc_buf.resize(size_of::<WIADisc>(), 0);
        let mut disc = WIADisc::read_from(disc_buf.as_slice()).unwrap();
        disc.validate()?;
        if !options.rebuild_encryption {
            // If we're not re-encrypting, disable partition encryption in disc header
            disc.disc_head[0x61] = 1;
        }
        // log::debug!("Disc: {:?}", disc);

        // Read NKit header if present (after disc header)
        let nkit_header = NKitHeader::try_read_from(&mut file);

        // Load & verify partition headers (stored uncompressed).
        file.seek(SeekFrom::Start(disc.partition_offset.get()))
            .context("Seeking to WIA/RVZ partition headers")?;
        let partitions: Vec<WIAPartition> = read_vec(&mut file, disc.num_partitions.get() as usize)
            .context("Reading WIA/RVZ partition headers")?;
        verify_hash(partitions.as_slice().as_bytes(), &disc.partition_hash)?;
        // log::debug!("Partitions: {:?}", partitions);

        // Create decompressor
        let mut decompressor = Decompressor::new(&disc)?;

        // Load raw data headers (stored compressed).
        let raw_data: Vec<WIARawData> = {
            file.seek(SeekFrom::Start(disc.raw_data_offset.get()))
                .context("Seeking to WIA/RVZ raw data headers")?;
            let mut reader = decompressor
                .wrap((&mut file).take(disc.raw_data_size.get() as u64))
                .context("Creating WIA/RVZ decompressor")?;
            read_vec(&mut reader, disc.num_raw_data.get() as usize)
                .context("Reading WIA/RVZ raw data headers")?
        };
        // log::debug!("Raw data: {:?}", raw_data);

        // Load group headers (stored compressed). WIA groups are normalized to
        // RVZGroup so the rest of the code only handles one representation.
        let groups = {
            file.seek(SeekFrom::Start(disc.group_offset.get()))
                .context("Seeking to WIA/RVZ group headers")?;
            let mut reader = decompressor
                .wrap((&mut file).take(disc.group_size.get() as u64))
                .context("Creating WIA/RVZ decompressor")?;
            if is_rvz {
                read_vec(&mut reader, disc.num_groups.get() as usize)
                    .context("Reading WIA/RVZ group headers")?
            } else {
                let wia_groups: Vec<WIAGroup> =
                    read_vec(&mut reader, disc.num_groups.get() as usize)
                        .context("Reading WIA/RVZ group headers")?;
                wia_groups.into_iter().map(RVZGroup::from).collect()
            }
            // log::debug!("Groups: {:?}", groups);
        };

        let mut disc_io = Self {
            header,
            disc,
            partitions,
            raw_data,
            groups,
            filename: filename.to_owned(),
            encrypt: options.rebuild_encryption,
            hash_tables: vec![],
            nkit_header,
        };
        if options.rebuild_hashes {
            disc_io.rebuild_hashes()?;
        }
        Ok(disc_io)
    }

    /// Resolves a raw disc `offset` to the group that stores it, checking Wii
    /// partition data first, then raw data ranges.
    ///
    /// Returns `None` if the offset is outside every partition/raw-data range,
    /// or if the computed group index is out of bounds.
    fn group_for_offset(&self, offset: u64) -> Option<GroupResult> {
        if let Some((p_idx, pd)) = self.partitions.iter().enumerate().find_map(|(p_idx, p)| {
            p.partition_data
                .iter()
                .find(|pd| {
                    let start = pd.first_sector.get() as u64 * SECTOR_SIZE as u64;
                    let end = start + pd.num_sectors.get() as u64 * SECTOR_SIZE as u64;
                    offset >= start && offset < end
                })
                .map(|pd| (p_idx, pd))
        }) {
            let start = pd.first_sector.get() as u64 * SECTOR_SIZE as u64;
            let group_index = (offset - start) / self.disc.chunk_size.get() as u64;
            if group_index >= pd.num_groups.get() as u64 {
                return None;
            }
            let disc_offset = start + group_index * self.disc.chunk_size.get() as u64;
            // Partition data is stored without hashes, so a chunk holds
            // chunk_size * BLOCK_SIZE / SECTOR_SIZE bytes of decoded data.
            let chunk_size =
                (self.disc.chunk_size.get() as u64 * BLOCK_SIZE as u64) / SECTOR_SIZE as u64;
            let partition_offset = group_index * chunk_size;
            let partition_end = pd.num_sectors.get() as u64 * BLOCK_SIZE as u64;
            self.groups.get(pd.group_index.get() as usize + group_index as usize).map(|g| {
                GroupResult {
                    disc_offset,
                    partition_offset,
                    group: g.clone(),
                    partition_index: Some(p_idx),
                    chunk_size: chunk_size as u32,
                    partition_end,
                }
            })
        } else if let Some(d) = self.raw_data.iter().find(|d| {
            // Round the start down to a sector boundary; the first raw-data
            // entry nominally starts at 0x80 but actually covers from 0
            // (see the WIARawData docs).
            let start = d.raw_data_offset.get() & !0x7FFF;
            let end = d.raw_data_offset.get() + d.raw_data_size.get();
            offset >= start && offset < end
        }) {
            let start = d.raw_data_offset.get() & !0x7FFF;
            let end = d.raw_data_offset.get() + d.raw_data_size.get();
            let group_index = (offset - start) / self.disc.chunk_size.get() as u64;
            if group_index >= d.num_groups.get() as u64 {
                return None;
            }
            let disc_offset = start + group_index * self.disc.chunk_size.get() as u64;
            self.groups.get(d.group_index.get() as usize + group_index as usize).map(|g| {
                GroupResult {
                    disc_offset,
                    // Raw data has no hash stripping, so partition and disc
                    // offsets coincide.
                    partition_offset: disc_offset,
                    group: g.clone(),
                    partition_index: None,
                    chunk_size: self.disc.chunk_size.get(),
                    partition_end: end,
                }
            })
        } else {
            None
        }
    }

    /// Rebuilds the Wii hash tables (H1/H2/H3) for every partition, in parallel
    /// over groups via rayon, and stores them in `self.hash_tables`.
    ///
    /// Sectors beyond the end of a partition are treated as all-zero, using a
    /// precomputed zero-sector H1 hash.
    ///
    /// # Errors
    /// Fails if partition data segments are not contiguous, or on read errors
    /// while streaming sector data.
    pub fn rebuild_hashes(&mut self) -> Result<()> {
        // 31 H0 hashes per sector, each over 0x400 bytes of decrypted data.
        const NUM_H0_HASHES: usize = BLOCK_SIZE / HASHES_SIZE;
        const H0_HASHES_SIZE: usize = size_of::<HashBytes>() * NUM_H0_HASHES;

        let start = Instant::now();

        // Precompute hashes for zeroed sectors.
        const ZERO_H0_BYTES: &[u8] = &[0u8; HASHES_SIZE];
        let zero_h0_hash = hash_bytes(ZERO_H0_BYTES);
        let mut zero_h1_hash = Sha1::new();
        for _ in 0..NUM_H0_HASHES {
            zero_h1_hash.update(zero_h0_hash);
        }
        let zero_h1_hash: HashBytes = zero_h1_hash.finalize().into();

        let mut hash_tables = Vec::with_capacity(self.partitions.len());
        for part in &self.partitions {
            // The two partition-data segments must form one contiguous sector
            // range for the sector arithmetic below to hold.
            let first_sector = part.partition_data[0].first_sector.get();
            if first_sector + part.partition_data[0].num_sectors.get()
                != part.partition_data[1].first_sector.get()
            {
                return Err(Error::DiscFormat(format!(
                    "Partition data is not contiguous: {}..{} != {}",
                    first_sector,
                    first_sector + part.partition_data[0].num_sectors.get(),
                    part.partition_data[1].first_sector.get()
                )));
            }

            let part_sectors =
                part.partition_data[0].num_sectors.get() + part.partition_data[1].num_sectors.get();
            let hash_table = HashTable::new(part_sectors);
            log::debug!(
                "Rebuilding hashes: {} sectors, {} subgroups, {} groups",
                hash_table.h1_hashes.len(),
                hash_table.h2_hashes.len(),
                hash_table.h3_hashes.len()
            );

            let group_count = hash_table.h3_hashes.len();
            let mutex = Arc::new(Mutex::new(hash_table));
            // One read stream per rayon worker (init closure), one group per task.
            (0..group_count).into_par_iter().try_for_each_init(
                || (WIAReadStream::new(self, false), mutex.clone()),
                |(stream, mutex), h3_index| -> Result<()> {
                    let stream = stream.as_mut().map_err(|_| {
                        Error::DiscFormat("Failed to create read stream".to_string())
                    })?;
                    let mut result = HashResult {
                        h1_hashes: [HashBytes::default(); 64],
                        h2_hashes: [HashBytes::default(); 8],
                        h3_hash: HashBytes::default(),
                    };
                    let mut h0_buf = [0u8; H0_HASHES_SIZE];
                    let mut h3_hasher = Sha1::new();
                    // Walk the group's 8 subgroups × 8 sectors, hashing upward:
                    // H0 block -> H1 per sector -> H2 per subgroup -> H3 per group.
                    for h2_index in 0..8 {
                        let mut h2_hasher = Sha1::new();
                        for h1_index in 0..8 {
                            let part_sector =
                                h1_index as u32 + h2_index as u32 * 8 + h3_index as u32 * 64;
                            let h1_hash = if part_sector >= part_sectors {
                                // Past the end of the partition: use the
                                // precomputed zero-sector hash.
                                zero_h1_hash
                            } else {
                                let sector = first_sector + part_sector;
                                stream
                                    .seek(SeekFrom::Start(sector as u64 * SECTOR_SIZE as u64))
                                    .with_context(|| format!("Seeking to sector {}", sector))?;
                                stream
                                    .read_exact(&mut h0_buf)
                                    .with_context(|| format!("Reading sector {}", sector))?;
                                hash_bytes(&h0_buf)
                            };
                            result.h1_hashes[h1_index + h2_index * 8] = h1_hash;
                            h2_hasher.update(h1_hash);
                        }
                        let h2_hash = h2_hasher.finalize().into();
                        result.h2_hashes[h2_index] = h2_hash;
                        h3_hasher.update(h2_hash);
                    }
                    result.h3_hash = h3_hasher.finalize().into();
                    let mut hash_table = mutex.lock().map_err(|_| "Failed to lock mutex")?;
                    hash_table.extend(h3_index, &result);
                    Ok(())
                },
            )?;

            // All workers are done; reclaim sole ownership of the table.
            let hash_table = Arc::try_unwrap(mutex)
                .map_err(|_| "Failed to unwrap Arc")?
                .into_inner()
                .map_err(|_| "Failed to lock mutex")?;
            hash_tables.push(hash_table);
        }
        self.hash_tables = hash_tables;
        log::info!("Rebuilt hashes in {:?}", start.elapsed());
        Ok(())
    }
}
|
||
|
||
impl DiscIO for DiscIOWIA {
|
||
fn open(&self) -> Result<Box<dyn ReadStream + '_>> {
|
||
Ok(Box::new(WIAReadStream::new(self, self.encrypt)?))
|
||
}
|
||
|
||
fn meta(&self) -> Result<DiscMeta> {
|
||
Ok(self.nkit_header.as_ref().map(DiscMeta::from).unwrap_or_default())
|
||
}
|
||
|
||
fn disc_size(&self) -> Option<u64> { Some(self.header.iso_file_size.get()) }
|
||
}
|
||
|
||
/// A read stream over a WIA/RVZ disc image that lazily loads, decompresses and
/// (for Wii partition data) reconstructs one group of data at a time.
pub struct WIAReadStream<'a> {
    /// The disc IO.
    disc_io: &'a DiscIOWIA,
    /// The currently open file handle.
    file: BufReader<File>,
    /// The data read offset.
    offset: u64,
    /// The data offset of the current group. (`u64::MAX` until the first group
    /// is loaded, so any initial read triggers a load.)
    group_offset: u64,
    /// The current group data, fully decompressed / unpacked.
    group_data: Vec<u8>,
    /// Exception lists for the current group.
    exception_lists: Vec<WIAExceptionList>,
    /// The decompressor data.
    decompressor: Decompressor,
    /// Whether to re-encrypt Wii partition data.
    encrypt: bool,
}
|
||
|
||
fn read_exception_lists<R>(
|
||
reader: &mut R,
|
||
partition_index: Option<usize>,
|
||
chunk_size: u32,
|
||
) -> io::Result<Vec<WIAExceptionList>>
|
||
where
|
||
R: Read + ?Sized,
|
||
{
|
||
if partition_index.is_none() {
|
||
return Ok(vec![]);
|
||
}
|
||
|
||
let num_exception_list = (chunk_size as usize).div_ceil(0x200000);
|
||
// log::debug!("Num exception list: {:?}", num_exception_list);
|
||
let mut exception_lists = Vec::with_capacity(num_exception_list);
|
||
for i in 0..num_exception_list {
|
||
let num_exceptions = read_u16_be(reader)?;
|
||
let exceptions: Vec<WIAException> = read_vec(reader, num_exceptions as usize)?;
|
||
if !exceptions.is_empty() {
|
||
log::debug!("Exception list {}: {:?}", i, exceptions);
|
||
}
|
||
exception_lists.push(exceptions);
|
||
}
|
||
Ok(exception_lists)
|
||
}
|
||
|
||
impl<'a> WIAReadStream<'a> {
    /// Creates a read stream over `disc_io`'s file.
    ///
    /// Opens a fresh file handle and constructs the decompressor for the disc's
    /// compression settings. `encrypt` controls whether Wii partition data is
    /// re-encrypted when groups are reconstructed.
    pub fn new(disc_io: &'a DiscIOWIA, encrypt: bool) -> Result<Self> {
        let file = BufReader::new(
            File::open(&disc_io.filename)
                .with_context(|| format!("Opening file {}", disc_io.filename.display()))?,
        );
        let decompressor = Decompressor::new(&disc_io.disc)?;
        let stream = Self {
            disc_io,
            file,
            offset: 0,
            // u64::MAX guarantees the first check_group() sees offset outside
            // the (empty) current group and loads a real one.
            group_offset: u64::MAX,
            group_data: Vec::new(),
            exception_lists: vec![],
            decompressor,
            encrypt,
        };
        Ok(stream)
    }

    /// If the current group does not contain the current offset, load the new group.
    /// Returns false if the offset is not in the disc.
    fn check_group(&mut self) -> io::Result<bool> {
        if self.offset < self.group_offset
            || self.offset >= self.group_offset + self.group_data.len() as u64
        {
            let Some(result) = self.disc_io.group_for_offset(self.offset) else {
                return Ok(false);
            };
            // Guard against an infinite loop: the offset fell outside the
            // loaded group's data, yet the lookup returned the same group.
            if result.disc_offset == self.group_offset {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidData,
                    "Group offset did not change",
                ));
            }
            self.group_offset = result.disc_offset;
            self.read_group(result)?;
        }
        Ok(true)
    }

    /// Reads new group data into the buffer, handling decompression and RVZ packing.
    fn read_group(&mut self, result: GroupResult) -> io::Result<()> {
        // Special case for all-zero data
        if result.group.data_size() == 0 {
            self.exception_lists.clear();
            // The final group may be shorter than a full chunk.
            let size = min(result.chunk_size as u64, result.partition_end - result.partition_offset)
                as usize;
            self.group_data = vec![0u8; size];
            self.recalculate_hashes(result)?;
            return Ok(());
        }

        self.group_data = Vec::with_capacity(result.chunk_size as usize);
        // Group data offsets are stored in units of 4 bytes.
        let group_data_start = result.group.data_offset.get() as u64 * 4;
        self.file.seek(SeekFrom::Start(group_data_start))?;

        // Limit reads to this group's stored data.
        let mut reader = (&mut self.file).take_seek(result.group.data_size() as u64);
        // With no (effective) compression, the exception lists precede the data
        // uncompressed; otherwise they live inside the compressed stream.
        let uncompressed_exception_lists =
            matches!(self.disc_io.disc.compression(), Compression::None | Compression::Purge)
                || !result.group.is_compressed();
        if uncompressed_exception_lists {
            self.exception_lists = read_exception_lists(
                &mut reader,
                result.partition_index,
                self.disc_io.disc.chunk_size.get(), // result.chunk_size?
            )?;
            // Align to 4
            let rem = reader.stream_position()? % 4;
            if rem != 0 {
                reader.seek(SeekFrom::Current((4 - rem) as i64))?;
            }
        }
        // Wrap the remaining stored bytes in the decompressor if needed.
        let mut reader: Box<dyn Read> = if result.group.is_compressed() {
            self.decompressor.wrap(reader)?
        } else {
            Box::new(reader)
        };
        if !uncompressed_exception_lists {
            self.exception_lists = read_exception_lists(
                reader.as_mut(),
                result.partition_index,
                self.disc_io.disc.chunk_size.get(), // result.chunk_size?
            )?;
        }

        if result.group.rvz_packed_size.get() > 0 {
            // Decode RVZ packed data: a sequence of big-endian u32 sizes, each
            // followed either by literal data or (high bit set) a lagged
            // Fibonacci generator seed used to regenerate junk data.
            let mut lfg = LaggedFibonacci::default();
            loop {
                let mut size_bytes = [0u8; 4];
                match reader.read_exact(&mut size_bytes) {
                    // EOF here is the normal end of the packed stream.
                    Ok(_) => {}
                    Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => break,
                    Err(e) => {
                        return Err(io::Error::new(e.kind(), "Failed to read RVZ packed size"));
                    }
                }
                let size = u32::from_be_bytes(size_bytes);
                let cur_data_len = self.group_data.len();
                if size & 0x80000000 != 0 {
                    // Junk data
                    let size = size & 0x7FFFFFFF;
                    lfg.init_with_reader(reader.as_mut())?;
                    // Advance the generator to this position's phase within the sector.
                    lfg.skip(
                        ((result.partition_offset + cur_data_len as u64) % SECTOR_SIZE as u64)
                            as usize,
                    );
                    self.group_data.resize(cur_data_len + size as usize, 0);
                    lfg.fill(&mut self.group_data[cur_data_len..]);
                } else {
                    // Real data
                    self.group_data.resize(cur_data_len + size as usize, 0);
                    reader.read_exact(&mut self.group_data[cur_data_len..])?;
                }
            }
        } else {
            // Read and decompress data
            reader.read_to_end(&mut self.group_data)?;
        }

        // Release the borrow on self.file before mutating self again.
        drop(reader);
        self.recalculate_hashes(result)?;
        Ok(())
    }

    /// Re-inserts sector hash data (and optionally encryption) into the group
    /// buffer for Wii partition data; non-partition data is left untouched.
    fn recalculate_hashes(&mut self, result: GroupResult) -> io::Result<()> {
        let Some(partition_index) = result.partition_index else {
            // Data not inside a Wii partition
            return Ok(());
        };
        // May be absent if rebuild_hashes() hasn't been run; H1/H2 are then skipped.
        let hash_table = self.disc_io.hash_tables.get(partition_index);

        if self.group_data.len() % BLOCK_SIZE != 0 {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                format!("Invalid group data size: {:#X}", self.group_data.len()),
            ));
        }

        // WIA/RVZ excludes the hash data for each sector, instead storing all data contiguously.
        // We need to add space for the hash data, and then recalculate the hashes.
        let num_sectors = self.group_data.len() / BLOCK_SIZE;
        let mut out = vec![0u8; num_sectors * SECTOR_SIZE];
        for i in 0..num_sectors {
            let data = array_ref![self.group_data, i * BLOCK_SIZE, BLOCK_SIZE];
            let out = array_ref_mut![out, i * SECTOR_SIZE, SECTOR_SIZE];

            // Rebuild H0 hashes: 31 SHA-1 hashes, one per 0x400-byte unit of block data.
            for n in 0..31 {
                let hash = hash_bytes(array_ref![data, n * 0x400, 0x400]);
                array_ref_mut![out, n * 20, 20].copy_from_slice(&hash);
            }

            // Copy data (block content follows the 0x400-byte hash area).
            array_ref_mut![out, 0x400, BLOCK_SIZE].copy_from_slice(data);

            // Rebuild H1 and H2 hashes if available
            if let Some(hash_table) = hash_table {
                let partition = &self.disc_io.partitions[partition_index];
                // This sector's index relative to the start of the partition data.
                let part_sector = (result.disc_offset / SECTOR_SIZE as u64) as usize + i
                    - partition.partition_data[0].first_sector.get() as usize;
                // H1 hashes cover this sector's 8-sector subgroup (at 0x280).
                let h1_start = part_sector & !7;
                for i in 0..8 {
                    array_ref_mut![out, 0x280 + i * 20, 20]
                        .copy_from_slice(&hash_table.h1_hashes[h1_start + i]);
                }
                // H2 hashes cover this sector's 64-sector group (at 0x340).
                let h2_start = (h1_start / 8) & !7;
                for i in 0..8 {
                    array_ref_mut![out, 0x340 + i * 20, 20]
                        .copy_from_slice(&hash_table.h2_hashes[h2_start + i]);
                }

                if self.encrypt {
                    // Re-encrypt hashes and data. The hash area uses a zero IV;
                    // the data area's IV is taken from bytes 0x3d0..0x3e0 of the
                    // (encrypted) hash area.
                    aes_encrypt(&partition.partition_key, [0u8; 16], &mut out[..HASHES_SIZE]);
                    let iv = *array_ref![out, 0x3d0, 16];
                    aes_encrypt(&partition.partition_key, iv, &mut out[HASHES_SIZE..]);
                }
            }
        }

        self.group_data = out;
        Ok(())
    }
}
|
||
|
||
impl<'a> Read for WIAReadStream<'a> {
|
||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||
let mut rem = buf.len();
|
||
let mut read: usize = 0;
|
||
|
||
// Special case: First 0x80 bytes are stored in the disc header
|
||
if self.offset < DISC_HEAD_SIZE as u64 {
|
||
let to_read = min(rem, DISC_HEAD_SIZE);
|
||
buf[read..read + to_read].copy_from_slice(
|
||
&self.disc_io.disc.disc_head[self.offset as usize..self.offset as usize + to_read],
|
||
);
|
||
rem -= to_read;
|
||
read += to_read;
|
||
self.offset += to_read as u64;
|
||
}
|
||
|
||
// Decompress groups and read data
|
||
while rem > 0 {
|
||
if !self.check_group()? {
|
||
break;
|
||
}
|
||
let group_offset = (self.offset - self.group_offset) as usize;
|
||
let to_read = min(rem, self.group_data.len() - group_offset);
|
||
buf[read..read + to_read]
|
||
.copy_from_slice(&self.group_data[group_offset..group_offset + to_read]);
|
||
rem -= to_read;
|
||
read += to_read;
|
||
self.offset += to_read as u64;
|
||
}
|
||
Ok(read)
|
||
}
|
||
}
|
||
|
||
impl<'a> Seek for WIAReadStream<'a> {
|
||
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
|
||
self.offset = match pos {
|
||
SeekFrom::Start(v) => v,
|
||
SeekFrom::End(v) => self.disc_io.header.iso_file_size.get().saturating_add_signed(v),
|
||
SeekFrom::Current(v) => self.offset.saturating_add_signed(v),
|
||
};
|
||
self.check_group()?;
|
||
Ok(self.offset)
|
||
}
|
||
|
||
fn stream_position(&mut self) -> io::Result<u64> { Ok(self.offset) }
|
||
}
|
||
|
||
impl<'a> ReadStream for WIAReadStream<'a> {
|
||
fn stable_stream_len(&mut self) -> io::Result<u64> {
|
||
Ok(self.disc_io.header.iso_file_size.get())
|
||
}
|
||
|
||
fn as_dyn(&mut self) -> &mut dyn ReadStream { self }
|
||
}
|