Initial commit

This commit is contained in:
Luke Street 2021-08-23 09:48:35 -04:00
commit 20bed46d43
16 changed files with 1910 additions and 0 deletions

28
.github/workflows/build.yaml vendored Normal file
View File

@ -0,0 +1,28 @@
name: build
on: [ push, pull_request ]
jobs:
default:
name: Default
strategy:
matrix:
platform: [ ubuntu-latest, macos-latest, windows-latest ]
toolchain: [ stable, 1.35.0, nightly ]
runs-on: ${{ matrix.platform }}
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.toolchain }}
override: true
- uses: actions-rs/cargo@v1
with:
command: build
args: --release --all-features
- uses: actions/upload-artifact@v2
with:
name: ${{ matrix.platform }}
path: |
target/release/nodtool
target/release/nodtool.exe

2
.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
/target
Cargo.lock

28
Cargo.toml Normal file
View File

@ -0,0 +1,28 @@
[package]
name = "nod"
version = "0.1.0"
edition = "2018"
authors = ["Luke Street <luke@street.dev>"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/encounter/nod-rs"
documentation = "https://docs.rs/nod"
readme = "README.md"
description = """
Rust library and CLI tool for reading GameCube and Wii disc images.
"""
[[bin]]
name = "nodtool"
path = "src/bin.rs"
[profile.release]
lto = true
[dependencies]
aes = "0.7.4"
binread = "2.1.1"
block-modes = "0.8.1"
clap = "2.33.3"
encoding_rs = "0.8.28"
file-size = "1.0.3"
sha-1 = "0.9.7"

201
LICENSE-APACHE Normal file
View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2018, 2019, 2020 Michael Sanders.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

21
LICENSE-MIT Normal file
View File

@ -0,0 +1,21 @@
MIT License
Copyright 2018, 2019, 2020 Michael Sanders.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

66
README.md Normal file
View File

@ -0,0 +1,66 @@
# nod-rs [![Build Status]][actions] [![Latest Version]][crates.io] [![Api Rustdoc]][rustdoc] [![Rust](https://img.shields.io/badge/rust-1.35%2B-blue.svg?maxAge=3600)](https://blog.rust-lang.org/2019/05/23/Rust-1.35.0.html)
[Build Status]: https://github.com/encounter/nod-rs/workflows/build/badge.svg
[actions]: https://github.com/encounter/nod-rs/actions
[Latest Version]: https://img.shields.io/crates/v/nod.svg
[crates.io]: https://crates.io/crates/nod
[Api Rustdoc]: https://img.shields.io/badge/api-rustdoc-blue.svg
[rustdoc]: https://docs.rs/nod
Library for traversing & reading GameCube and Wii disc images.
Based on the C++ library [nod](https://github.com/AxioDL/nod),
but does not currently support authoring.
Currently supported file formats:
- ISO
- NFS (Wii U VC files, e.g. `hif_000000.nfs`)
### CLI tool
This crate includes a CLI tool `nodtool`, which can be used to extract disc images to a specified directory:
```shell
nodtool extract /path/to/game.iso [outdir]
```
For Wii U VC titles, use `content/hif_*.nfs`:
```shell
nodtool extract /path/to/game/content/hif_000000.nfs [outdir]
```
### Library example
Opening a disc image and reading a file:
```rust
use nod::disc::{new_disc_base, PartHeader};
use nod::fst::NodeType;
use nod::io::new_disc_io;
use std::io::Read;
let mut disc_io = new_disc_io("path/to/file".as_ref())?;
let disc_base = new_disc_base(disc_io.as_mut())?;
let mut partition = disc_base.get_data_partition(disc_io.as_mut())?;
let header = partition.read_header()?;
if let Some(NodeType::File(node)) = header.find_node("/MP3/Worlds.txt") {
let mut s = String::new();
partition.begin_file_stream(node)?.read_to_string(&mut s)?;
println!("{}", s);
}
```
### License
Licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any
additional terms or conditions.

97
src/bin.rs Normal file
View File

@ -0,0 +1,97 @@
#![feature(with_options)]
use std::{env, fs, io};
use std::io::BufWriter;
use std::path::{Path, PathBuf};
use std::time::Instant;
use clap::{AppSettings, clap_app};
use file_size;
use nod::Result;
use nod::disc::{new_disc_base, PartReadStream};
use nod::fst::NodeType;
use nod::io::{has_extension, new_disc_io};
/// CLI entry point: parses arguments with `clap_app!` and dispatches to the
/// requested subcommand (currently only `extract`).
fn main() -> Result<()> {
    let matches = clap_app!(nodtool =>
        (settings: &[
            AppSettings::SubcommandRequiredElseHelp,
            AppSettings::GlobalVersion,
            AppSettings::DeriveDisplayOrder,
            AppSettings::VersionlessSubcommands,
        ])
        (global_settings: &[
            AppSettings::ColoredHelp,
            AppSettings::UnifiedHelpMessage,
        ])
        (version: env!("CARGO_PKG_VERSION"))
        (author: "Luke Street <luke@street.dev>")
        (about: "Tool for reading GameCube and Wii disc images.")
        (long_about: "Tool for reading GameCube and Wii disc images.
Based on <https://github.com/AxioDL/nod>, original authors:
Jack Andersen (jackoalan)
Phillip Stephens (Antidote)")
        (@subcommand extract =>
            (about: "Extract GameCube & Wii disc images")
            (@arg FILE: +required "Path to disc image (ISO or NFS)")
            (@arg DIR: "Output directory (optional)")
            (@arg quiet: -q "Quiet output")
        )
    ).get_matches();
    if let Some(matches) = matches.subcommand_matches("extract") {
        let file: PathBuf = PathBuf::from(matches.value_of("FILE").unwrap());
        // Pick an output directory: explicit DIR wins, otherwise derive one
        // from the input path.
        let output_dir: PathBuf;
        if let Some(dir) = matches.value_of("DIR") {
            output_dir = PathBuf::from(dir);
        } else if has_extension(file.as_path(), "nfs") {
            // Special logic to extract from content/hif_*.nfs to extracted/..
            if let Some(parent) = file.parent() {
                output_dir = parent.with_file_name("extracted");
            } else {
                // No parent directory (bare filename): fall back to the stem.
                output_dir = file.with_extension("");
            }
        } else {
            // Default: extract next to the image, into a directory named after it.
            output_dir = file.with_extension("");
        }
        let mut disc_io = new_disc_io(file.as_path())?;
        let disc_base = new_disc_base(disc_io.as_mut())?;
        let mut partition = disc_base.get_data_partition(disc_io.as_mut())?;
        let header = partition.read_header()?;
        extract_node(header.root_node(), partition.as_mut(), output_dir.as_path())?;
    }
    Result::Ok(())
}
/// Recursively extracts `node` (file or directory) beneath `base_path`.
///
/// Files are streamed out of the partition through a writer buffered to the
/// partition's ideal block size; directories are created on disk and their
/// children extracted in order.
fn extract_node(node: &NodeType, partition: &mut dyn PartReadStream, base_path: &Path) -> io::Result<()> {
    match node {
        NodeType::File(file_node) => {
            let file_path = base_path.join(file_node.name.as_ref());
            // Print the name first so progress is visible while copying.
            print!("Extracted {}", file_path.to_string_lossy());
            let started = Instant::now();
            let out = fs::File::create(file_path)?;
            let mut writer = BufWriter::with_capacity(partition.ideal_buffer_size(), out);
            io::copy(&mut partition.begin_file_stream(file_node)?, &mut writer)?;
            println!(
                " (time: {:.2?}, size: {})",
                started.elapsed(),
                file_size::fit_4(file_node.length as u64)
            );
        }
        NodeType::Directory(dir_node, children) => {
            // The FST root node has an empty name; extract straight into base_path.
            let target = if dir_node.name.is_empty() {
                base_path.to_owned()
            } else {
                base_path.join(dir_node.name.as_ref())
            };
            fs::create_dir_all(&target)?;
            for child in children {
                extract_node(child, partition, target.as_path())?;
            }
        }
    }
    Ok(())
}

139
src/disc/gcn.rs Normal file
View File

@ -0,0 +1,139 @@
use std::io;
use std::io::{Read, Seek, SeekFrom};
use binread::prelude::*;
use crate::{div_rem, Result};
use crate::disc::{BI2Header, BUFFER_SIZE, DiscBase, DiscIO, Header, PartHeader, PartReadStream};
use crate::fst::{find_node, Node, node_parser, NodeKind, NodeType};
use crate::streams::{ReadStream, SharedWindowedReadStream};
/// GameCube disc implementation: a plain (unencrypted, unpartitioned) image
/// described entirely by its parsed header.
pub(crate) struct DiscGCN {
    pub(crate) header: Header,
}
/// Builds a [`DiscGCN`] from an already-parsed disc header.
pub(crate) fn new_disc_gcn(header: Header) -> Result<DiscGCN> {
    let disc = DiscGCN { header };
    Ok(disc)
}
impl DiscBase for DiscGCN {
    /// Borrows the header parsed at open time.
    fn get_header(&self) -> &Header {
        &self.header
    }

    /// GameCube images are unpartitioned, so the "data partition" is simply a
    /// block-cached view over the whole image starting at offset 0.
    fn get_data_partition<'a>(&self, disc_io: &'a mut dyn DiscIO) -> Result<Box<dyn PartReadStream + 'a>> {
        let stream = disc_io.begin_read_stream(0)?;
        Ok(Box::from(GCPartReadStream {
            stream,
            offset: 0,
            cur_block: u64::MAX,
            buf: [0; BUFFER_SIZE],
        }))
    }
}
/// Read adapter over a raw GameCube image that caches one block at a time.
struct GCPartReadStream<'a> {
    /// Underlying full-image stream.
    stream: Box<dyn ReadStream + 'a>,
    /// Current logical read offset into the image.
    offset: u64,
    /// Index of the block currently held in `buf`; `u64::MAX` marks the
    /// cache as invalid (forces a refill on the next read).
    cur_block: u64,
    /// One-block (`BUFFER_SIZE`-byte) cache.
    buf: [u8; BUFFER_SIZE],
}
impl<'a> Read for GCPartReadStream<'a> {
    /// Serves `buf` from the internal one-block cache, refilling it from the
    /// underlying stream whenever the request crosses into an uncached block.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        // Split the logical offset into (block index, offset within block).
        let (mut block, mut block_offset) = div_rem(self.offset as usize, BUFFER_SIZE);
        let mut rem = buf.len();
        let mut read: usize = 0;
        while rem > 0 {
            if block != self.cur_block as usize {
                // NOTE(review): the byte count returned by read() is ignored,
                // so a short read would leave stale bytes in the cache — this
                // relies on the source always filling the whole block. Confirm
                // or switch to a loop/read_exact with explicit EOF handling.
                self.stream.read(&mut self.buf)?;
                self.cur_block = block as u64;
            }
            // Copy at most up to the end of the cached block.
            let mut cache_size = rem;
            if cache_size + block_offset > BUFFER_SIZE {
                cache_size = BUFFER_SIZE - block_offset;
            }
            buf[read..read + cache_size]
                .copy_from_slice(&self.buf[block_offset..block_offset + cache_size]);
            read += cache_size;
            rem -= cache_size;
            // Any subsequent block is consumed from its start.
            block_offset = 0;
            block += 1;
        }
        self.offset += buf.len() as u64;
        // Always reports a full read; end-of-image handling is left to callers.
        io::Result::Ok(buf.len())
    }
}
impl<'a> Seek for GCPartReadStream<'a> {
    /// Updates the logical offset and, if the target falls in a different
    /// block, aligns the underlying stream to that block's start and
    /// invalidates the cache so the next `read()` refills it.
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        self.offset = match pos {
            SeekFrom::Start(v) => v,
            SeekFrom::End(v) => (self.stream_len()? as i64 + v) as u64,
            SeekFrom::Current(v) => (self.offset as i64 + v) as u64,
        };
        let block = self.offset / BUFFER_SIZE as u64;
        if block != self.cur_block {
            self.stream.seek(SeekFrom::Start(block * BUFFER_SIZE as u64))?;
            // Mark the cache invalid; read() will reload the new block.
            self.cur_block = u64::MAX;
        }
        io::Result::Ok(self.offset)
    }
    /// Length of the underlying stream (the whole image).
    fn stream_len(&mut self) -> io::Result<u64> {
        self.stream.stream_len()
    }
    /// Logical position, tracked independently of the block-aligned position
    /// of the underlying stream.
    fn stream_position(&mut self) -> io::Result<u64> {
        io::Result::Ok(self.offset)
    }
}
// Marker impl: no ReadStream members are overridden for the GC stream.
impl<'a> ReadStream for GCPartReadStream<'a> {}
impl<'a> PartReadStream for GCPartReadStream<'a> {
    /// Positions the stream at `node`'s data and returns a window limited to
    /// exactly that file's byte range.
    fn begin_file_stream(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream> {
        assert_eq!(node.kind, NodeKind::File);
        let begin = node.offset as u64;
        let end = begin + node.length as u64;
        self.seek(SeekFrom::Start(begin))?;
        Ok(SharedWindowedReadStream { base: self, begin, end })
    }

    /// Parses the disc header, BI2, and FST from the start of the image.
    fn read_header(&mut self) -> Result<Box<dyn PartHeader>> {
        self.seek(SeekFrom::Start(0))?;
        let part: GCPartition = self.read_be()?;
        Ok(Box::from(part))
    }

    /// GameCube data blocks are a full `BUFFER_SIZE` (0x8000) bytes.
    fn ideal_buffer_size(&self) -> usize {
        BUFFER_SIZE
    }
}
/// GameCube partition layout: disc header and BI2 at the start of the image,
/// then the FST parsed from the `fst_off` named in that header.
#[derive(Clone, Debug, PartialEq, BinRead)]
pub(crate) struct GCPartition {
    header: Header,
    bi2_header: BI2Header,
    // Jump to the FST offset from the just-parsed header, then parse the tree.
    #[br(seek_before = SeekFrom::Start(header.fst_off as u64))]
    #[br(parse_with = node_parser)]
    root_node: NodeType,
}
impl PartHeader for GCPartition {
    /// Root of the parsed file system table.
    fn root_node(&self) -> &NodeType {
        &self.root_node
    }
    /// Path lookup, delegated to the shared FST walker.
    fn find_node(&self, path: &str) -> Option<&NodeType> {
        find_node(&self.root_node, path)
    }
}

182
src/disc/mod.rs Normal file
View File

@ -0,0 +1,182 @@
//! Disc type related logic (GameCube, Wii)
use std::fmt::Debug;
use std::io;
use binread::{BinReaderExt, NullString, prelude::*};
use crate::{Error, Result};
use crate::disc::{gcn::new_disc_gcn, wii::new_disc_wii};
use crate::fst::{Node, NodeType};
use crate::io::DiscIO;
use crate::streams::{ReadStream, SharedWindowedReadStream};
pub(crate) mod gcn;
pub(crate) mod wii;
/// Shared GameCube & Wii disc header
/// Shared GameCube & Wii disc header
#[derive(Clone, Debug, PartialEq, BinRead)]
pub struct Header {
    /// 6-byte game ID (layout: game code + region + maker — TODO confirm)
    pub game_id: [u8; 6],
    /// Used in multi-disc games
    pub disc_num: u8,
    pub disc_version: u8,
    pub audio_streaming: u8,
    pub audio_stream_buf_size: u8,
    #[br(pad_before(14))]
    /// If this is a Wii disc, this will be 0x5D1C9EA3
    pub wii_magic: u32,
    /// If this is a GameCube disc, this will be 0xC2339F3D
    pub gcn_magic: u32,
    /// Null-terminated title, padded to 64 bytes on disc
    #[br(pad_size_to(64), map = NullString::into_string)]
    pub game_title: String,
    /// Disable hash verification
    pub disable_hash_verification: u8,
    /// Disable disc encryption and H3 hash table loading and verification
    pub disable_disc_enc: u8,
    #[br(pad_before(0x39e))]
    pub debug_mon_off: u32,
    pub debug_load_addr: u32,
    #[br(pad_before(0x18))]
    /// Offset to main DOL (Wii: >> 2)
    pub dol_off: u32,
    /// Offset to file system table (Wii: >> 2)
    pub fst_off: u32,
    /// File system size
    pub fst_sz: u32,
    /// File system max size
    pub fst_max_sz: u32,
    pub fst_memory_address: u32,
    pub user_position: u32,
    #[br(pad_after(4))]
    pub user_sz: u32,
}
/// "BI2" block that follows the disc header (debug/region boot info —
/// several fields are unidentified here).
#[derive(Debug, PartialEq, BinRead, Copy, Clone)]
pub(crate) struct BI2Header {
    pub(crate) debug_monitor_size: i32,
    pub(crate) sim_mem_size: i32,
    pub(crate) arg_offset: u32,
    pub(crate) debug_flag: u32,
    pub(crate) trk_address: u32,
    pub(crate) trk_size: u32,
    pub(crate) country_code: u32,
    pub(crate) unk1: u32,
    pub(crate) unk2: u32,
    pub(crate) unk3: u32,
    pub(crate) dol_limit: u32,
    // Padding after unk4 fills the block out to its fixed on-disc size.
    #[br(pad_after(0x1fd0))]
    pub(crate) unk4: u32,
}
/// Raw disc block size in bytes (0x8000 = 32 KiB).
pub(crate) const BUFFER_SIZE: usize = 0x8000;
/// Contains a disc's header & partition information.
///
/// (GameCube images are unpartitioned; their implementation exposes the whole
/// image as a single data "partition".)
pub trait DiscBase {
    /// Retrieves the disc's header.
    fn get_header(&self) -> &Header;
    /// Opens a new partition read stream for the first data partition.
    ///
    /// # Examples
    ///
    /// Basic usage:
    /// ```
    /// use nod::disc::new_disc_base;
    /// use nod::io::new_disc_io;
    ///
    /// let mut disc_io = new_disc_io("path/to/file".as_ref())?;
    /// let disc_base = new_disc_base(disc_io.as_mut())?;
    /// let mut partition = disc_base.get_data_partition(disc_io.as_mut())?;
    /// ```
    fn get_data_partition<'a>(&self, disc_io: &'a mut dyn DiscIO) -> Result<Box<dyn PartReadStream + 'a>>;
}
/// Creates a new [`DiscBase`] instance.
///
/// # Examples
///
/// Basic usage:
/// ```
/// use nod::io::new_disc_io;
/// use nod::disc::new_disc_base;
///
/// let mut disc_io = new_disc_io("path/to/file".as_ref())?;
/// let disc_base = new_disc_base(disc_io.as_mut())?;
/// disc_base.get_header();
/// ```
pub fn new_disc_base(disc_io: &mut dyn DiscIO) -> Result<Box<dyn DiscBase>> {
let mut stream = disc_io.begin_read_stream(0)?;
let header: Header = stream.read_be()?;
if header.wii_magic == 0x5D1C9EA3 {
Result::Ok(Box::from(new_disc_wii(stream.as_mut(), header)?))
} else if header.gcn_magic == 0xC2339F3D {
Result::Ok(Box::from(new_disc_gcn(header)?))
} else {
Result::Err(Error::DiscFormat("Invalid GC/Wii magic".to_string()))
}
}
/// An open read stream for a disc partition.
pub trait PartReadStream: ReadStream {
    /// Seeks the read stream to the specified file system node
    /// and returns a windowed stream.
    ///
    /// # Examples
    ///
    /// Basic usage:
    /// ```
    /// use nod::disc::{new_disc_base, PartHeader};
    /// use nod::fst::NodeType;
    /// use nod::io::new_disc_io;
    /// use std::io::Read;
    ///
    /// let mut disc_io = new_disc_io("path/to/file".as_ref())?;
    /// let disc_base = new_disc_base(disc_io.as_mut())?;
    /// let mut partition = disc_base.get_data_partition(disc_io.as_mut())?;
    /// let header = partition.read_header()?;
    /// if let Some(NodeType::File(node)) = header.find_node("/MP3/Worlds.txt") {
    ///     let mut s = String::new();
    ///     partition.begin_file_stream(node)?.read_to_string(&mut s)?;
    ///     println!("{}", s);
    /// }
    /// ```
    fn begin_file_stream(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream>;
    /// Reads the partition header and file system table.
    fn read_header(&mut self) -> Result<Box<dyn PartHeader>>;
    /// The ideal size for buffered reads from this partition.
    /// GameCube discs have a data block size of 0x8000,
    /// whereas Wii discs have a data block size of 0x7c00.
    fn ideal_buffer_size(&self) -> usize;
}
/// Disc partition header with file system table.
pub trait PartHeader: Debug {
    /// The root node for the filesystem.
    fn root_node(&self) -> &NodeType;
    /// Finds a particular file or directory by path.
    ///
    /// # Examples
    ///
    /// Basic usage:
    /// ```
    /// use nod::disc::{new_disc_base, PartHeader};
    /// use nod::fst::NodeType;
    /// use nod::io::new_disc_io;
    ///
    /// let mut disc_io = new_disc_io("path/to/file".as_ref())?;
    /// let disc_base = new_disc_base(disc_io.as_mut())?;
    /// let mut partition = disc_base.get_data_partition(disc_io.as_mut())?;
    /// let header = partition.read_header()?;
    /// if let Some(NodeType::File(node)) = header.find_node("/MP1/Metroid1.pak") {
    ///     println!("{}", node.name);
    /// }
    /// if let Some(NodeType::Directory(node, children)) = header.find_node("/MP1") {
    ///     println!("Number of files: {}", children.len());
    /// }
    /// ```
    fn find_node(&self, path: &str) -> Option<&NodeType>;
}

403
src/disc/wii.rs Normal file
View File

@ -0,0 +1,403 @@
use std::{io, io::{Read, Seek, SeekFrom}};
use aes::{Aes128, NewBlockCipher, Block};
use binread::prelude::*;
use block_modes::{block_padding::NoPadding, BlockMode, Cbc};
use sha1::{digest, Digest, Sha1};
use crate::disc::{BI2Header, BUFFER_SIZE, DiscBase, DiscIO, Header, PartHeader, PartReadStream};
use crate::{Error, div_rem, Result, array_ref};
use crate::fst::{find_node, Node, NodeKind, NodeType, node_parser};
use crate::streams::{OwningWindowedReadStream, ReadStream, SharedWindowedReadStream};
type Aes128Cbc = Cbc<Aes128, NoPadding>;
const BLOCK_SIZE: usize = 0x7c00;
const BUFFER_OFFSET: usize = BUFFER_SIZE - BLOCK_SIZE;
const COMMON_KEYS: [[u8; 16]; 2] = [
/* Normal */
[0xeb, 0xe4, 0x2a, 0x22, 0x5e, 0x85, 0x93, 0xe4, 0x48, 0xd9, 0xc5, 0x45, 0x73, 0x81, 0xaa, 0xf7],
/* Korean */
[0x63, 0xb8, 0x2b, 0xb4, 0xf4, 0x61, 0x4e, 0x2e, 0x13, 0xf2, 0xfe, 0xfb, 0xba, 0x4c, 0x9b, 0x7e],
];
/// Partition kind, as stored in the partition table (big-endian u32).
#[derive(Debug, PartialEq, BinRead)]
#[br(repr = u32)]
enum WiiPartType {
    Data,
    Update,
    Channel,
}
/// Signature algorithm tag used by tickets, TMDs, and certificates.
#[derive(Debug, PartialEq, BinRead)]
#[br(repr = u32)]
enum SigType {
    Rsa4096 = 0x00010000,
    Rsa2048 = 0x00010001,
    EllipticalCurve = 0x00010002,
}
/// Public key algorithm tag used by certificates.
#[derive(Debug, PartialEq, BinRead)]
#[br(repr = u32)]
enum KeyType {
    Rsa4096 = 0x00000000,
    Rsa2048 = 0x00000001,
}
/// One entry of the Wii partition table.
#[derive(Debug, PartialEq, BinRead)]
struct WiiPart {
    // Offsets on Wii discs are stored >> 2; expand to a byte offset.
    #[br(map = | x: u32 | (x as u64) << 2)]
    part_data_off: u64,
    part_type: WiiPartType,
    // Parse the partition header at its offset, then restore the table position.
    #[br(restore_position, args(part_data_off))]
    part_header: WiiPartitionHeader,
}
/// Partition table, read from fixed disc offset 0x40000.
#[derive(Debug, PartialEq, BinRead)]
struct WiiPartInfo {
    #[br(seek_before = SeekFrom::Start(0x40000))]
    part_count: u32,
    // Table offset is stored >> 2.
    #[br(map = | x: u32 | (x as u64) << 2)]
    part_info_off: u64,
    #[br(seek_before = SeekFrom::Start(part_info_off), count = part_count)]
    parts: Vec<WiiPart>,
}
/// One time-limit entry from a ticket (enable flag + limit value).
#[derive(Debug, PartialEq, BinRead)]
struct TicketTimeLimit {
    enable_time_limit: u32,
    time_limit: u32,
}
/// Wii ticket: signed structure carrying the title key for a partition.
#[derive(Debug, PartialEq, BinRead)]
struct Ticket {
    sig_type: SigType,
    #[br(count = 256)]
    sig: Vec<u8>,
    #[br(pad_before = 60, count = 64)]
    sig_issuer: Vec<u8>,
    #[br(count = 60)]
    ecdh: Vec<u8>,
    /// AES-encrypted title key; decrypted in place by
    /// `DiscWii::decrypt_partition_keys`.
    #[br(pad_before = 3)]
    enc_key: [u8; 16],
    #[br(pad_before = 1)]
    ticket_id: [u8; 8],
    console_id: [u8; 4],
    /// First 8 bytes double as the IV for title-key decryption.
    title_id: [u8; 8],
    #[br(pad_before = 2)]
    ticket_version: u16,
    permitted_titles_mask: u32,
    permit_mask: u32,
    title_export_allowed: u8,
    /// Index into `COMMON_KEYS` (0 = normal, 1 = Korean).
    common_key_idx: u8,
    #[br(pad_before = 48, count = 64)]
    content_access_permissions: Vec<u8>,
    #[br(pad_before = 2, count = 8)]
    time_limits: Vec<TicketTimeLimit>,
}
/// One content record in a TMD: id/index, type, size, and SHA-1 hash.
#[derive(Debug, PartialEq, BinRead)]
struct TMDContent {
    id: u32,
    index: u16,
    content_type: u16,
    size: u64,
    hash: [u8; 20],
}
/// Title metadata (TMD): signed description of a title and its contents.
#[derive(Debug, PartialEq, BinRead)]
struct TMD {
    sig_type: SigType,
    #[br(count = 256)]
    sig: Vec<u8>,
    #[br(pad_before = 60, count = 64)]
    sig_issuer: Vec<u8>,
    version: u8,
    ca_crl_version: u8,
    signer_crl_version: u8,
    #[br(pad_before = 1)]
    ios_id_major: u32,
    ios_id_minor: u32,
    title_id_major: u32,
    // NOTE(review): [char; 4] — verify how binread decodes this (chars are
    // 4 bytes each in memory); a [u8; 4] ASCII field may be intended.
    title_id_minor: [char; 4],
    title_type: u32,
    group_id: u16,
    #[br(pad_before = 62)]
    access_flags: u32,
    title_version: u16,
    num_contents: u16,
    #[br(pad_after = 2)]
    boot_idx: u16,
    #[br(count = num_contents)]
    contents: Vec<TMDContent>,
}
/// Certificate from the partition's certificate chain; signature and key
/// lengths depend on the declared algorithm tags.
#[derive(Debug, PartialEq, BinRead)]
struct Certificate {
    sig_type: SigType,
    #[br(count = if sig_type == SigType::Rsa4096 { 512 }
    else if sig_type == SigType::Rsa2048 { 256 }
    else if sig_type == SigType::EllipticalCurve { 64 } else { 0 })]
    sig: Vec<u8>,
    #[br(pad_before = 60, count = 64)]
    issuer: Vec<u8>,
    key_type: KeyType,
    #[br(count = 64)]
    subject: Vec<u8>,
    #[br(count = if key_type == KeyType::Rsa4096 { 512 } else if key_type == KeyType::Rsa2048 { 256 } else { 0 })]
    key: Vec<u8>,
    modulus: u32,
    #[br(pad_after = 52)]
    pub_exp: u32,
}
/// Partition header parsed relative to `partition_off`: ticket, TMD,
/// certificate chain, H3 hash table, and the location/size of the data area.
/// All stored offsets are shifted right by 2 on disc.
#[derive(Debug, PartialEq, BinRead)]
#[br(import(partition_off: u64))]
struct WiiPartitionHeader {
    #[br(seek_before = SeekFrom::Start(partition_off))]
    ticket: Ticket,
    tmd_size: u32,
    #[br(map = | x: u32 | ((x as u64) << 2) + partition_off)]
    tmd_off: u64,
    cert_chain_size: u32,
    #[br(map = | x: u32 | ((x as u64) << 2) + partition_off)]
    cert_chain_off: u64,
    #[br(map = | x: u32 | ((x as u64) << 2) + partition_off)]
    global_hash_table_off: u64,
    #[br(map = | x: u32 | ((x as u64) << 2) + partition_off)]
    data_off: u64,
    #[br(map = | x: u32 | (x as u64) << 2)]
    data_size: u64,
    #[br(seek_before = SeekFrom::Start(tmd_off))]
    tmd: TMD,
    #[br(seek_before = SeekFrom::Start(cert_chain_off))]
    ca_cert: Certificate,
    tmd_cert: Certificate,
    ticket_cert: Certificate,
    // H3 hash table: fixed 0x18000 bytes.
    #[br(seek_before = SeekFrom::Start(global_hash_table_off), count = 0x18000)]
    h3_data: Vec<u8>,
}
/// Wii disc implementation: shared header plus the parsed partition table.
pub(crate) struct DiscWii {
    header: Header,
    part_info: WiiPartInfo,
}
/// Reads the Wii partition table from `stream` and decrypts each partition's
/// title key up front. `header` was already parsed by the caller; the
/// partition-table parse seeks absolutely, so the stream position on entry
/// does not matter.
// Note: the `mut` binding is kept — `read_be` may auto-ref the `&mut dyn`
// reference itself, which requires a mutable binding.
pub(crate) fn new_disc_wii(mut stream: &mut dyn ReadStream, header: Header) -> Result<DiscWii> {
    let mut disc = DiscWii {
        header,
        part_info: stream.read_be()?,
    };
    // Title keys arrive AES-encrypted; decrypt them once, in place.
    disc.decrypt_partition_keys()?;
    Result::Ok(disc)
}
impl DiscWii {
    /// Decrypts each partition's AES-128 title key in place.
    ///
    /// The key is encrypted with the console common key (normal or Korean,
    /// selected by `common_key_idx`) using AES-128-CBC; the IV is the first
    /// 8 bytes of the ticket's title ID, zero-padded to 16 bytes.
    pub(crate) fn decrypt_partition_keys(&mut self) -> Result<()> {
        for part in self.part_info.parts.as_mut_slice() {
            let ticket = &mut part.part_header.ticket;
            let mut iv: [u8; 16] = [0; 16];
            iv[..8].copy_from_slice(&ticket.title_id);
            // NoPadding: the key is exactly one AES block.
            Aes128Cbc::new(
                Aes128::new(&COMMON_KEYS[ticket.common_key_idx as usize].into()),
                &iv.into(),
            ).decrypt(&mut ticket.enc_key)?;
        }
        Result::Ok(())
    }
}
impl DiscBase for DiscWii {
    fn get_header(&self) -> &Header {
        &self.header
    }
    /// Opens a read stream over the data partition, windowed to the
    /// partition's data area, decrypting on the fly when the format
    /// uses standard Wii encryption.
    fn get_data_partition<'a>(&self, disc_io: &'a mut dyn DiscIO) -> Result<Box<dyn PartReadStream + 'a>> {
        let part = self.part_info.parts.iter().find(|v| v.part_type == WiiPartType::Data)
            .ok_or(Error::DiscFormat("Failed to locate data partition".to_string()))?;
        let data_off = part.part_header.data_off;
        let result = Box::new(WiiPartReadStream {
            stream: OwningWindowedReadStream {
                base: disc_io.begin_read_stream(data_off)?,
                begin: data_off,
                end: data_off + part.part_header.data_size,
            },
            // Formats like NFS store partition data already decrypted.
            crypto: if disc_io.has_wii_crypto() {
                Aes128::new(&part.part_header.ticket.enc_key.into()).into()
            } else { Option::None },
            offset: 0,
            // u64::MAX marks "no block decrypted yet".
            cur_block: u64::MAX,
            buf: [0; 0x8000],
            validate_hashes: false,
        });
        Result::Ok(result)
    }
}
/// Decrypting read stream over a single Wii partition's data area.
struct WiiPartReadStream<'a> {
    // Windowed view of the raw (possibly encrypted) partition data.
    stream: OwningWindowedReadStream<'a>,
    // Cipher for cluster decryption; None when data is already plaintext.
    crypto: Option<Aes128>,
    // Current logical (decrypted) read offset.
    offset: u64,
    // Index of the block currently decrypted into `buf`; u64::MAX = none.
    cur_block: u64,
    buf: [u8; BUFFER_SIZE],
    // When true, verify H0/H1/H2 hashes for each decrypted block.
    validate_hashes: bool,
}
impl<'a> PartReadStream for WiiPartReadStream<'a> {
    /// Opens a windowed stream over a single file's data.
    ///
    /// Wii FST file offsets are stored in 4-byte units, hence `<< 2`.
    fn begin_file_stream(&mut self, node: &Node) -> io::Result<SharedWindowedReadStream> {
        assert_eq!(node.kind, NodeKind::File);
        let offset = (node.offset as u64) << 2;
        self.seek(SeekFrom::Start(offset))?;
        io::Result::Ok(SharedWindowedReadStream {
            base: self,
            begin: offset,
            end: offset + node.length as u64,
        })
    }
    /// Parses the partition header (including the FST) from the start of
    /// the decrypted data area.
    fn read_header(&mut self) -> Result<Box<dyn PartHeader>> {
        self.seek(SeekFrom::Start(0))?;
        Result::Ok(Box::from(self.read_be::<WiiPartition>()?))
    }
    fn ideal_buffer_size(&self) -> usize {
        BLOCK_SIZE
    }
}
/// Reinterprets a 20-byte array as a SHA-1 digest output for comparison.
#[inline(always)]
fn as_digest(slice: &[u8; 20]) -> digest::Output<Sha1> { (*slice).into() }
/// Reads the raw block `cluster` from the partition stream into
/// `part.buf`, decrypts it (when encryption is enabled), and optionally
/// verifies the cluster's H0/H1/H2 hash hierarchy, panicking on mismatch.
fn decrypt_block(part: &mut WiiPartReadStream, cluster: usize) -> io::Result<()> {
    // FIXME(review): the read count is ignored; a short read near the end
    // of the window would leave stale data in `buf` undetected.
    part.stream.read(&mut part.buf)?;
    if part.crypto.is_some() {
        // Fetch IV before decrypting header
        // (data IV lives at offset 0x3D0 of the encrypted hash block).
        let iv = Block::from(*array_ref![part.buf, 0x3d0, 16]);
        // Don't need to decrypt header if we're not validating hashes
        if part.validate_hashes {
            // Hash block is encrypted with a zero IV.
            Aes128Cbc::new(part.crypto.as_ref().unwrap().clone(), &Block::from([0; 16]))
                .decrypt(&mut part.buf[..BUFFER_OFFSET])
                .expect("Failed to decrypt header");
        }
        Aes128Cbc::new(part.crypto.as_ref().unwrap().clone(), &iv)
            .decrypt(&mut part.buf[BUFFER_OFFSET..])
            .expect("Failed to decrypt block");
    }
    if part.validate_hashes && part.crypto.is_some() /* FIXME NFS validation? */ {
        // Position within the hash hierarchy:
        // sub_group = cluster % 8, group = (cluster / 8) % 8.
        let (mut group, sub_group) = div_rem(cluster, 8);
        group %= 8;
        // H0 hashes
        // One per 0x400-byte data chunk; expected values are packed at the
        // start of the decrypted hash block (31 * 20 bytes = 0x26C).
        for i in 0..31 {
            let mut hash = Sha1::new();
            hash.update(array_ref![part.buf, (i + 1) * 0x400, 0x400]);
            let expected = as_digest(array_ref![part.buf, i * 20, 20]);
            let output = hash.finalize();
            if output != expected {
                panic!("Invalid hash! (block {:?}) {:?}\n\texpected {:?}", i, output.as_slice(), expected);
            }
        }
        // H1 hash
        // Hash of this cluster's H0 table, checked against the H1 entry
        // for this sub_group at 0x280.
        {
            let mut hash = Sha1::new();
            hash.update(array_ref![part.buf, 0, 0x26C]);
            let expected = as_digest(array_ref![part.buf, 0x280 + sub_group * 20, 20]);
            let output = hash.finalize();
            if output != expected {
                panic!("Invalid hash! (subgroup {:?}) {:?}\n\texpected {:?}", sub_group, output.as_slice(), expected);
            }
        }
        // H2 hash
        // Hash of the 8 H1 entries (0xA0 bytes), checked against the H2
        // entry for this group at 0x340.
        {
            let mut hash = Sha1::new();
            hash.update(array_ref![part.buf, 0x280, 0xA0]);
            let expected = as_digest(array_ref![part.buf, 0x340 + group * 20, 20]);
            let output = hash.finalize();
            if output != expected {
                panic!("Invalid hash! (group {:?}) {:?}\n\texpected {:?}", group, output.as_slice(), expected);
            }
        }
    }
    io::Result::Ok(())
}
impl<'a> Read for WiiPartReadStream<'a> {
    /// Reads decrypted partition data, transparently crossing block
    /// boundaries and decrypting blocks on demand.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        // Translate the logical offset into (block index, offset in block).
        let (mut block, mut block_offset) = div_rem(self.offset as usize, BLOCK_SIZE);
        let mut rem = buf.len();
        let mut read: usize = 0;
        while rem > 0 {
            // Decrypt the target block unless it is already cached in `buf`.
            if block != self.cur_block as usize {
                decrypt_block(self, block)?;
                self.cur_block = block as u64;
            }
            // Copy at most up to the end of the current block.
            let mut cache_size = rem;
            if cache_size + block_offset > BLOCK_SIZE {
                cache_size = BLOCK_SIZE - block_offset;
            }
            // Data begins at BUFFER_OFFSET within the raw block buffer.
            buf[read..read + cache_size]
                .copy_from_slice(&self.buf[BUFFER_OFFSET + block_offset..
                BUFFER_OFFSET + block_offset + cache_size]);
            read += cache_size;
            rem -= cache_size;
            block_offset = 0;
            block += 1;
        }
        // NOTE(review): always reports a full read; short reads at the end
        // of the partition are not surfaced — confirm callers never overread.
        self.offset += buf.len() as u64;
        io::Result::Ok(buf.len())
    }
}
/// Converts a raw (buffer-sized) length to the logical (block-sized)
/// length: whole buffers are scaled down to blocks, and any trailing
/// partial buffer is carried through unchanged.
#[inline(always)]
fn to_block_size(v: u64) -> u64 {
    let whole_buffers = v / BUFFER_SIZE as u64;
    let tail = v % BUFFER_SIZE as u64;
    whole_buffers * BLOCK_SIZE as u64 + tail
}
impl<'a> Seek for WiiPartReadStream<'a> {
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        self.offset = match pos {
            SeekFrom::Start(v) => v,
            SeekFrom::End(v) => (self.stream_len()? as i64 + v) as u64,
            SeekFrom::Current(v) => (self.offset as i64 + v) as u64,
        };
        // Position the base stream at the containing raw block and
        // invalidate the block cache so the next read re-decrypts.
        let block = self.offset / BLOCK_SIZE as u64;
        if block != self.cur_block {
            self.stream.seek(SeekFrom::Start(block * BUFFER_SIZE as u64))?;
            self.cur_block = u64::MAX;
        }
        io::Result::Ok(self.offset)
    }
    /// Logical length: the raw window length with per-block overhead removed.
    fn stream_len(&mut self) -> io::Result<u64> {
        io::Result::Ok(to_block_size(self.stream.stream_len()?))
    }
    fn stream_position(&mut self) -> io::Result<u64> {
        io::Result::Ok(self.offset)
    }
}
impl<'a> ReadStream for WiiPartReadStream<'a> {}
/// Parsed Wii partition contents: disc header, BI2 header, and the FST.
#[derive(Clone, Debug, PartialEq, BinRead)]
pub(crate) struct WiiPartition {
    header: Header,
    bi2_header: BI2Header,
    // FST offset is stored in 4-byte units within the partition.
    #[br(seek_before = SeekFrom::Start((header.fst_off as u64) << 2))]
    #[br(parse_with = node_parser)]
    root_node: NodeType,
}
impl PartHeader for WiiPartition {
    /// Returns the root of the parsed file system tree.
    fn root_node(&self) -> &NodeType {
        &self.root_node
    }
    /// Looks up a node by `/`-separated path.
    fn find_node(&self, path: &str) -> Option<&NodeType> {
        find_node(&self.root_node, path)
    }
}

148
src/fst.rs Normal file
View File

@ -0,0 +1,148 @@
//! Disc file system types
use std::io::{Read, Seek, SeekFrom};
use binread::{derive_binread, NullString, prelude::*, ReadOptions};
use encoding_rs::SHIFT_JIS;
/// File system node kind.
#[derive(Clone, Debug, PartialEq)]
pub enum NodeKind {
    /// A regular file.
    File,
    /// A directory, which may contain child nodes.
    Directory,
}
/// An individual file system node.
#[derive_binread]
#[derive(Clone, Debug, PartialEq)]
pub struct Node {
    // Packed field: high byte is the node type, low 24 bits the offset of
    // the name in the FST string table. Dropped from the struct (`temp`).
    #[br(temp)]
    type_and_name_offset: u32,
    // Derived from the packed field's high byte: non-zero = directory.
    #[br(calc = if (type_and_name_offset >> 24) != 0 { NodeKind::Directory } else { NodeKind::File })]
    pub kind: NodeKind,
    /// For files, this is the partition offset of the file data. (Wii: >> 2)
    ///
    /// For directories, this is the children start offset in the FST.
    pub offset: u32,
    /// For files, this is the byte size of the file.
    ///
    /// For directories, this is the children end offset in the FST.
    ///
    /// Number of child files and directories recursively is `length - offset`.
    pub length: u32,
    // Offset into the FST string table; resolved later by `read_node_name`.
    #[br(calc = type_and_name_offset & 0xffffff)]
    name_offset: u32,
    #[br(ignore)]
    /// The node name.
    pub name: Box<str>,
}
/// Contains a file system node, and if a directory, its children.
#[derive(Clone, Debug, PartialEq)]
pub enum NodeType {
    /// A single file node.
    File(Node),
    /// A directory node with children.
    Directory(Node, Vec<NodeType>),
}
/// Recursively reads one node and, for directories, all of its children.
///
/// `i` is the running node index; a directory's `length` field is its
/// children's end index, so children are read until `*i` reaches it.
fn read_node<R: Read + Seek>(reader: &mut R, ro: &ReadOptions, i: &mut u32) -> BinResult<NodeType> {
    let node = reader.read_type::<Node>(ro.endian)?;
    *i += 1;
    BinResult::Ok(if node.kind == NodeKind::Directory {
        let mut children: Vec<NodeType> = Vec::new();
        // `length - *i` is the number of remaining descendants.
        children.reserve((node.length - *i) as usize);
        while *i < node.length {
            children.push(read_node(reader, ro, i)?);
        }
        NodeType::Directory(node, children)
    } else {
        NodeType::File(node)
    })
}
/// Recursively resolves node names from the FST string table at `base`.
///
/// Names are null-terminated Shift-JIS strings; the root node keeps its
/// default (empty) name.
fn read_node_name<R: Read + Seek>(reader: &mut R, ro: &ReadOptions, base: u64, node: &mut NodeType, root: bool) -> BinResult<()> {
    let mut decode_name = |v: &mut Node| -> BinResult<()> {
        if !root {
            let offset = base + v.name_offset as u64;
            reader.seek(SeekFrom::Start(offset))?;
            let null_string = reader.read_type::<NullString>(ro.endian)?;
            let (res, _, errors) = SHIFT_JIS.decode(&*null_string.0);
            if errors {
                return BinResult::Err(binread::Error::Custom {
                    pos: offset,
                    err: Box::new("Failed to decode node name"),
                });
            }
            v.name = res.into();
        }
        BinResult::Ok(())
    };
    match node {
        NodeType::File(v) => { decode_name(v)?; }
        NodeType::Directory(v, c) => {
            decode_name(v)?;
            // Children are never the root, so their names are always decoded.
            for x in c {
                read_node_name(reader, ro, base, x, false)?;
            }
        }
    }
    BinResult::Ok(())
}
/// binread parser for the full FST: reads all node records, then fills in
/// each node's name from the string table that follows them.
pub(crate) fn node_parser<R: Read + Seek>(reader: &mut R, ro: &ReadOptions, _: ()) -> BinResult<NodeType> {
    let mut index = 0u32;
    let mut root = read_node(reader, ro, &mut index)?;
    // The string table begins immediately after the node records.
    let string_base = reader.stream_position()?;
    read_node_name(reader, ro, string_base, &mut root, true)?;
    BinResult::Ok(root)
}
/// Case-insensitively compares a node's name against one path component.
/// The root directory (empty name) matches any component.
fn matches_name(node: &NodeType, name: &str) -> bool {
    match node {
        NodeType::Directory(v, _) if v.name.is_empty() => true, // root
        NodeType::Directory(v, _) => v.name.as_ref().eq_ignore_ascii_case(name),
        NodeType::File(v) => v.name.as_ref().eq_ignore_ascii_case(name),
    }
}
/// Resolves `path` (with `/` separators, case-insensitive components) to
/// a node, starting at `node` (normally the root directory).
///
/// Returns `None` when a component is missing, or when components remain
/// after a file node is matched.
pub(crate) fn find_node<'a>(mut node: &'a NodeType, path: &str) -> Option<&'a NodeType> {
    let mut split = path.split('/');
    let mut current = split.next();
    while current.is_some() {
        if matches_name(node, current.unwrap()) {
            match node {
                NodeType::File(_) => {
                    // A file matches only as the final path component.
                    return if split.next().is_none() {
                        Option::Some(node)
                    } else {
                        Option::None
                    };
                }
                NodeType::Directory(v, c) => {
                    // The unnamed root consumes no path component unless the
                    // component itself is empty (leading '/').
                    if !v.name.is_empty() || current.unwrap().is_empty() {
                        current = split.next();
                    }
                    if current.is_none() || current.unwrap().is_empty() {
                        return if split.next().is_none() {
                            Option::Some(node)
                        } else {
                            Option::None
                        };
                    }
                    // Descend into the matching child. BUGFIX: when no child
                    // matched, the previous code left `node` and `current`
                    // unchanged, which looped forever at the root (whose
                    // empty name matches every component). Bail out instead.
                    let mut descended = false;
                    for x in c {
                        if matches_name(x, current.unwrap()) {
                            node = x;
                            descended = true;
                            break;
                        }
                    }
                    if !descended {
                        return Option::None;
                    }
                }
            }
        } else {
            break;
        }
    }
    Option::None
}

26
src/io/iso.rs Normal file
View File

@ -0,0 +1,26 @@
use std::fs::File;
use std::io::{Seek, SeekFrom};
use std::io;
use std::path::{PathBuf, Path};
use crate::io::DiscIO;
use crate::streams::ReadStream;
use crate::Result;
/// Disc I/O implementation for plain ISO disc images.
pub(crate) struct DiscIOISO {
    // Path to the backing ISO file.
    pub(crate) filename: PathBuf,
}
/// Creates an ISO-backed [`DiscIO`] for the given file path.
pub(crate) fn new_disc_io_iso(filename: &Path) -> Result<DiscIOISO> {
    let disc_io = DiscIOISO { filename: filename.to_path_buf() };
    Ok(disc_io)
}
impl DiscIO for DiscIOISO {
    /// Opens the backing ISO file and positions it at `offset`.
    fn begin_read_stream(&self, offset: u64) -> io::Result<Box<dyn ReadStream>> {
        let mut file = File::open(&self.filename)?;
        file.seek(SeekFrom::Start(offset))?;
        Ok(Box::new(file))
    }
}

73
src/io/mod.rs Normal file
View File

@ -0,0 +1,73 @@
//! Disc file format related logic (ISO, NFS, etc)
use std::{fs, io};
use std::path::Path;
use crate::{Error, Result};
use crate::io::{iso::new_disc_io_iso, nfs::new_disc_io_nfs};
use crate::streams::ReadStream;
pub(crate) mod iso;
pub(crate) mod nfs;
/// Abstraction over supported disc file types.
/// Abstraction over supported disc file types.
pub trait DiscIO {
    /// Opens a new read stream for the disc file(s).
    /// Generally does _not_ need to be used directly.
    fn begin_read_stream(&self, offset: u64) -> io::Result<Box<dyn ReadStream + '_>>;
    /// If false, the file format does not use standard Wii partition encryption. (e.g. NFS)
    // Defaults to true; formats storing decrypted data override this.
    fn has_wii_crypto(&self) -> bool { true }
}
/// Helper function for checking a file extension (case-insensitive).
#[inline(always)]
pub fn has_extension(filename: &Path, extension: &str) -> bool {
    match filename.extension() {
        Some(ext) => ext.eq_ignore_ascii_case(extension),
        None => false,
    }
}
/// Creates a new [`DiscIO`] instance.
///
/// # Examples
///
/// Basic usage:
/// ```
/// use nod::io::new_disc_io;
///
/// let mut disc_io = new_disc_io("path/to/file".as_ref())?;
/// ```
pub fn new_disc_io(filename: &Path) -> Result<Box<dyn DiscIO>> {
let path_result = fs::canonicalize(filename);
if path_result.is_err() {
return Result::Err(Error::Io(
format!("Failed to open {}", filename.to_string_lossy()),
path_result.unwrap_err(),
));
}
let path = path_result.as_ref().unwrap();
let meta = fs::metadata(path);
if meta.is_err() {
return Result::Err(Error::Io(
format!("Failed to open {}", filename.to_string_lossy()),
meta.unwrap_err(),
));
}
if !meta.unwrap().is_file() {
return Result::Err(Error::DiscFormat(
format!("Input is not a file: {}", filename.to_string_lossy())
));
}
if has_extension(path, "iso") {
Result::Ok(Box::from(new_disc_io_iso(path)?))
} else if has_extension(path, "nfs") {
if matches!(path.parent(), Some(parent) if parent.is_dir()) {
Result::Ok(Box::from(new_disc_io_nfs(path.parent().unwrap())?))
} else {
Result::Err(Error::DiscFormat("Failed to locate NFS parent directory".to_string()))
}
} else {
Result::Err(Error::DiscFormat("Unknown file type".to_string()))
}
}

300
src/io/nfs.rs Normal file
View File

@ -0,0 +1,300 @@
use std::{fs::File, io, io::{Read, Seek, SeekFrom}, path::{Path, PathBuf}};
use aes::{Aes128, NewBlockCipher};
use binread::{derive_binread, prelude::*};
use block_modes::{block_padding::NoPadding, BlockMode, Cbc};
use crate::disc::{BUFFER_SIZE};
use crate::io::DiscIO;
use crate::{Error,Result};
use crate::streams::ReadStream;
type Aes128Cbc = Cbc<Aes128, NoPadding>;
/// A contiguous range of logical blocks mapped by the NFS image.
#[derive(Clone, Debug, PartialEq, BinRead)]
pub(crate) struct LBARange {
    pub(crate) start_block: u32,
    pub(crate) num_blocks: u32,
}
/// Header of the first NFS file, bracketed by "EGGS"/"SGGE" magic values.
#[derive_binread]
#[derive(Clone, Debug, PartialEq)]
#[br(magic = b"EGGS", assert(end_magic == * b"SGGE"))]
pub(crate) struct NFSHeader {
    pub(crate) version: u32,
    pub(crate) unk1: u32,
    pub(crate) unk2: u32,
    pub(crate) lba_range_count: u32,
    // Fixed-size table; only the first `lba_range_count` entries are valid.
    #[br(count = 61)]
    pub(crate) lba_ranges: Vec<LBARange>,
    // Read only for the end-magic assertion; `temp` strips it from the struct.
    #[br(temp)]
    pub(crate) end_magic: [u8; 4],
}
/// File/Block/Offset physical address within a split NFS image.
#[derive(Clone, Copy, Debug, PartialEq)]
pub(crate) struct FBO {
    pub(crate) file: u32,
    pub(crate) block: u32,
    // Logical block index (used to derive the decryption IV).
    pub(crate) l_block: u32,
    pub(crate) offset: u32,
}
/// Returns the sentinel `FBO` with every field set to `u32::MAX`,
/// representing an unset/unmapped physical address.
pub(crate) fn fbo_max() -> FBO {
    const UNSET: u32 = u32::MAX;
    FBO { file: UNSET, block: UNSET, l_block: UNSET, offset: UNSET }
}
impl NFSHeader {
    /// Computes how many `hif_*.nfs` files make up the image.
    ///
    /// Blocks are 0x8000 bytes; each file holds up to 0xFA00000 bytes of
    /// block data plus a 0x200-byte header, so this rounds up.
    pub(crate) fn calculate_num_files(&self) -> u32 {
        let total_block_count = self.lba_ranges.iter().take(self.lba_range_count as usize)
            .fold(0u32, |acc, range| acc + range.num_blocks);
        (((total_block_count as u64) * 0x8000u64 + (0x200u64 + 0xF9FFFFFu64)) / 0xFA00000u64) as u32
    }
    /// Translates a logical byte offset to a physical file/block/offset
    /// address via the LBA range table. Returns `fbo_max()` (all
    /// `u32::MAX`) when the offset is not covered by any range, which
    /// callers treat as a logical zero block.
    pub(crate) fn logical_to_fbo(&self, offset: u64) -> FBO {
        let block_div = (offset / 0x8000) as u32;
        let block_off = (offset % 0x8000) as u32;
        let mut block = u32::MAX;
        // Ranges are packed; accumulate their sizes to find the physical block.
        let mut physical_block = 0u32;
        for range in self.lba_ranges.iter().take(self.lba_range_count as usize) {
            if block_div >= range.start_block && block_div - range.start_block < range.num_blocks {
                block = physical_block + (block_div - range.start_block);
                break;
            }
            physical_block += range.num_blocks;
        }
        if block == u32::MAX {
            fbo_max()
        } else {
            // 8000 (decimal) blocks per NFS file.
            FBO {
                file: block / 8000,
                block: block % 8000,
                l_block: block_div,
                offset: block_off,
            }
        }
    }
}
/// Disc I/O implementation for split NFS (Wii U VC) images.
pub(crate) struct DiscIONFS {
    // Directory containing the `hif_*.nfs` files.
    pub(crate) directory: PathBuf,
    // AES-128 key loaded from `htk.bin`.
    pub(crate) key: [u8; 16],
    // Parsed NFS header; populated by `validate_files`.
    pub(crate) header: Option<NFSHeader>,
}
/// Creates an NFS-backed [`DiscIO`] for `directory`, loading the key file
/// and header and verifying that all image files are present.
pub(crate) fn new_disc_io_nfs(directory: &Path) -> Result<DiscIONFS> {
    let mut disc_io = DiscIONFS {
        directory: directory.to_path_buf(),
        key: [0u8; 16],
        header: None,
    };
    disc_io.validate_files()?;
    Ok(disc_io)
}
/// Decrypting read stream over a split NFS image.
pub(crate) struct NFSReadStream<'a> {
    disc_io: &'a DiscIONFS,
    // Currently open `hif_*.nfs` file, if any.
    file: Option<File>,
    // Cipher initialized from the image key.
    crypto: Aes128,
    // Physical address - all UINT32_MAX indicates logical zero block
    phys_addr: FBO,
    // Logical address
    offset: u64,
    // Active file stream and its offset as set in the system.
    // Block is typically one ahead of the presently decrypted block.
    cur_file: u32,
    cur_block: u32,
    // Decrypted contents of the current block.
    buf: [u8; BUFFER_SIZE],
}
impl<'a> NFSReadStream<'a> {
    /// Opens NFS file index `cur_file` and makes it the active file.
    /// Fails if the index exceeds the computed file count.
    fn set_cur_file(&mut self, cur_file: u32) -> Result<()> {
        if cur_file >= self.disc_io.header.as_ref().unwrap().calculate_num_files() {
            return Result::Err(Error::DiscFormat("Out of bounds NFS file access".to_string()));
        }
        self.cur_file = cur_file;
        self.cur_block = u32::MAX;
        self.file = Option::from(File::open(self.disc_io.get_nfs(cur_file)?)?);
        Result::Ok(())
    }
    /// Seeks the active file to block `cur_block`, skipping the
    /// 0x200-byte per-file header.
    fn set_cur_block(&mut self, cur_block: u32) -> io::Result<()> {
        self.cur_block = cur_block;
        self.file.as_ref().unwrap().seek(
            SeekFrom::Start(self.cur_block as u64 * BUFFER_SIZE as u64 + 0x200u64)
        )?;
        io::Result::Ok(())
    }
    /// Makes `phys_addr` current: reads and decrypts its block into
    /// `buf`, switching files and handling cross-file overlap as needed.
    fn set_phys_addr(&mut self, phys_addr: FBO) -> Result<()> {
        // If we're just changing the offset, nothing else needs to be done
        if self.phys_addr.file == phys_addr.file && self.phys_addr.block == phys_addr.block {
            self.phys_addr.offset = phys_addr.offset;
            return Result::Ok(());
        }
        self.phys_addr = phys_addr;
        // Set logical zero block
        if phys_addr.file == u32::MAX {
            self.buf.fill(0u8);
            return Result::Ok(());
        }
        // Make necessary file and block current with system
        if phys_addr.file != self.cur_file {
            self.set_cur_file(phys_addr.file)?;
        }
        if phys_addr.block != self.cur_block {
            self.set_cur_block(phys_addr.block)?;
        }
        // Read block, handling 0x200 overlap case: the last block of a
        // file spills its final 0x200 bytes into the start of the next file.
        // FIXME(review): read counts are not checked; a short read would go
        // unnoticed here.
        if phys_addr.block == 7999 {
            self.file.as_ref().unwrap().read(&mut self.buf[..BUFFER_SIZE - 0x200])?;
            self.set_cur_file(self.cur_file + 1)?;
            self.file.as_ref().unwrap().read(&mut self.buf[BUFFER_SIZE - 0x200..])?;
            self.cur_block = 0;
        } else {
            self.file.as_ref().unwrap().read(&mut self.buf)?;
            self.cur_block += 1;
        }
        // Decrypt. The IV encodes the logical block index in its last four
        // bytes. BUGFIX: extracting bytes requires 8-bit shifts; the
        // previous shifts (2/4/6) selected overlapping bit ranges.
        // NOTE(review): byte order kept as originally written (LSB first) —
        // verify against the reference nod C++ implementation.
        let iv: [u8; 16] = [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            (phys_addr.l_block & 0xFF) as u8,
            ((phys_addr.l_block >> 8) & 0xFF) as u8,
            ((phys_addr.l_block >> 16) & 0xFF) as u8,
            ((phys_addr.l_block >> 24) & 0xFF) as u8,
        ];
        Aes128Cbc::new(self.crypto.clone(), &iv.into())
            .decrypt(&mut self.buf)?;
        Result::Ok(())
    }
    /// Loads and decrypts the block containing logical byte address `addr`.
    fn set_logical_addr(&mut self, addr: u64) -> Result<()> {
        self.set_phys_addr(self.disc_io.header.as_ref().unwrap().logical_to_fbo(addr))
    }
}
impl<'a> Read for NFSReadStream<'a> {
    /// Copies decrypted data out of `buf`, loading the next block once the
    /// current one is exhausted.
    ///
    /// NOTE(review): this copies from `buf` before calling
    /// `set_logical_addr`, so it relies on `buf` already holding the block
    /// at `self.offset` (established by a prior seek) — confirm all
    /// callers seek first.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let mut rem = buf.len();
        let mut read: usize = 0;
        while rem > 0 {
            let mut read_size = rem;
            // u32::MAX offset marks the logical zero block; treat as 0.
            let block_offset: usize = if self.phys_addr.offset == u32::MAX { 0 } else { self.phys_addr.offset as usize };
            if read_size + block_offset > BUFFER_SIZE {
                read_size = BUFFER_SIZE - block_offset
            }
            buf[read..read + read_size]
                .copy_from_slice(&mut self.buf[block_offset..block_offset + read_size]);
            read += read_size;
            rem -= read_size;
            self.offset += read_size as u64;
            // Advance to (and decrypt) the block for the new offset.
            self.set_logical_addr(self.offset)
                .map_err(|v| match v {
                    Error::Io(_, v) => v,
                    _ => io::Error::from(io::ErrorKind::Other)
                })?;
        }
        io::Result::Ok(read)
    }
}
impl<'a> Seek for NFSReadStream<'a> {
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        self.offset = match pos {
            // NOTE: SeekFrom::End relies on stream_len, which is
            // unimplemented below and will panic.
            SeekFrom::Start(v) => v,
            SeekFrom::End(v) => (self.stream_len()? as i64 + v) as u64,
            SeekFrom::Current(v) => (self.offset as i64 + v) as u64,
        };
        // Eagerly load/decrypt the block containing the new offset.
        self.set_logical_addr(self.offset)
            .map_err(|v| match v {
                Error::Io(_, v) => v,
                _ => io::Error::from(io::ErrorKind::Other)
            })?;
        io::Result::Ok(self.offset)
    }
    // Not yet implemented: the logical image length is not tracked.
    fn stream_len(&mut self) -> io::Result<u64> {
        todo!()
    }
    fn stream_position(&mut self) -> io::Result<u64> {
        io::Result::Ok(self.offset)
    }
}
impl<'a> ReadStream for NFSReadStream<'a> {}
impl DiscIO for DiscIONFS {
fn begin_read_stream(&self, offset: u64) -> io::Result<Box<dyn ReadStream + '_>> {
io::Result::Ok(Box::from(NFSReadStream {
disc_io: self,
file: Option::None,
crypto: Aes128::new(&self.key.into()),
phys_addr: fbo_max(),
offset,
cur_file: u32::MAX,
cur_block: u32::MAX,
buf: [0; BUFFER_SIZE],
}))
}
fn has_wii_crypto(&self) -> bool { false }
}
impl DiscIONFS {
    /// Resolves `path` relative to the NFS directory.
    fn get_path<P: AsRef<Path>>(&self, path: P) -> PathBuf {
        let mut buf = self.directory.clone();
        buf.push(path);
        buf
    }
    /// Returns the path to NFS file `num` (`hif_NNNNNN.nfs`),
    /// verifying that it exists.
    fn get_nfs(&self, num: u32) -> Result<PathBuf> {
        let path = self.get_path(format!("hif_{:06}.nfs", num));
        if path.exists() {
            Result::Ok(path)
        } else {
            Result::Err(Error::DiscFormat(format!("Failed to locate {}", path.to_string_lossy())))
        }
    }
    /// Loads the AES key file and NFS header, and verifies that every
    /// expected `hif_*.nfs` file is present.
    pub(crate) fn validate_files(&mut self) -> Result<()> {
        {
            // Load key file: prefer ../code/htk.bin, then ./htk.bin.
            let mut key_path = self.get_path("../code/htk.bin");
            if !key_path.is_file() {
                key_path = self.directory.clone();
                key_path.push("htk.bin");
            }
            if !key_path.is_file() {
                return Result::Err(Error::DiscFormat(format!(
                    "Failed to locate {} or {}",
                    self.get_path("../code/htk.bin").to_string_lossy(),
                    key_path.to_string_lossy()
                )));
            }
            File::open(key_path.as_path())
                .map_err(|v| Error::Io(format!("Failed to open {}", key_path.to_string_lossy()), v))?
                // BUGFIX: read_exact instead of read — a partial read would
                // silently leave the 16-byte key half-initialized.
                .read_exact(&mut self.key)
                .map_err(|v| Error::Io(format!("Failed to read {}", key_path.to_string_lossy()), v))?;
        }
        {
            // Load header from first file
            let header: NFSHeader = File::open(self.get_nfs(0)?)?.read_be()?;
            // Ensure remaining files exist
            for i in 1..header.calculate_num_files() {
                self.get_nfs(i)?;
            }
            self.header = Option::from(header)
        }
        Result::Ok(())
    }
}

69
src/lib.rs Normal file
View File

@ -0,0 +1,69 @@
//! Library for traversing & reading GameCube and Wii disc images.
//!
//! Based on the C++ library [nod](https://github.com/AxioDL/nod),
//! but does not currently support authoring.
//!
//! Currently supported file formats:
//! - ISO
//! - NFS (Wii U VC files, e.g. `hif_000000.nfs`)
//!
//! # Examples
//!
//! Opening a disc image and reading a file:
//! ```
//! use nod::disc::{new_disc_base, PartHeader};
//! use nod::fst::NodeType;
//! use nod::io::new_disc_io;
//! use std::io::Read;
//!
//! let mut disc_io = new_disc_io("path/to/file".as_ref())?;
//! let disc_base = new_disc_base(disc_io.as_mut())?;
//! let mut partition = disc_base.get_data_partition(disc_io.as_mut())?;
//! let header = partition.read_header()?;
//! if let Some(NodeType::File(node)) = header.find_node("/MP3/Worlds.txt") {
//! let mut s = String::new();
//! partition.begin_file_stream(node)?.read_to_string(&mut s);
//!     println!("{}", s);
//! }
//! ```
#![feature(seek_stream_len)]
pub mod fst;
pub mod disc;
pub mod io;
pub mod streams;
/// Error type for all disc reading operations.
#[derive(Debug)]
pub enum Error {
    /// A binary parsing error (from `binread`).
    BinaryFormat(binread::Error),
    /// An AES block-mode decryption error.
    Encryption(block_modes::BlockModeError),
    /// An I/O error with a contextual message.
    Io(String, std::io::Error),
    /// The disc image is malformed or unsupported.
    DiscFormat(String),
}
/// Library result type alias using [`Error`].
pub type Result<T> = std::result::Result<T, Error>;
// Conversion used by `?` for raw I/O errors; attaches a generic message.
impl From<std::io::Error> for Error {
    fn from(v: std::io::Error) -> Self {
        Error::Io("I/O error".to_string(), v)
    }
}
// Conversion used by `?` for binary parsing errors.
impl From<binread::Error> for Error {
    fn from(v: binread::Error) -> Self {
        Error::BinaryFormat(v)
    }
}
// Conversion used by `?` for AES block-mode errors.
impl From<block_modes::BlockModeError> for Error {
    fn from(v: block_modes::BlockModeError) -> Self {
        Error::Encryption(v)
    }
}
/// Computes the quotient and remainder of `x / y` in a single call.
#[inline(always)]
pub(crate) fn div_rem<T: std::ops::Div<Output=T> + std::ops::Rem<Output=T> + Copy>(x: T, y: T) -> (T, T) {
    (x / y, x % y)
}

127
src/streams.rs Normal file
View File

@ -0,0 +1,127 @@
//! Common stream types
use std::{fs::File, io, io::{Read, Seek, SeekFrom}};
use std::ops::DerefMut;
/// Creates a fixed-size array reference from a slice.
#[macro_export]
macro_rules! array_ref {
    ($slice:expr, $offset:expr, $size:expr) => {{
        #[inline]
        fn to_array<T>(slice: &[T]) -> &[T; $size] {
            // SAFETY: the slice indexing below (`[$offset..$offset + $size]`)
            // bounds-checks the range, so `slice` here is guaranteed to
            // contain exactly `$size` contiguous elements.
            unsafe { &*(slice.as_ptr() as *const [_; $size]) }
        }
        to_array(&$slice[$offset..$offset + $size])
    }}
}
/// A read-only seekable stream (anything that is both `Read` and `Seek`).
pub trait ReadStream: Read + Seek {}
impl ReadStream for File {}
/// A stream exposing a fixed sub-window of a base stream.
trait WindowedReadStream: ReadStream {
    /// The underlying (unwindowed) stream.
    fn base_stream(&mut self) -> &mut dyn ReadStream;
    /// The window as `(begin, end)` absolute offsets in the base stream.
    fn window(&self) -> (u64, u64);
}
/// A windowed stream that owns its base stream.
pub struct OwningWindowedReadStream<'a> {
    pub(crate) base: Box<dyn ReadStream + 'a>,
    // Window bounds as absolute offsets into `base`.
    pub(crate) begin: u64,
    pub(crate) end: u64,
}
/// A windowed stream that borrows its base stream.
pub struct SharedWindowedReadStream<'a> {
    pub(crate) base: &'a mut dyn ReadStream,
    // Window bounds as absolute offsets into `base`.
    pub(crate) begin: u64,
    pub(crate) end: u64,
}
/// Reads from the base stream, clamping the destination buffer so the
/// read never extends past the end of the window.
#[inline(always)]
fn windowed_read(stream: &mut dyn WindowedReadStream, buf: &mut [u8]) -> io::Result<usize> {
    let pos = stream.stream_position()?;
    let len = stream.stream_len()?;
    let dest = if pos + buf.len() as u64 > len {
        &mut buf[..(len - pos) as usize]
    } else {
        buf
    };
    stream.base_stream().read(dest)
}
/// Seeks within the window, translating window-relative positions to base
/// stream positions. Fails with `UnexpectedEof` if the resulting base
/// position falls outside the window.
#[inline(always)]
fn windowed_seek(stream: &mut dyn WindowedReadStream, pos: SeekFrom) -> io::Result<u64> {
    let (begin, end) = stream.window();
    let result = stream.base_stream().seek(match pos {
        SeekFrom::Start(p) => SeekFrom::Start(begin + p),
        // BUGFIX: SeekFrom::End must be relative to the window's end (an
        // absolute base position), not the base stream's physical end;
        // the previous `SeekFrom::End(end as i64 + p)` landed at
        // base_len + end + p.
        SeekFrom::End(p) => SeekFrom::Start((end as i64 + p) as u64),
        // Relative seeks need no translation.
        SeekFrom::Current(_) => pos,
    })?;
    if result < begin || result > end {
        io::Result::Err(io::Error::from(io::ErrorKind::UnexpectedEof))
    } else {
        io::Result::Ok(result - begin)
    }
}
impl<'a> Read for OwningWindowedReadStream<'a> {
    // Delegates to the shared windowed-read helper (clamps to the window).
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        windowed_read(self, buf)
    }
}
impl<'a> Seek for OwningWindowedReadStream<'a> {
    // Delegates to the shared windowed-seek helper (translates positions).
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        windowed_seek(self, pos)
    }
    /// The window size in bytes.
    fn stream_len(&mut self) -> io::Result<u64> {
        Result::Ok(self.end - self.begin)
    }
    /// Position relative to the window start.
    fn stream_position(&mut self) -> io::Result<u64> {
        Result::Ok(self.base.stream_position()? - self.begin)
    }
}
// Marker impl: satisfies Read + Seek via the impls above.
impl<'a> ReadStream for OwningWindowedReadStream<'a> {}
impl<'a> WindowedReadStream for OwningWindowedReadStream<'a> {
    fn base_stream(&mut self) -> &mut dyn ReadStream {
        self.base.deref_mut()
    }
    fn window(&self) -> (u64, u64) {
        (self.begin, self.end)
    }
}
impl<'a> Read for SharedWindowedReadStream<'a> {
    // Delegates to the shared windowed-read helper (clamps to the window).
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        windowed_read(self, buf)
    }
}
impl<'a> Seek for SharedWindowedReadStream<'a> {
    // Delegates to the shared windowed-seek helper (translates positions).
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        windowed_seek(self, pos)
    }
    /// The window size in bytes.
    fn stream_len(&mut self) -> io::Result<u64> {
        Result::Ok(self.end - self.begin)
    }
    /// Position relative to the window start.
    fn stream_position(&mut self) -> io::Result<u64> {
        Result::Ok(self.base.stream_position()? - self.begin)
    }
}
// Marker impl: satisfies Read + Seek via the impls above.
impl<'a> ReadStream for SharedWindowedReadStream<'a> {}
impl<'a> WindowedReadStream for SharedWindowedReadStream<'a> {
    fn base_stream(&mut self) -> &mut dyn ReadStream {
        self.base
    }
    fn window(&self) -> (u64, u64) {
        (self.begin, self.end)
    }
}