diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 47e250c..5596425 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -95,42 +95,42 @@ jobs: target: x86_64-unknown-linux-musl name: linux-x86_64 build: zigbuild - features: asm + features: openssl-vendored - platform: ubuntu-latest target: i686-unknown-linux-musl name: linux-i686 build: zigbuild - features: asm + features: openssl-vendored - platform: ubuntu-latest target: aarch64-unknown-linux-musl name: linux-aarch64 build: zigbuild - features: nightly + features: openssl-vendored - platform: windows-latest target: i686-pc-windows-msvc name: windows-x86 build: build - features: default + features: openssl-vendored - platform: windows-latest target: x86_64-pc-windows-msvc name: windows-x86_64 build: build - features: default + features: openssl-vendored - platform: windows-latest target: aarch64-pc-windows-msvc name: windows-arm64 build: build - features: nightly + features: openssl-vendored - platform: macos-latest target: x86_64-apple-darwin name: macos-x86_64 build: build - features: asm + features: openssl - platform: macos-latest target: aarch64-apple-darwin name: macos-arm64 build: build - features: nightly + features: openssl fail-fast: false runs-on: ${{ matrix.platform }} steps: diff --git a/Cargo.lock b/Cargo.lock index 166eb18..421ad2a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -16,9 +16,9 @@ checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" [[package]] name = "aes" -version = "0.8.4" +version = "0.9.0-pre.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" +checksum = "e7856582c758ade85d71daf27ec6bcea6c1c73913692b07b8dffea2dc03531c9" dependencies = [ "cfg-if", "cipher", @@ -34,6 +34,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "allocator-api2" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45862d1c77f2228b9e10bc609d5bc203d86ebc9b87ad8d5d5167a6c9abf739d9" + [[package]] name = "argp" version = "0.3.0" @@ -55,12 +61,33 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "autocfg" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" + [[package]] name = "base16ct" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" +[[package]] +name = "bit-set" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" + [[package]] name = "bitflags" version = "2.6.0" @@ -69,22 +96,40 @@ checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] name = "block-buffer" -version = "0.10.4" +version = "0.11.0-rc.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +checksum = "3fd016a0ddc7cb13661bf5576073ce07330a693f8608a1320b4e20561cc12cdc" dependencies = [ - "generic-array", + "hybrid-array", ] [[package]] name = "block-padding" 
-version = "0.3.3" +version = "0.4.0-rc.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8894febbff9f758034a5b8e12d87918f56dfc64a8e1fe757d65e29041538d93" +checksum = "6868e23cd7a5b2e18fb2e9a583910b88b8d645dd21017aafc5d0439cf16ae6d6" dependencies = [ - "generic-array", + "hybrid-array", ] +[[package]] +name = "bumpalo" +version = "3.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" + [[package]] name = "bzip2" version = "0.4.4" @@ -108,18 +153,18 @@ dependencies = [ [[package]] name = "cbc" -version = "0.1.2" +version = "0.2.0-pre.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b52a9543ae338f279b96b0b9fed9c8093744685043739079ce85cd58f289a6" +checksum = "e0729a0a8422deb6056b8fcd89c42b724fe27e69458fa006f00c63cbffffd91b" dependencies = [ "cipher", ] [[package]] name = "cc" -version = "1.1.24" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "812acba72f0a070b003d3697490d2b55b837230ae7c6c6497f05cc2ddbb8d938" +checksum = "fd9de9f2205d5ef3fd67e685b0df337994ddd4495e2a28d185500d0e1edfea47" dependencies = [ "jobserver", "libc", @@ -134,9 +179,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "cipher" -version = "0.4.4" +version = "0.5.0-pre.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +checksum = "5b1425e6ce000f05a73096556cabcfb6a10a3ffe3bb4d75416ca8f00819c0b6a" dependencies = [ "crypto-common", "inout", @@ -151,15 +196,15 @@ dependencies = [ "encode_unicode", "lazy_static", "libc", - "unicode-width", + "unicode-width 0.1.14", "windows-sys 0.52.0", ] [[package]] name = "cpufeatures" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" +checksum = "0ca741a962e1b0bff6d724a1a0958b686406e853bb14061f218562e1896f95e6" dependencies = [ "libc", ] @@ -173,6 +218,15 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "crossbeam-channel" +version = "0.5.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "crossbeam-deque" version = "0.8.5" @@ -200,19 +254,18 @@ checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" [[package]] name = "crypto-common" -version = "0.1.6" +version = "0.2.0-rc.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +checksum = "b0b8ce8218c97789f16356e7896b3714f26c2ee1079b79c0b7ae7064bb9089fa" dependencies = [ - "generic-array", - "typenum", + "hybrid-array", ] [[package]] name = "digest" -version = "0.10.7" +version = "0.11.0-pre.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +checksum = "cf2e3d6615d99707295a9673e889bf363a04b2a466bd320c65a72536f7577379" dependencies = [ "block-buffer", "crypto-common", @@ -247,21 +300,51 @@ checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" [[package]] name = "encoding_rs" -version = "0.8.34" +version = "0.8.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" dependencies = [ "cfg-if", ] [[package]] -name = "generic-array" -version = "0.14.7" +name = "equivalent" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "foldhash" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" dependencies = [ - "typenum", - "version_check", + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] +name = "generator" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbb949699c3e4df3a183b1d2142cb24277057055ed23c68ed58894f76c517223" +dependencies = [ + "cfg-if", + "libc", + "log", + "rustversion", + "windows", ] [[package]] @@ -270,9 +353,37 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5" dependencies = [ - "unicode-width", + "unicode-width 0.1.14", ] +[[package]] +name = "getrandom" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "hashbrown" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a9bfc1af68b1726ea47d3d5109de126281def866b33970e10fbab11b5dafab3" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash", +] + +[[package]] +name = "hermit-abi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" + [[package]] name = "hex" version = "0.4.3" @@ -283,35 +394,35 @@ dependencies = [ ] [[package]] -name = "indicatif" -version = "0.17.8" +name = "hybrid-array" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "763a5a8f45087d6bcea4222e7b72c291a054edf80e4ef6efd2a4979878c7bea3" +checksum = "45a9a965bb102c1c891fb017c09a05c965186b1265a207640f323ddd009f9deb" +dependencies = [ + "typenum", +] + +[[package]] +name = "indicatif" +version = "0.17.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbf675b85ed934d3c67b5c5469701eec7db22689d0a2139d856e0925fa28b281" dependencies = [ "console", 
- "instant", "number_prefix", "portable-atomic", - "unicode-width", + "unicode-width 0.2.0", + "web-time", ] [[package]] name = "inout" -version = "0.1.3" +version = "0.2.0-rc.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" +checksum = "de49db00f5add6dad75a57946b75de0f26287a6fc95f4f277d48419200422beb" dependencies = [ "block-padding", - "generic-array", -] - -[[package]] -name = "instant" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" -dependencies = [ - "cfg-if", + "hybrid-array", ] [[package]] @@ -338,6 +449,15 @@ dependencies = [ "libc", ] +[[package]] +name = "js-sys" +version = "0.3.72" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" +dependencies = [ + "wasm-bindgen", +] + [[package]] name = "lazy_static" version = "1.5.0" @@ -346,15 +466,15 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.159" +version = "0.2.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" +checksum = "433bfe06b8c75da9b2e3fbea6e5329ff87748f0b144ef75306e674c3f6f7c13f" [[package]] name = "liblzma" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7c45fc6fcf5b527d3cf89c1dee8c327943984b0dc8bfcf6e100473b00969e63" +checksum = "603222e049bf0da71529325ada5d02dc3871cbd3679cf905429f7f0de93da87b" dependencies = [ "liblzma-sys", ] @@ -386,6 +506,28 @@ version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +[[package]] +name = "loom" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" +dependencies = [ + "cfg-if", + "generator", + "scoped-tls", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "lru" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" +dependencies = [ + "hashbrown", +] + [[package]] name = "matchers" version = "0.1.0" @@ -397,22 +539,12 @@ dependencies = [ [[package]] name = "md-5" -version = "0.10.6" +version = "0.11.0-pre.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" +checksum = "117b97b6b9ae1ec9a396b357698efa3ecff4fc1f40e0ec59ae7c1270b460ac1d" dependencies = [ "cfg-if", "digest", - "md5-asm", -] - -[[package]] -name = "md5-asm" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d19b8ee7fc7d812058d3b708c7f719efd0713d53854648e4223c6fcae709e2df" -dependencies = [ - "cc", ] [[package]] @@ -421,6 +553,15 @@ version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +[[package]] +name = "memmap2" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd3f7eed9d3848f8b98834af67102b720745c4ec028fcd0aa0239277e7de374f" +dependencies = [ + "libc", 
+] + [[package]] name = "mimalloc" version = "0.1.43" @@ -446,20 +587,34 @@ dependencies = [ "adler", "aes", "base16ct", + "bit-set", + "bytes", "bzip2", "cbc", + "crc32fast", + "crossbeam-channel", + "crossbeam-utils", "digest", "dyn-clone", "encoding_rs", "itertools", "liblzma", - "log", + "liblzma-sys", + "lru", + "md-5", + "memmap2", "miniz_oxide", + "openssl", + "rand", "rayon", "sha1", + "simple_moving_average", "thiserror", - "zerocopy", + "tracing", + "xxhash-rust", + "zerocopy 0.8.10", "zstd", + "zstd-safe", ] [[package]] @@ -467,17 +622,15 @@ name = "nodtool" version = "2.0.0-alpha.1" dependencies = [ "argp", - "base16ct", "crc32fast", "digest", "enable-ansi-support", "hex", "indicatif", - "itertools", - "log", "md-5", "mimalloc", "nod", + "num_cpus", "quick-xml", "serde", "sha1", @@ -486,8 +639,8 @@ dependencies = [ "tracing", "tracing-attributes", "tracing-subscriber", - "xxhash-rust", - "zerocopy", + "tracing-tracy", + "zerocopy 0.8.10", "zstd", ] @@ -501,6 +654,25 @@ dependencies = [ "winapi", ] +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi", + "libc", +] + [[package]] name = "number_prefix" version = "0.4.0" @@ -509,11 +681,56 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "once_cell" -version = "1.20.1" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82881c4be219ab5faaf2ad5e5e5ecdff8c66bd7402ca3160975c93b24961afd1" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" + +[[package]] +name = "openssl" +version = "0.10.68" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6174bc48f102d208783c2c84bf931bb75927a617866870de8a4ea85597f871f5" dependencies = [ - "portable-atomic", + "bitflags", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", +] + +[[package]] +name = "openssl-src" +version = "300.3.2+3.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a211a18d945ef7e648cc6e0058f4c548ee46aab922ea203e0d30e966ea23647b" +dependencies = [ + "cc", +] + +[[package]] +name = "openssl-sys" +version = "0.9.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45abf306cbf99debc8195b66b7346498d7b10c210de50418b5ccd7ceba08c741" +dependencies = [ + "cc", + "libc", + "openssl-src", + "pkg-config", + "vcpkg", ] [[package]] @@ -524,9 +741,9 @@ checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "pin-project-lite" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" [[package]] name = "pkg-config" @@ -541,10 +758,19 
@@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" [[package]] -name = "proc-macro2" -version = "1.0.86" +name = "ppv-lite86" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy 0.7.35", +] + +[[package]] +name = "proc-macro2" +version = "1.0.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" dependencies = [ "unicode-ident", ] @@ -563,9 +789,9 @@ dependencies = [ [[package]] name = "quick-xml" -version = "0.36.2" +version = "0.37.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7649a7b4df05aed9ea7ec6f628c67c9953a43869b8bc50929569b2999d443fe" +checksum = "f22f29bdff3987b4d8632ef95fd6424ec7e4e0a57e2f4fc63e489e75357f6a03" dependencies = [ "memchr", "serde", @@ -580,6 +806,36 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + [[package]] name = "rayon" version = "1.10.0" @@ -602,13 +858,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.8", + "regex-automata 0.4.9", "regex-syntax 0.8.5", ] @@ -623,9 +879,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", @@ -645,44 +901,46 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] -name = "serde" -version = "1.0.210" +name = "rustversion" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" +checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" + +[[package]] +name = "scoped-tls" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + +[[package]] +name = "serde" +version = "1.0.215" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.210" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" +checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.89", ] [[package]] name = "sha1" -version = "0.10.6" +version = "0.11.0-pre.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +checksum = "9540978cef7a8498211c1b1c14e5ce920fe5bd524ea84f4a3d72d4602515ae93" dependencies = [ "cfg-if", "cpufeatures", "digest", - "sha1-asm", -] - -[[package]] -name = "sha1-asm" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "286acebaf8b67c1130aedffad26f594eff0c1292389158135327d2e23aed582b" -dependencies = [ - "cc", ] [[package]] @@ -700,6 +958,15 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" +[[package]] +name = "simple_moving_average" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a4b144ad185430cd033299e2c93e465d5a7e65fbb858593dc57181fa13cd310" +dependencies = [ + "num-traits", +] + [[package]] name = "size" version = "0.4.1" @@ -734,9 +1001,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.79" +version = "2.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" +checksum = "44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e" dependencies = [ "proc-macro2", "quote", @@ -745,22 +1012,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.64" +version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" +checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.64" +version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" +checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.89", ] [[package]] @@ -792,7 +1059,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.89", ] [[package]] @@ -834,6 +1101,38 @@ dependencies = [ "tracing-log", ] +[[package]] +name = "tracing-tracy" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc775fdaf33c3dfd19dc354729e65e87914bc67dcdc390ca1210807b8bee5902" +dependencies = [ + "tracing-core", + "tracing-subscriber", + "tracy-client", +] + +[[package]] +name = "tracy-client" +version = "0.17.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "746b078c6a09ebfd5594609049e07116735c304671eaab06ce749854d23435bc" +dependencies = [ + "loom", + "once_cell", + "tracy-client-sys", +] + +[[package]] 
+name = "tracy-client-sys" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3637e734239e12ab152cd269302500bd063f37624ee210cd04b4936ed671f3b1" +dependencies = [ + "cc", + "windows-targets", +] + [[package]] name = "typenum" version = "1.17.0" @@ -842,18 +1141,15 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "unicase" -version = "2.7.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" -dependencies = [ - "version_check", -] +checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" [[package]] name = "unicode-ident" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" +checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" [[package]] name = "unicode-width" @@ -861,6 +1157,12 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" +[[package]] +name = "unicode-width" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" + [[package]] name = "valuable" version = "0.1.0" @@ -868,10 +1170,81 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] -name = "version_check" -version = "0.9.5" +name = "vcpkg" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" +dependencies = [ + "cfg-if", + "once_cell", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn 2.0.89", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" + +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] [[package]] name = "winapi" @@ -895,6 +1268,70 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" +dependencies = [ + "windows-core", + "windows-targets", +] + +[[package]] +name = "windows-core" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-result", + "windows-strings", + "windows-targets", +] + +[[package]] +name = "windows-implement" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", +] + +[[package]] +name = "windows-interface" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", +] + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result", + "windows-targets", +] + [[package]] name = "windows-sys" version = "0.42.0" @@ -1033,22 +1470,43 @@ checksum = "6a5cbf750400958819fb6178eaa83bee5cd9c29a26a40cc241df8c70fdd46984" [[package]] name = "zerocopy" -version = "0.8.1" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd2e5ce961dea177d282ec084dca2aa411b7411199a68d79eb1beacb305a6cd9" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ - "zerocopy-derive", + "byteorder", + "zerocopy-derive 0.7.35", +] + +[[package]] +name = "zerocopy" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a13a42ed30c63171d820889b2981318736915150575b8d2d6dbee7edd68336ca" +dependencies = [ + "zerocopy-derive 0.8.10", ] [[package]] name = "zerocopy-derive" -version = "0.8.1" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b06304eeddb6081af98ac59db08c868ac197e586086b996d15a86ed70e09a754" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.89", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "593e7c96176495043fcb9e87cf7659f4d18679b5bab6b92bdef359c76a7795dd" +dependencies = [ + 
"proc-macro2", + "quote", + "syn 2.0.89", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 56c6bb8..d74de41 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,6 +2,9 @@ members = ["nod", "nodtool"] resolver = "2" +[profile.release] +debug = 1 + [profile.release-lto] inherits = "release" lto = "fat" @@ -16,3 +19,10 @@ authors = ["Luke Street "] license = "MIT OR Apache-2.0" repository = "https://github.com/encounter/nod" keywords = ["gamecube", "wii", "iso", "wbfs", "rvz"] + +[workspace.dependencies] +digest = { version = "0.11.0-pre.9", default-features = false } +md-5 = { version = "0.11.0-pre.4", default-features = false } +sha1 = { version = "0.11.0-pre.4", default-features = false } +tracing = "0.1" +zerocopy = { version = "0.8", features = ["alloc", "derive"] } diff --git a/nod/Cargo.toml b/nod/Cargo.toml index fee964b..737e3a5 100644 --- a/nod/Cargo.toml +++ b/nod/Cargo.toml @@ -16,27 +16,42 @@ categories = ["command-line-utilities", "parser-implementations"] [features] default = ["compress-bzip2", "compress-lzma", "compress-zlib", "compress-zstd"] -asm = ["sha1/asm"] compress-bzip2 = ["bzip2"] -compress-lzma = ["liblzma"] +compress-lzma = ["liblzma", "liblzma-sys"] compress-zlib = ["adler", "miniz_oxide"] -compress-zstd = ["zstd"] +compress-zstd = ["zstd", "zstd-safe"] +openssl = ["dep:openssl"] +openssl-vendored = ["openssl", "openssl/vendored"] [dependencies] adler = { version = "1.0", optional = true } -aes = "0.8" +aes = "0.9.0-pre.2" base16ct = "0.2" +bit-set = "0.8" +bytes = "1.8" bzip2 = { version = "0.4", features = ["static"], optional = true } -cbc = "0.1" -digest = "0.10" +cbc = "0.2.0-pre.2" +crc32fast = "1.4" +crossbeam-channel = "0.5" +crossbeam-utils = "0.8" +digest = { workspace = true } dyn-clone = "1.0" encoding_rs = "0.8" itertools = "0.13" liblzma = { version = "0.3", features = ["static"], optional = true } -log = "0.4" +liblzma-sys = { version = "0.3", features = ["static"], optional = true } +lru = "0.12" +md-5 = { workspace = true } +memmap2 = "0.9" miniz_oxide = { version = "0.8", optional = true } +openssl = { version = "0.10", optional = true } +rand = "0.8" rayon = "1.10" -sha1 = "0.10" -thiserror = "1.0" -zerocopy = { version = "0.8", features = ["alloc", "derive"] } -zstd = { version = "0.13", optional = true } +sha1 = { workspace = true } +simple_moving_average = "1.0" +thiserror = "2.0" +tracing = { workspace = true } +xxhash-rust = { version = "0.8", features = ["xxh64"] } +zerocopy = { workspace = true } +zstd = { version = "0.13", optional = true, default-features = false } +zstd-safe = { version = "7.2", optional = true, default-features = false } diff --git a/nod/src/build/gc.rs b/nod/src/build/gc.rs new file mode 100644 index 0000000..84c155c --- /dev/null +++ b/nod/src/build/gc.rs @@ -0,0 +1,827 @@ +#![allow(missing_docs)] // TODO +use std::{ + io, + io::{Read, Seek, Write}, + sync::Arc, +}; + +use tracing::debug; +use zerocopy::{FromZeros, IntoBytes}; + +use crate::{ + disc::{ + fst::{Fst, FstBuilder}, + DiscHeader, PartitionHeader, BI2_SIZE, BOOT_SIZE, GCN_MAGIC, MINI_DVD_SIZE, SECTOR_SIZE, + WII_MAGIC, + }, + read::DiscStream, + util::{align_up_64, array_ref, array_ref_mut, lfg::LaggedFibonacci}, + Error, Result, ResultContext, +}; + +pub trait FileCallback: Clone + Send + Sync { + fn read_file(&mut self, out: &mut [u8], name: &str, offset: u64) -> io::Result<()>; +} + +#[derive(Debug, Clone)] +pub struct FileInfo { + pub name: String, + pub size: u64, + pub offset: Option, + pub alignment: Option, +} + +pub struct GCPartitionBuilder { + 
disc_header: Box, + partition_header: Box, + user_files: Vec, + overrides: PartitionOverrides, + junk_files: Vec, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum WriteKind { + File(String), + Static(Arc<[u8]>, &'static str), + Junk, +} + +impl WriteKind { + fn name(&self) -> &str { + match self { + WriteKind::File(name) => name, + WriteKind::Static(_, name) => name, + WriteKind::Junk => "[junk data]", + } + } +} + +#[derive(Debug, Clone)] +pub struct WriteInfo { + pub kind: WriteKind, + pub size: u64, + pub offset: u64, +} + +pub struct GCPartitionWriter { + write_info: Vec, + disc_size: u64, + disc_id: [u8; 4], + disc_num: u8, +} + +const BI2_OFFSET: u64 = BOOT_SIZE as u64; +const APPLOADER_OFFSET: u64 = BI2_OFFSET + BI2_SIZE as u64; + +#[derive(Debug, Clone, Default)] +pub struct PartitionOverrides { + pub game_id: Option<[u8; 6]>, + pub game_title: Option, + pub disc_num: Option, + pub disc_version: Option, + pub audio_streaming: Option, + pub audio_stream_buf_size: Option, + pub junk_id: Option<[u8; 4]>, + pub region: Option, +} + +impl GCPartitionBuilder { + pub fn new(is_wii: bool, overrides: PartitionOverrides) -> Self { + let mut disc_header = DiscHeader::new_box_zeroed().unwrap(); + if is_wii { + disc_header.gcn_magic = [0u8; 4]; + disc_header.wii_magic = WII_MAGIC; + } else { + disc_header.gcn_magic = GCN_MAGIC; + disc_header.wii_magic = [0u8; 4]; + } + Self { + disc_header, + partition_header: PartitionHeader::new_box_zeroed().unwrap(), + user_files: Vec::new(), + overrides, + junk_files: Vec::new(), + } + } + + #[inline] + pub fn set_disc_header(&mut self, disc_header: Box) { + self.disc_header = disc_header; + } + + #[inline] + pub fn set_partition_header(&mut self, partition_header: Box) { + self.partition_header = partition_header; + } + + pub fn add_file(&mut self, info: FileInfo) -> Result<()> { + if let (Some(offset), Some(alignment)) = (info.offset, info.alignment) { + if offset % alignment as u64 != 0 { + return Err(Error::Other(format!( + "File {} offset {:#X} is not aligned to {}", + info.name, offset, alignment + ))); + } + } + self.user_files.push(info); + Ok(()) + } + + /// A junk file exists in the FST, but is excluded from the disc layout, so junk data will be + /// written in its place. 
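+
+// A usage sketch of the builder above (file names and sizes are made up, and the
+// exact call sequence is an assumption, not part of the original patch):
+//
+//     let mut builder = GCPartitionBuilder::new(false, PartitionOverrides::default());
+//     builder.add_file(FileInfo {
+//         name: "files/opening.bnr".to_string(), // hypothetical path
+//         size: 0x1960,
+//         offset: None,
+//         alignment: None,
+//     })?;
+//     builder.add_junk_file("files/unused.dat".to_string());
+//     let writer = builder.build(|out, name| /* write `name`'s bytes into `out` */ Ok(()))?;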
+
+struct GCPartitionLayout {
+    disc_header: Box<DiscHeader>,
+    partition_header: Box<PartitionHeader>,
+    user_files: Vec<FileInfo>,
+    apploader_file: Option<FileInfo>,
+    dol_file: Option<FileInfo>,
+    raw_fst: Option<Box<[u8]>>,
+    raw_bi2: Option<Box<[u8]>>,
+    junk_id: Option<[u8; 4]>,
+    junk_files: Vec<String>,
+}
+
+impl GCPartitionLayout {
+    fn new(builder: &GCPartitionBuilder) -> Self {
+        GCPartitionLayout {
+            disc_header: builder.disc_header.clone(),
+            partition_header: builder.partition_header.clone(),
+            user_files: builder.user_files.clone(),
+            apploader_file: None,
+            dol_file: None,
+            raw_fst: None,
+            raw_bi2: None,
+            junk_id: builder.overrides.junk_id,
+            junk_files: builder.junk_files.clone(),
+        }
+    }
+
+    fn locate_sys_files(
+        &mut self,
+        mut file_callback: impl FnMut(&mut dyn Write, &str) -> io::Result<()>,
+    ) -> Result<()> {
+        let mut handled = vec![false; self.user_files.len()];
+
+        // Locate fixed offset system files
+        for (info, handled) in self.user_files.iter().zip(handled.iter_mut()) {
+            if info.offset == Some(0) || info.name == "sys/boot.bin" {
+                let mut data = Vec::with_capacity(BOOT_SIZE);
+                file_callback(&mut data, &info.name)
+                    .with_context(|| format!("Failed to read file {}", info.name))?;
+                if data.len() != BOOT_SIZE {
+                    return Err(Error::Other(format!(
+                        "Boot file {} is {} bytes, expected {}",
+                        info.name,
+                        data.len(),
+                        BOOT_SIZE
+                    )));
+                }
+                self.disc_header.as_mut_bytes().copy_from_slice(&data[..size_of::<DiscHeader>()]);
+                self.partition_header
+                    .as_mut_bytes()
+                    .copy_from_slice(&data[size_of::<DiscHeader>()..]);
+                *handled = true;
+                continue;
+            }
+
+            if info.offset == Some(BI2_OFFSET) || info.name == "sys/bi2.bin" {
+                let mut data = Vec::with_capacity(BI2_SIZE);
+                file_callback(&mut data, &info.name)
+                    .with_context(|| format!("Failed to read file {}", info.name))?;
+                if data.len() != BI2_SIZE {
+                    return Err(Error::Other(format!(
+                        "BI2 file {} is {} bytes, expected {}",
+                        info.name,
+                        data.len(),
+                        BI2_SIZE
+                    )));
+                }
+                self.raw_bi2 = Some(data.into_boxed_slice());
+                *handled = true;
+                continue;
+            }
+
+            if info.offset == Some(APPLOADER_OFFSET) || info.name == "sys/apploader.img" {
+                self.apploader_file = Some(info.clone());
+                *handled = true;
+                continue;
+            }
+        }
+
+        // Locate other system files
+        let is_wii = self.disc_header.is_wii();
+        for (info, handled) in self.user_files.iter().zip(handled.iter_mut()) {
+            let dol_offset = self.partition_header.dol_offset(is_wii);
+            if (dol_offset != 0 && info.offset == Some(dol_offset)) || info.name == "sys/main.dol" {
+                let mut info = info.clone();
+                if info.alignment.is_none() {
+                    info.alignment = Some(128);
+                }
+                self.dol_file = Some(info);
+                *handled = true; // TODO DOL in user data
+                continue;
+            }
+
+            let fst_offset = self.partition_header.fst_offset(is_wii);
+            if (fst_offset != 0 && info.offset == Some(fst_offset)) || info.name == "sys/fst.bin" {
+                let mut data = Vec::with_capacity(info.size as usize);
+                file_callback(&mut data, &info.name)
+                    .with_context(|| format!("Failed to read file {}", info.name))?;
+                if data.len() != info.size as usize {
+                    return Err(Error::Other(format!(
+                        "FST file {} is {} bytes, expected {}",
+                        info.name,
+                        data.len(),
+                        info.size
+                    )));
+                }
+                self.raw_fst = Some(data.into_boxed_slice());
+                *handled = true;
+                continue;
+            }
+        }
+
+        // Remove handled files
+        let mut iter = handled.iter();
+        self.user_files.retain(|_| !iter.next().unwrap());
+        Ok(())
+    }
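+
+    // For reference, the `file_callback` contract used above: given a sink and an
+    // FST path, write that file's bytes into the sink. A minimal sketch (the
+    // `root: PathBuf` base directory is hypothetical, not from this patch):
+    //
+    //     |out: &mut dyn Write, name: &str| -> io::Result<()> {
+    //         let data = std::fs::read(root.join(name))?;
+    //         out.write_all(&data)
+    //     }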
+
+    fn apply_overrides(&mut self, overrides: &PartitionOverrides) -> Result<()> {
+        if let Some(game_id) = overrides.game_id {
+            self.disc_header.game_id.copy_from_slice(&game_id);
+        }
+        if let Some(game_title) = overrides.game_title.as_ref() {
+            let max_size = self.disc_header.game_title.len() - 1; // nul terminator
+            if game_title.len() > max_size {
+                return Err(Error::Other(format!(
+                    "Game title \"{}\" is too long ({} > {})",
+                    game_title,
+                    game_title.len(),
+                    max_size
+                )));
+            }
+            let len = game_title.len().min(max_size);
+            self.disc_header.game_title[..len].copy_from_slice(&game_title.as_bytes()[..len]);
+        }
+        if let Some(disc_num) = overrides.disc_num {
+            self.disc_header.disc_num = disc_num;
+        }
+        if let Some(disc_version) = overrides.disc_version {
+            self.disc_header.disc_version = disc_version;
+        }
+        if let Some(audio_streaming) = overrides.audio_streaming {
+            self.disc_header.audio_streaming = audio_streaming as u8;
+        }
+        if let Some(audio_stream_buf_size) = overrides.audio_stream_buf_size {
+            self.disc_header.audio_stream_buf_size = audio_stream_buf_size;
+        }
+        let set_bi2 = self.raw_bi2.is_none() && overrides.region.is_some();
+        let raw_bi2 = self.raw_bi2.get_or_insert_with(|| {
+            <[u8]>::new_box_zeroed_with_elems(BI2_SIZE).expect("Failed to allocate BI2")
+        });
+        if set_bi2 {
+            let region = overrides.region.unwrap_or(0xFF) as u32;
+            *array_ref_mut![raw_bi2, 0x18, 4] = region.to_be_bytes();
+        }
+        Ok(())
+    }
+
+    fn can_use_orig_fst(&self) -> bool {
+        if let Some(existing) = self.raw_fst.as_deref() {
+            let Ok(existing_fst) = Fst::new(existing) else {
+                return false;
+            };
+            for (_, node, path) in existing_fst.iter() {
+                if node.is_dir() {
+                    continue;
+                }
+                if !self.user_files.iter().any(|info| info.name == path)
+                    && !self.junk_files.contains(&path)
+                {
+                    println!("FST file {} not found", path);
+                    return false;
+                }
+            }
+            println!("Using existing FST");
+            return true;
+        }
+        false
+    }
+
+    fn calculate_fst_size(&self) -> Result<u64> {
+        if self.can_use_orig_fst() {
+            return Ok(self.raw_fst.as_deref().unwrap().len() as u64);
+        }
+
+        let mut file_names = Vec::with_capacity(self.user_files.len());
+        for info in &self.user_files {
+            file_names.push(info.name.as_str());
+        }
+        // file_names.sort_unstable();
+        let is_wii = self.disc_header.is_wii();
+        let mut builder = if let Some(existing) = self.raw_fst.as_deref() {
+            let existing_fst = Fst::new(existing)?;
+            FstBuilder::new_with_string_table(is_wii, Vec::from(existing_fst.string_table))?
+        } else {
+            FstBuilder::new(is_wii)
+        };
+        for name in file_names {
+            builder.add_file(name, 0, 0);
+        }
+        let size = builder.byte_size() as u64;
+        // if size != self.partition_header.fst_size(is_wii) {
+        //     return Err(Error::Other(format!(
+        //         "FST size {} != {}",
+        //         size,
+        //         self.partition_header.fst_size(is_wii)
+        //     )));
+        // }
+        Ok(size)
+    }
+
+    fn generate_fst(&mut self, write_info: &[WriteInfo]) -> Result<Arc<[u8]>> {
+        if self.can_use_orig_fst() {
+            let fst_data = self.raw_fst.as_ref().unwrap().clone();
+            // TODO update offsets and sizes
+            // let node_count = Fst::new(fst_data.as_ref())?.nodes.len();
+            // let string_base = node_count * size_of::<Node>();
+            // let (node_buf, string_table) = fst_data.split_at_mut(string_base);
+            // let nodes = <[Node]>::mut_from_bytes(node_buf).unwrap();
+            return Ok(Arc::from(fst_data));
+        }
+
+        let files = write_info.to_vec();
+        // files.sort_unstable_by(|a, b| a.name.cmp(&b.name));
+        let is_wii = self.disc_header.is_wii();
+        let mut builder = if let Some(existing) = self.raw_fst.as_deref() {
+            let existing_fst = Fst::new(existing)?;
+            FstBuilder::new_with_string_table(is_wii, Vec::from(existing_fst.string_table))?
+        } else {
+            FstBuilder::new(is_wii)
+        };
+        for info in files {
+            if let WriteKind::File(name) = info.kind {
+                builder.add_file(&name, info.offset, info.size as u32);
+            }
+        }
+        let raw_fst = builder.finalize();
+        if raw_fst.len() != self.partition_header.fst_size(is_wii) as usize {
+            return Err(Error::Other(format!(
+                "FST size mismatch: {} != {}",
+                raw_fst.len(),
+                self.partition_header.fst_size(is_wii)
+            )));
+        }
+        Ok(Arc::from(raw_fst))
+    }
+
+    fn layout_system_data(&mut self, write_info: &mut Vec<WriteInfo>) -> Result<u64> {
+        let mut last_offset = 0;
+
+        let Some(apploader_file) = self.apploader_file.as_ref() else {
+            return Err(Error::Other("Apploader not set".to_string()));
+        };
+        let Some(dol_file) = self.dol_file.as_ref() else {
+            return Err(Error::Other("DOL not set".to_string()));
+        };
+        let Some(raw_bi2) = self.raw_bi2.as_ref() else {
+            return Err(Error::Other("BI2 not set".to_string()));
+        };
+        // let Some(raw_fst) = self.raw_fst.as_ref() else {
+        //     return Err(Error::Other("FST not set".to_string()));
+        // };
+
+        let mut boot = <[u8]>::new_box_zeroed_with_elems(BOOT_SIZE)?;
+        boot[..size_of::<DiscHeader>()].copy_from_slice(self.disc_header.as_bytes());
+        boot[size_of::<DiscHeader>()..].copy_from_slice(self.partition_header.as_bytes());
+        write_info.push(WriteInfo {
+            kind: WriteKind::Static(Arc::from(boot), "[BOOT]"),
+            size: BOOT_SIZE as u64,
+            offset: last_offset,
+        });
+        last_offset += BOOT_SIZE as u64;
+        write_info.push(WriteInfo {
+            kind: WriteKind::Static(Arc::from(raw_bi2.as_ref()), "[BI2]"),
+            size: BI2_SIZE as u64,
+            offset: last_offset,
+        });
+        last_offset += BI2_SIZE as u64;
+        write_info.push(WriteInfo {
+            kind: WriteKind::File(apploader_file.name.clone()),
+            size: apploader_file.size,
+            offset: last_offset,
+        });
+        last_offset += apploader_file.size;
+
+        // Update DOL and FST offsets if not set
+        let is_wii = self.disc_header.is_wii();
+        let mut dol_offset = self.partition_header.dol_offset(is_wii);
+        if dol_offset == 0 {
+            dol_offset = align_up_64(last_offset, dol_file.alignment.unwrap() as u64);
+            self.partition_header.set_dol_offset(dol_offset, is_wii);
+        }
+        let mut fst_offset = self.partition_header.fst_offset(is_wii);
+        if fst_offset == 0 {
+            // TODO handle DOL in user data
+            fst_offset = align_up_64(dol_offset + dol_file.size, 128);
+            self.partition_header.set_fst_offset(fst_offset, is_wii);
+        }
+        let fst_size = self.calculate_fst_size()?;
+        self.partition_header.set_fst_size(fst_size, is_wii);
+        if self.partition_header.fst_max_size(is_wii) < fst_size {
+            self.partition_header.set_fst_max_size(fst_size, is_wii);
+        }
+
+        if dol_offset < fst_offset {
+            write_info.push(WriteInfo {
+                kind: WriteKind::File(dol_file.name.clone()),
+                size: dol_file.size,
+                offset: dol_offset,
+            });
+        } else {
+            // DOL in user data
+        }
+        // write_info.push(WriteInfo {
+        //     kind: WriteKind::Static(Arc::from(raw_fst.as_ref()), "[FST]"),
+        //     size: fst_size,
+        //     offset: fst_offset,
+        // });
+
+        Ok(fst_offset + fst_size)
+    }
+
+    fn layout_files(&mut self) -> Result<Vec<WriteInfo>> {
+        let mut system_write_info = Vec::new();
+        let mut write_info = Vec::with_capacity(self.user_files.len());
+        let mut last_offset = self.layout_system_data(&mut system_write_info)?;
+
+        // Layout user data
+        let mut user_offset = self.partition_header.user_offset.get() as u64;
+        if user_offset == 0 {
+            user_offset = align_up_64(last_offset, SECTOR_SIZE as u64);
+            self.partition_header.user_offset.set(user_offset as u32);
+        } else if user_offset < last_offset {
+            return Err(Error::Other(format!(
+                "User offset {:#X} is before FST {:#X}",
+                user_offset, last_offset
+            )));
+        }
+        last_offset = user_offset;
+        for info in &self.user_files {
+            let offset = info
+                .offset
+                .unwrap_or_else(|| align_up_64(last_offset, info.alignment.unwrap_or(32) as u64));
+            write_info.push(WriteInfo {
+                kind: WriteKind::File(info.name.clone()),
+                offset,
+                size: info.size,
+            });
+            last_offset = offset + info.size;
+        }
+
+        // Generate FST from only user files
+        let is_wii = self.disc_header.is_wii();
+        let fst_data = self.generate_fst(&write_info)?;
+        let fst_size = fst_data.len() as u64;
+        write_info.push(WriteInfo {
+            kind: WriteKind::Static(fst_data, "[FST]"),
+            size: fst_size,
+            offset: self.partition_header.fst_offset(is_wii),
+        });
+        // Add system files to write info
+        write_info.extend(system_write_info);
+        // Sort files by offset
+        sort_files(&mut write_info)?;
+
+        // Update user size if not set
+        if self.partition_header.user_size.get() == 0 {
+            let user_end = if self.disc_header.is_wii() {
+                align_up_64(last_offset, SECTOR_SIZE as u64)
+            } else {
+                MINI_DVD_SIZE
+            };
+            self.partition_header.user_size.set((user_end - user_offset) as u32);
+        }
+
+        // Insert junk data
+        let write_info = insert_junk_data(write_info, &self.partition_header);
+
+        Ok(write_info)
+    }
+
+    fn junk_id(&self) -> [u8; 4] {
+        self.junk_id.unwrap_or_else(|| *array_ref![self.disc_header.game_id, 0, 4])
+    }
+}
+
+pub(crate) fn insert_junk_data(
+    write_info: Vec<WriteInfo>,
+    partition_header: &PartitionHeader,
+) -> Vec<WriteInfo> {
+    let mut new_write_info = Vec::with_capacity(write_info.len());
+
+    let fst_end = partition_header.fst_offset(false) + partition_header.fst_size(false);
+    let file_gap = find_file_gap(&write_info, fst_end);
+    let mut last_file_end = 0;
+    for info in write_info {
+        if let WriteKind::File(..) | WriteKind::Static(..) = &info.kind {
+            let aligned_end = gcm_align(last_file_end);
+            if info.offset > aligned_end && last_file_end >= fst_end {
+                // Junk data is aligned to 4 bytes with a 28 byte padding (aka `(n + 31) & !3`),
+                // but a few cases don't have the 28 byte padding. Namely, the junk data after the
+                // FST, and the junk data in between the inner and outer rim files. This attempts to
+                // determine the correct alignment, but is not 100% accurate.
+                let junk_start = if file_gap == Some(last_file_end) {
+                    align_up_64(last_file_end, 4)
+                } else {
+                    aligned_end
+                };
+                new_write_info.push(WriteInfo {
+                    kind: WriteKind::Junk,
+                    size: info.offset - junk_start,
+                    offset: junk_start,
+                });
+            }
+            last_file_end = info.offset + info.size;
+        }
+        new_write_info.push(info);
+    }
+    let aligned_end = gcm_align(last_file_end);
+    let user_end =
+        partition_header.user_offset.get() as u64 + partition_header.user_size.get() as u64;
+    if aligned_end < user_end && aligned_end >= fst_end {
+        new_write_info.push(WriteInfo {
+            kind: WriteKind::Junk,
+            size: user_end - aligned_end,
+            offset: aligned_end,
+        });
+    }
+
+    new_write_info
+}
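+
+// Worked example of the padding rule above: a file ending at 0x2_0001 gives
+// gcm_align(0x2_0001) = (0x2_0001 + 31) & !3 = 0x2_0020, i.e. junk starts at the
+// next 4-byte boundary at least 28 bytes past the file end (31 bytes in this
+// case). In the file-gap case only plain 4-byte alignment is applied instead.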
+
+impl GCPartitionWriter {
+    fn new(write_info: Vec<WriteInfo>, disc_size: u64, disc_id: [u8; 4], disc_num: u8) -> Self {
+        Self { write_info, disc_size, disc_id, disc_num }
+    }
+
+    pub fn write_to<W>(
+        &self,
+        out: &mut W,
+        mut file_callback: impl FnMut(&mut dyn Write, &str) -> io::Result<()>,
+    ) -> Result<()>
+    where
+        W: Write + ?Sized,
+    {
+        let mut out = WriteCursor { inner: out, position: 0 };
+        let mut lfg = LaggedFibonacci::default();
+        for info in &self.write_info {
+            out.write_zeroes_until(info.offset).context("Writing padding")?;
+            match &info.kind {
+                WriteKind::File(name) => file_callback(&mut out, name)
+                    .with_context(|| format!("Writing file {}", name))?,
+                WriteKind::Static(data, name) => out.write_all(data).with_context(|| {
+                    format!("Writing static data {} ({} bytes)", name, data.len())
+                })?,
+                WriteKind::Junk => {
+                    lfg.write_sector_chunked(
+                        &mut out,
+                        info.size,
+                        self.disc_id,
+                        self.disc_num,
+                        info.offset,
+                    )
+                    .with_context(|| {
+                        format!(
+                            "Writing junk data at {:X} -> {:X}",
+                            info.offset,
+                            info.offset + info.size
+                        )
+                    })?;
+                }
+            };
+            if out.position != info.offset + info.size {
+                return Err(Error::Other(format!(
+                    "File {}: Wrote {} bytes, expected {}",
+                    info.kind.name(),
+                    out.position - info.offset,
+                    info.size
+                )));
+            }
+        }
+        out.write_zeroes_until(self.disc_size).context("Writing end of file")?;
+        out.flush().context("Flushing output")?;
+        Ok(())
+    }
+
+    pub fn into_stream<Cb>(self, file_callback: Cb) -> Result<Box<dyn DiscStream>>
+    where Cb: FileCallback + 'static {
+        Ok(Box::new(GCPartitionStream::new(
+            file_callback,
+            Arc::from(self.write_info),
+            self.disc_size,
+            self.disc_id,
+            self.disc_num,
+        )))
+    }
+}
+
+struct WriteCursor<W> {
+    inner: W,
+    position: u64,
+}
+
+impl<W> WriteCursor<W>
+where W: Write
+{
+    fn write_zeroes_until(&mut self, until: u64) -> io::Result<()> {
+        const ZEROES: [u8; 0x1000] = [0u8; 0x1000];
+        let mut remaining = until.saturating_sub(self.position);
+        while remaining > 0 {
+            let write_len = remaining.min(ZEROES.len() as u64) as usize;
+            let written = self.write(&ZEROES[..write_len])?;
+            remaining -= written as u64;
+        }
+        Ok(())
+    }
+}
+
+impl<W> Write for WriteCursor<W>
+where W: Write
+{
+    #[inline]
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        let len = self.inner.write(buf)?;
+        self.position += len as u64;
+        Ok(len)
+    }
+
+    #[inline]
+    fn flush(&mut self) -> io::Result<()> { self.inner.flush() }
+}
+
+#[derive(Clone)]
+pub(crate) struct GCPartitionStream<Cb> {
+    file_callback: Cb,
+    pos: u64,
+    write_info: Arc<[WriteInfo]>,
+    size: u64,
+    disc_id: [u8; 4],
+    disc_num: u8,
+}
+
+impl<Cb> GCPartitionStream<Cb> {
+    pub fn new(
+        file_callback: Cb,
+        write_info: Arc<[WriteInfo]>,
+        size: u64,
+        disc_id: [u8; 4],
+        disc_num: u8,
+    ) -> Self {
+        Self { file_callback, pos: 0, write_info, size, disc_id, disc_num }
+    }
+
+    #[inline]
+    pub fn set_position(&mut self, pos: u64) { self.pos = pos; }
+
+    #[inline]
+    pub fn len(&self) -> u64 { self.size }
+}
+
+impl<Cb> Read for GCPartitionStream<Cb>
+where Cb: FileCallback
+{
+    fn read(&mut self, out: &mut [u8]) -> io::Result<usize> {
+        if self.pos >= self.size {
+            // Out of bounds
+            return Ok(0);
+        }
+
+        let end = (self.size - self.pos).min(out.len() as u64) as usize;
+        let mut buf = &mut out[..end];
+        let mut curr = self
+            .write_info
+            .binary_search_by_key(&self.pos, |i| i.offset)
+            .unwrap_or_else(|idx| idx.saturating_sub(1));
+        let mut pos = self.pos;
+        let mut total = 0;
+        while !buf.is_empty() {
+            let Some(info) = self.write_info.get(curr) else {
+                buf.fill(0);
+                total += buf.len();
+                break;
+            };
+            if pos > info.offset + info.size {
+                curr += 1;
+                continue;
+            }
+            let read = if pos < info.offset {
+                let read = buf.len().min((info.offset - pos) as usize);
+                buf[..read].fill(0);
+                read
+            } else {
+                let read = buf.len().min((info.offset + info.size - pos) as usize);
+                match &info.kind {
+                    WriteKind::File(name) => {
+                        self.file_callback.read_file(&mut buf[..read], name, pos - info.offset)?;
+                    }
+                    WriteKind::Static(data, _) => {
+                        let offset = (pos - info.offset) as usize;
+                        buf[..read].copy_from_slice(&data[offset..offset + read]);
+                    }
+                    WriteKind::Junk => {
+                        let mut lfg = LaggedFibonacci::default();
+                        lfg.fill_sector_chunked(&mut buf[..read], self.disc_id, self.disc_num, pos);
+                    }
+                }
+                curr += 1;
+                read
+            };
+            buf = &mut buf[read..];
+            pos += read as u64;
+            total += read;
+        }
+
+        Ok(total)
+    }
+}
+
+impl<Cb> Seek for GCPartitionStream<Cb> {
+    fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
+        self.pos = match pos {
+            io::SeekFrom::Start(pos) => pos,
+            io::SeekFrom::End(v) => self.size.saturating_add_signed(v),
+            io::SeekFrom::Current(v) => self.pos.saturating_add_signed(v),
+        };
+        Ok(self.pos)
+    }
+}
+
+#[inline(always)]
+fn gcm_align(n: u64) -> u64 { (n + 31) & !3 }
+
+fn sort_files(files: &mut [WriteInfo]) -> Result<()> {
+    files.sort_unstable_by_key(|info| (info.offset, info.size));
+    for i in 1..files.len() {
+        let prev = &files[i - 1];
+        let cur = &files[i];
+        if cur.offset < prev.offset + prev.size {
+            let name = match &cur.kind {
+                WriteKind::File(name) => name.as_str(),
+                WriteKind::Static(_, name) => name,
+                WriteKind::Junk => "[junk data]",
+            };
+            let prev_name = match &prev.kind {
+                WriteKind::File(name) => name.as_str(),
+                WriteKind::Static(_, name) => name,
+                WriteKind::Junk => "[junk data]",
+            };
+            return Err(Error::Other(format!(
+                "File {} ({:#X}-{:#X}) overlaps with {} ({:#X}-{:#X})",
+                name,
+                cur.offset,
+                cur.offset + cur.size,
+                prev_name,
+                prev.offset,
+                prev.offset + prev.size
+            )));
+        }
+    }
+    Ok(())
+}
+
+/// Files can be located on the inner rim of the disc (closer to the center) or the outer rim
+/// (closer to the edge). The inner rim is slower to read, so developers often configured certain
+/// files to be located on the outer rim. This function attempts to find a gap in the file offsets
+/// between the inner and outer rim, which we need to recreate junk data properly.
+fn find_file_gap(file_infos: &[WriteInfo], fst_end: u64) -> Option<u64> {
+    let mut last_offset = 0;
+    for info in file_infos {
+        if let WriteKind::File(..) | WriteKind::Static(..) = &info.kind {
+            if last_offset > fst_end && info.offset > last_offset + SECTOR_SIZE as u64 {
+                debug!("Found file gap at {:X} -> {:X}", last_offset, info.offset);
+                return Some(last_offset);
+            }
+            last_offset = info.offset + info.size;
+        }
+    }
+    None
+}
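+
+// Illustration with hypothetical offsets: if the last inner-rim file ends at
+// 0x0800_0000 and the next file starts at 0x3000_0000, the gap exceeds one
+// sector, so Some(0x0800_0000) is returned and insert_junk_data aligns the junk
+// run there to a plain 4-byte boundary instead of the padded `(n + 31) & !3`
+// alignment.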
= &info.kind { + if last_offset > fst_end && info.offset > last_offset + SECTOR_SIZE as u64 { + debug!("Found file gap at {:X} -> {:X}", last_offset, info.offset); + return Some(last_offset); + } + last_offset = info.offset + info.size; + } + } + None +} diff --git a/nod/src/build/mod.rs b/nod/src/build/mod.rs new file mode 100644 index 0000000..69c30ae --- /dev/null +++ b/nod/src/build/mod.rs @@ -0,0 +1,4 @@ +//! Disc image building. + +pub mod gc; +pub mod wii; diff --git a/nod/src/build/wii.rs b/nod/src/build/wii.rs new file mode 100644 index 0000000..8c5b9ef --- /dev/null +++ b/nod/src/build/wii.rs @@ -0,0 +1 @@ +#![allow(missing_docs)] // TODO diff --git a/nod/src/common.rs b/nod/src/common.rs new file mode 100644 index 0000000..068edbe --- /dev/null +++ b/nod/src/common.rs @@ -0,0 +1,325 @@ +//! Common types. + +use std::{borrow::Cow, fmt, str::FromStr, sync::Arc}; + +use crate::{ + disc::{wii::WiiPartitionHeader, DiscHeader, PartitionHeader, SECTOR_SIZE}, + Error, Result, +}; + +/// SHA-1 hash bytes +pub type HashBytes = [u8; 20]; + +/// AES key bytes +pub type KeyBytes = [u8; 16]; + +/// Magic bytes +pub type MagicBytes = [u8; 4]; + +/// The disc file format. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +pub enum Format { + /// ISO / GCM (GameCube master disc) + #[default] + Iso, + /// CISO (Compact ISO) + Ciso, + /// GCZ + Gcz, + /// NFS (Wii U VC) + Nfs, + /// RVZ + Rvz, + /// WBFS + Wbfs, + /// WIA + Wia, + /// TGC + Tgc, +} + +impl Format { + /// Returns the default block size for the disc format, if any. + pub fn default_block_size(self) -> u32 { + match self { + Format::Ciso => crate::io::ciso::DEFAULT_BLOCK_SIZE, + #[cfg(feature = "compress-zlib")] + Format::Gcz => crate::io::gcz::DEFAULT_BLOCK_SIZE, + Format::Rvz => crate::io::wia::RVZ_DEFAULT_CHUNK_SIZE, + Format::Wbfs => crate::io::wbfs::DEFAULT_BLOCK_SIZE, + Format::Wia => crate::io::wia::WIA_DEFAULT_CHUNK_SIZE, + _ => 0, + } + } + + /// Returns the default compression algorithm for the disc format. + pub fn default_compression(self) -> Compression { + match self { + #[cfg(feature = "compress-zlib")] + Format::Gcz => crate::io::gcz::DEFAULT_COMPRESSION, + Format::Rvz => crate::io::wia::RVZ_DEFAULT_COMPRESSION, + Format::Wia => crate::io::wia::WIA_DEFAULT_COMPRESSION, + _ => Compression::None, + } + } +} + +impl fmt::Display for Format { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Format::Iso => write!(f, "ISO"), + Format::Ciso => write!(f, "CISO"), + Format::Gcz => write!(f, "GCZ"), + Format::Nfs => write!(f, "NFS"), + Format::Rvz => write!(f, "RVZ"), + Format::Wbfs => write!(f, "WBFS"), + Format::Wia => write!(f, "WIA"), + Format::Tgc => write!(f, "TGC"), + } + } +} + +/// The disc file format's compression algorithm. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +pub enum Compression { + /// No compression + #[default] + None, + /// BZIP2 + Bzip2(u8), + /// Deflate (GCZ only) + Deflate(u8), + /// LZMA + Lzma(u8), + /// LZMA2 + Lzma2(u8), + /// Zstandard + Zstandard(i8), +} + +impl Compression { + /// Validates the compression level. Sets the default level if the level is 0. 
+    pub fn validate_level(&mut self) -> Result<()> {
+        match self {
+            Compression::Bzip2(level) => {
+                if *level == 0 {
+                    *level = 9;
+                }
+                if *level > 9 {
+                    return Err(Error::Other(format!(
+                        "Invalid BZIP2 compression level: {level} (expected 1-9)"
+                    )));
+                }
+            }
+            Compression::Deflate(level) => {
+                if *level == 0 {
+                    *level = 9;
+                }
+                if *level > 10 {
+                    return Err(Error::Other(format!(
+                        "Invalid Deflate compression level: {level} (expected 1-10)"
+                    )));
+                }
+            }
+            Compression::Lzma(level) => {
+                if *level == 0 {
+                    *level = 6;
+                }
+                if *level > 9 {
+                    return Err(Error::Other(format!(
+                        "Invalid LZMA compression level: {level} (expected 1-9)"
+                    )));
+                }
+            }
+            Compression::Lzma2(level) => {
+                if *level == 0 {
+                    *level = 6;
+                }
+                if *level > 9 {
+                    return Err(Error::Other(format!(
+                        "Invalid LZMA2 compression level: {level} (expected 1-9)"
+                    )));
+                }
+            }
+            Compression::Zstandard(level) => {
+                if *level == 0 {
+                    *level = 19;
+                }
+                if *level < -22 || *level > 22 {
+                    return Err(Error::Other(format!(
+                        "Invalid Zstandard compression level: {level} (expected -22 to 22)"
+                    )));
+                }
+            }
+            _ => {}
+        }
+        Ok(())
+    }
+}
+
+impl fmt::Display for Compression {
+    #[inline]
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Compression::None => write!(f, "None"),
+            Compression::Bzip2(level) => {
+                if *level == 0 {
+                    write!(f, "BZIP2")
+                } else {
+                    write!(f, "BZIP2 ({level})")
+                }
+            }
+            Compression::Deflate(level) => {
+                if *level == 0 {
+                    write!(f, "Deflate")
+                } else {
+                    write!(f, "Deflate ({level})")
+                }
+            }
+            Compression::Lzma(level) => {
+                if *level == 0 {
+                    write!(f, "LZMA")
+                } else {
+                    write!(f, "LZMA ({level})")
+                }
+            }
+            Compression::Lzma2(level) => {
+                if *level == 0 {
+                    write!(f, "LZMA2")
+                } else {
+                    write!(f, "LZMA2 ({level})")
+                }
+            }
+            Compression::Zstandard(level) => {
+                if *level == 0 {
+                    write!(f, "Zstandard")
+                } else {
+                    write!(f, "Zstandard ({level})")
+                }
+            }
+        }
+    }
+}
+
+impl FromStr for Compression {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        let (format, level) =
+            if let Some((format, level_str)) = s.split_once(':').or_else(|| s.split_once('.')) {
+                let level = level_str
+                    .parse::<i32>()
+                    .map_err(|_| format!("Failed to parse compression level: {level_str:?}"))?;
+                (format, level)
+            } else {
+                (s, 0)
+            };
+        match format.to_ascii_lowercase().as_str() {
+            "" | "none" => Ok(Compression::None),
+            "bz2" | "bzip2" => Ok(Compression::Bzip2(level as u8)),
+            "deflate" | "gz" | "gzip" => Ok(Compression::Deflate(level as u8)),
+            "lzma" => Ok(Compression::Lzma(level as u8)),
+            "lzma2" | "xz" => Ok(Compression::Lzma2(level as u8)),
+            "zst" | "zstd" | "zstandard" => Ok(Compression::Zstandard(level as i8)),
+            _ => Err(format!("Unknown compression type: {format:?}")),
+        }
+    }
+}
+
+/// The kind of disc partition.
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
+pub enum PartitionKind {
+    /// Data partition.
+    Data,
+    /// Update partition.
+    Update,
+    /// Channel partition.
+    Channel,
+    /// Other partition kind.
+    Other(u32),
+}
+
+impl fmt::Display for PartitionKind {
+    #[inline]
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::Data => write!(f, "Data"),
+            Self::Update => write!(f, "Update"),
+            Self::Channel => write!(f, "Channel"),
+            Self::Other(v) => {
+                let bytes = v.to_be_bytes();
+                write!(f, "Other ({:08X}, {})", v, String::from_utf8_lossy(&bytes))
+            }
+        }
+    }
+}
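Parsing and validation are two separate steps: `FromStr` accepts forms like `zstd:19` or `lzma2.6`, and `validate_level` substitutes the per-algorithm default when the level is 0. A hedged usage sketch; the `nod::common::Compression` import path is an assumption about how the crate re-exports this type:

```rust
use std::str::FromStr;

use nod::common::Compression; // assumed re-export path

fn parse_compression(s: &str) -> Result<Compression, String> {
    let mut c = Compression::from_str(s)?; // e.g. "zstd:19", "lzma2.6", "none"
    // Level 0 means "use the default"; out-of-range levels are rejected here.
    c.validate_level().map_err(|e| e.to_string())?;
    Ok(c)
}

fn main() {
    assert!(parse_compression("zstd:19").is_ok());
    assert!(parse_compression("bzip2:12").is_err()); // valid range is 1-9
}
```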
+
+impl PartitionKind {
+    /// Returns the directory name for the partition kind.
+    #[inline]
+    pub fn dir_name(&self) -> Cow<'static, str> {
+        match self {
+            Self::Data => Cow::Borrowed("DATA"),
+            Self::Update => Cow::Borrowed("UPDATE"),
+            Self::Channel => Cow::Borrowed("CHANNEL"),
+            Self::Other(v) => {
+                let bytes = v.to_be_bytes();
+                Cow::Owned(format!("P-{}", String::from_utf8_lossy(&bytes)))
+            }
+        }
+    }
+}
+
+impl From<u32> for PartitionKind {
+    #[inline]
+    fn from(v: u32) -> Self {
+        match v {
+            0 => Self::Data,
+            1 => Self::Update,
+            2 => Self::Channel,
+            v => Self::Other(v),
+        }
+    }
+}
+
+/// Wii partition information.
+#[derive(Debug, Clone)]
+pub struct PartitionInfo {
+    /// The partition index.
+    pub index: usize,
+    /// The kind of disc partition.
+    pub kind: PartitionKind,
+    /// The start sector of the partition.
+    pub start_sector: u32,
+    /// The start sector of the partition's (usually encrypted) data.
+    pub data_start_sector: u32,
+    /// The end sector of the partition's (usually encrypted) data.
+    pub data_end_sector: u32,
+    /// The AES key for the partition, also known as the "title key".
+    pub key: KeyBytes,
+    /// The Wii partition header.
+    pub header: Arc<WiiPartitionHeader>,
+    /// The disc header within the partition.
+    pub disc_header: Arc<DiscHeader>,
+    /// The partition header within the partition.
+    pub partition_header: Arc<PartitionHeader>,
+    /// Whether the partition data is encrypted
+    pub has_encryption: bool,
+    /// Whether the partition data hashes are present
+    pub has_hashes: bool,
+}
+
+impl PartitionInfo {
+    /// Returns the size of the partition's data region in bytes.
+    #[inline]
+    pub fn data_size(&self) -> u64 {
+        (self.data_end_sector as u64 - self.data_start_sector as u64) * SECTOR_SIZE as u64
+    }
+
+    /// Returns whether the given sector is within the partition's data region.
+    #[inline]
+    pub fn data_contains_sector(&self, sector: u32) -> bool {
+        sector >= self.data_start_sector && sector < self.data_end_sector
+    }
+}
diff --git a/nod/src/disc/direct.rs b/nod/src/disc/direct.rs
new file mode 100644
index 0000000..ae5da16
--- /dev/null
+++ b/nod/src/disc/direct.rs
@@ -0,0 +1,124 @@
+use std::{
+    io,
+    io::{BufRead, Seek, SeekFrom},
+    sync::Arc,
+};
+
+use zerocopy::FromZeros;
+
+use crate::{
+    common::KeyBytes,
+    disc::{wii::SECTOR_DATA_SIZE, DiscHeader, SECTOR_SIZE},
+    io::block::{Block, BlockReader},
+    util::impl_read_for_bufread,
+    Result,
+};
+
+pub enum DirectDiscReaderMode {
+    Raw,
+    Partition { disc_header: Arc<DiscHeader>, data_start_sector: u32, key: KeyBytes },
+}
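The `PartitionInfo` helpers above are plain arithmetic over 32 KiB sectors. A self-contained mirror of that math, with illustrative sector numbers:

```rust
const SECTOR_SIZE: u64 = 0x8000; // 32 KiB

fn main() {
    // Mirrors PartitionInfo::data_size for a hypothetical partition.
    let data_start_sector: u64 = 0x60;
    let data_end_sector: u64 = 0x1_0000;
    let data_size = (data_end_sector - data_start_sector) * SECTOR_SIZE;
    assert_eq!(data_size, (0x1_0000 - 0x60) * 0x8000);

    // Mirrors data_contains_sector: start inclusive, end exclusive.
    let contains = |s: u64| s >= data_start_sector && s < data_end_sector;
    assert!(contains(0x60) && !contains(0x1_0000));
}
```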
+
+/// Simplified disc reader that uses a block reader directly.
+///
+/// This is used to read disc and partition metadata before we can construct a full disc reader.
+pub struct DirectDiscReader {
+    io: Box<dyn BlockReader>,
+    block: Block,
+    block_buf: Box<[u8]>,
+    block_decrypted: bool,
+    pos: u64,
+    mode: DirectDiscReaderMode,
+}
+
+impl DirectDiscReader {
+    pub fn new(inner: Box<dyn BlockReader>) -> Result<Box<Self>> {
+        let block_size = inner.block_size() as usize;
+        Ok(Box::new(Self {
+            io: inner,
+            block: Block::default(),
+            block_buf: <[u8]>::new_box_zeroed_with_elems(block_size)?,
+            block_decrypted: false,
+            pos: 0,
+            mode: DirectDiscReaderMode::Raw,
+        }))
+    }
+
+    pub fn reset(&mut self, mode: DirectDiscReaderMode) {
+        self.block = Block::default();
+        self.block_decrypted = false;
+        self.pos = 0;
+        self.mode = mode;
+    }
+
+    pub fn into_inner(self) -> Box<dyn BlockReader> { self.io }
+}
+
+impl BufRead for DirectDiscReader {
+    fn fill_buf(&mut self) -> io::Result<&[u8]> {
+        match &self.mode {
+            DirectDiscReaderMode::Raw => {
+                // Read new block if necessary
+                let sector = (self.pos / SECTOR_SIZE as u64) as u32;
+                if self.block_decrypted || !self.block.contains(sector) {
+                    self.block = self.io.read_block(self.block_buf.as_mut(), sector)?;
+                    self.block_decrypted = false;
+                }
+                self.block.data(self.block_buf.as_ref(), self.pos)
+            }
+            DirectDiscReaderMode::Partition { disc_header, data_start_sector, key } => {
+                let has_encryption = disc_header.has_partition_encryption();
+                let has_hashes = disc_header.has_partition_hashes();
+                let part_sector = if has_hashes {
+                    (self.pos / SECTOR_DATA_SIZE as u64) as u32
+                } else {
+                    (self.pos / SECTOR_SIZE as u64) as u32
+                };
+
+                // Read new block if necessary
+                let abs_sector = data_start_sector + part_sector;
+                if !self.block.contains(abs_sector) {
+                    self.block = self.io.read_block(self.block_buf.as_mut(), abs_sector)?;
+                    self.block_decrypted = false;
+                }
+
+                // Allow reusing the same block from raw mode, just decrypt it if necessary
+                if !self.block_decrypted {
+                    self.block
+                        .decrypt_block(self.block_buf.as_mut(), has_encryption.then_some(*key))?;
+                    self.block_decrypted = true;
+                }
+
+                self.block.partition_data(
+                    self.block_buf.as_ref(),
+                    self.pos,
+                    *data_start_sector,
+                    has_hashes,
+                )
+            }
+        }
+    }
+
+    #[inline]
+    fn consume(&mut self, amt: usize) { self.pos += amt as u64; }
+}
+
+impl_read_for_bufread!(DirectDiscReader);
+
+impl Seek for DirectDiscReader {
+    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+        self.pos = match pos {
+            SeekFrom::Start(v) => v,
+            SeekFrom::End(_) => {
+                return Err(io::Error::new(
+                    io::ErrorKind::Unsupported,
+                    "DirectDiscReader: SeekFrom::End is not supported",
+                ));
+            }
+            SeekFrom::Current(v) => self.pos.saturating_add_signed(v),
+        };
+        Ok(self.pos)
+    }
+
+    fn stream_position(&mut self) -> io::Result<u64> { Ok(self.pos) }
+}
diff --git a/nod/src/disc/fst.rs b/nod/src/disc/fst.rs
index 25a4515..ea228d1 100644
--- a/nod/src/disc/fst.rs
+++ b/nod/src/disc/fst.rs
@@ -1,11 +1,15 @@
-//! Disc file system types
+//! File system table (FST) types.
 
 use std::{borrow::Cow, ffi::CStr, mem::size_of};
 
 use encoding_rs::SHIFT_JIS;
-use zerocopy::{big_endian::*, FromBytes, Immutable, IntoBytes, KnownLayout};
+use itertools::Itertools;
+use zerocopy::{big_endian::*, FromBytes, FromZeros, Immutable, IntoBytes, KnownLayout};
 
-use crate::{static_assert, Result};
+use crate::{
+    util::{array_ref, static_assert},
+    Error, Result,
+};
 
 /// File system node kind.
 #[derive(Clone, Debug, PartialEq)]
@@ -25,13 +29,31 @@ pub struct Node {
     kind: u8,
     // u24 big-endian
     name_offset: [u8; 3],
-    pub(crate) offset: U32,
+    offset: U32,
     length: U32,
 }
 
 static_assert!(size_of::<Node>() == 12);
 
 impl Node {
+    /// Create a new node.
+    #[inline]
+    pub fn new(kind: NodeKind, name_offset: u32, offset: u64, length: u32, is_wii: bool) -> Self {
+        Self {
+            kind: match kind {
+                NodeKind::File => 0,
+                NodeKind::Directory => 1,
+                NodeKind::Invalid => u8::MAX,
+            },
+            name_offset: *array_ref![name_offset.to_be_bytes(), 1, 3],
+            offset: U32::new(match kind {
+                NodeKind::File if is_wii => (offset / 4) as u32,
+                _ => offset as u32,
+            }),
+            length: U32::new(length),
+        }
+    }
+
     /// File system node kind.
     #[inline]
     pub fn kind(&self) -> NodeKind {
@@ -42,6 +64,16 @@ impl Node {
         }
     }
 
+    /// Set the node kind.
+    #[inline]
+    pub fn set_kind(&mut self, kind: NodeKind) {
+        self.kind = match kind {
+            NodeKind::File => 0,
+            NodeKind::Directory => 1,
+            NodeKind::Invalid => u8::MAX,
+        };
+    }
+
     /// Whether the node is a file.
     #[inline]
     pub fn is_file(&self) -> bool { self.kind == 0 }
@@ -56,6 +88,12 @@ impl Node {
         u32::from_be_bytes([0, self.name_offset[0], self.name_offset[1], self.name_offset[2]])
     }
 
+    /// Set the name offset of the node.
+    #[inline]
+    pub fn set_name_offset(&mut self, name_offset: u32) {
+        self.name_offset = *array_ref![name_offset.to_be_bytes(), 1, 3];
+    }
+
     /// For files, this is the partition offset of the file data. (Wii: >> 2)
     ///
     /// For directories, this is the parent node index in the FST.
@@ -68,16 +106,27 @@ impl Node {
         }
     }
 
+    /// Set the offset of the node. See [`Node::offset`] for details.
+    #[inline]
+    pub fn set_offset(&mut self, offset: u64, is_wii: bool) {
+        self.offset.set(if is_wii && self.is_file() { (offset / 4) as u32 } else { offset as u32 });
+    }
+
     /// For files, this is the byte size of the file.
     ///
     /// For directories, this is the child end index in the FST.
     ///
     /// Number of child files and directories recursively is `length - offset`.
     #[inline]
-    pub fn length(&self) -> u64 { self.length.get() as u64 }
+    pub fn length(&self) -> u32 { self.length.get() }
+
+    /// Set the length of the node. See [`Node::length`] for details.
+    #[inline]
+    pub fn set_length(&mut self, length: u32) { self.length.set(length); }
 }
 
 /// A view into the file system table (FST).
+#[derive(Clone)]
 pub struct Fst<'a> {
     /// The nodes in the FST.
     pub nodes: &'a [Node],
@@ -87,14 +136,13 @@ pub struct Fst<'a> {
 
 impl<'a> Fst<'a> {
     /// Create a new FST view from a buffer.
-    #[allow(clippy::missing_inline_in_public_items)]
     pub fn new(buf: &'a [u8]) -> Result<Self, &'static str> {
         let Ok((root_node, _)) = Node::ref_from_prefix(buf) else {
             return Err("FST root node not found");
         };
         // String table starts after the last node
-        let string_base = root_node.length() * size_of::<Node>() as u64;
-        if string_base >= buf.len() as u64 {
+        let string_base = root_node.length() * size_of::<Node>() as u32;
+        if string_base > buf.len() as u32 {
             return Err("FST string table out of bounds");
         }
         let (node_buf, string_table) = buf.split_at(string_base as usize);
@@ -104,10 +152,9 @@ impl<'a> Fst<'a> {
 
     /// Iterate over the nodes in the FST.
     #[inline]
-    pub fn iter(&self) -> FstIter { FstIter { fst: self, idx: 1 } }
+    pub fn iter(&self) -> FstIter { FstIter { fst: self.clone(), idx: 1, segments: vec![] } }
 
     /// Get the name of a node.
-    #[allow(clippy::missing_inline_in_public_items)]
     pub fn get_name(&self, node: Node) -> Result<Cow<'a, str>, String> {
         let name_buf = self.string_table.get(node.name_offset() as usize..).ok_or_else(|| {
             format!(
@@ -126,7 +173,6 @@
     }
 
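`get_name` resolves a node's 24-bit `name_offset` into a table of NUL-terminated Shift JIS strings; for ASCII names the lookup reduces to a `CStr` read. A standalone illustration over a hand-built table (the table contents are invented for the example):

```rust
use std::ffi::CStr;

fn main() {
    // FST string table layout: concatenated NUL-terminated names; each node
    // stores a 24-bit byte offset into this table. Offset 0 is the root's name.
    let table = b"\0boot.dol\0data\0";
    let name_at =
        |off: usize| CStr::from_bytes_until_nul(&table[off..]).unwrap().to_str().unwrap();
    assert_eq!(name_at(1), "boot.dol");
    assert_eq!(name_at(10), "data");
}
```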
     /// Finds a particular file or directory by path.
-    #[allow(clippy::missing_inline_in_public_items)]
     pub fn find(&self, path: &str) -> Option<(usize, Node)> {
         let mut split = path.trim_matches('/').split('/');
         let mut current = next_non_empty(&mut split);
@@ -160,23 +206,46 @@
         }
         None
     }
+
+    /// Count the number of files in the FST.
+    pub fn num_files(&self) -> usize { self.nodes.iter().filter(|n| n.is_file()).count() }
 }
 
 /// Iterator over the nodes in an FST.
+///
+/// For each node, the iterator yields the node index, the node itself,
+/// and the full path to the node (separated by `/`).
 pub struct FstIter<'a> {
-    fst: &'a Fst<'a>,
+    fst: Fst<'a>,
     idx: usize,
+    segments: Vec<(Cow<'a, str>, usize)>,
 }
 
 impl<'a> Iterator for FstIter<'a> {
-    type Item = (usize, Node, Result<Cow<'a, str>, String>);
+    type Item = (usize, Node, String);
 
     fn next(&mut self) -> Option<Self::Item> {
         let idx = self.idx;
         let node = self.fst.nodes.get(idx).copied()?;
-        let name = self.fst.get_name(node);
+        let name = self.fst.get_name(node).unwrap_or("".into());
         self.idx += 1;
-        Some((idx, node, name))
+
+        // Remove ended path segments
+        let mut new_size = 0;
+        for (_, end) in self.segments.iter() {
+            if *end == idx {
+                break;
+            }
+            new_size += 1;
+        }
+        self.segments.truncate(new_size);
+
+        // Add the new path segment
+        let length = node.length() as u64;
+        let end = if node.is_dir() { length as usize } else { idx + 1 };
+        self.segments.push((name, end));
+        let path = self.segments.iter().map(|(name, _)| name.as_ref()).join("/");
+        Some((idx, node, path))
     }
 }
 
@@ -190,3 +259,117 @@ fn next_non_empty<'a>(iter: &mut impl Iterator<Item = &'a str>) -> &'a str {
     }
 }
+
+/// A builder for creating a file system table (FST).
+pub struct FstBuilder {
+    nodes: Vec<Node>,
+    string_table: Vec<u8>,
+    stack: Vec<(String, u32)>,
+    is_wii: bool,
+}
+
+impl FstBuilder {
+    /// Create a new FST builder.
+    pub fn new(is_wii: bool) -> Self {
+        let mut builder = Self { nodes: vec![], string_table: vec![], stack: vec![], is_wii };
+        builder.add_node(NodeKind::Directory, "", 0, 0);
+        builder
+    }
+
+    /// Create a new FST builder with an existing string table. This allows matching the string
+    /// ordering of an existing FST.
+    pub fn new_with_string_table(is_wii: bool, string_table: Vec<u8>) -> Result<Self> {
+        if matches!(string_table.last(), Some(n) if *n != 0) {
+            return Err(Error::DiscFormat("String table must be null-terminated".to_string()));
+        }
+        let root_name = CStr::from_bytes_until_nul(&string_table)
+            .map_err(|_| {
+                Error::DiscFormat("String table root name not null-terminated".to_string())
+            })?
+            .to_str()
+            .unwrap_or("")
+            .to_string();
+        let mut builder = Self { nodes: vec![], string_table, stack: vec![], is_wii };
+        builder.add_node(NodeKind::Directory, &root_name, 0, 0);
+        Ok(builder)
+    }
+
+    /// Add a file to the FST. All paths within a directory must be added sequentially,
+    /// otherwise the output FST will be invalid.
+ pub fn add_file(&mut self, path: &str, offset: u64, size: u32) { + let components = path.split('/').collect::>(); + for i in 0..components.len() - 1 { + if matches!(self.stack.get(i), Some((name, _)) if name != components[i]) { + // Pop directories + while self.stack.len() > i { + let (_, idx) = self.stack.pop().unwrap(); + let length = self.nodes.len() as u32; + self.nodes[idx as usize].set_length(length); + } + } + while i >= self.stack.len() { + // Push a new directory node + let component_idx = self.stack.len(); + let parent = if component_idx == 0 { 0 } else { self.stack[component_idx - 1].1 }; + let node_idx = + self.add_node(NodeKind::Directory, components[component_idx], parent as u64, 0); + self.stack.push((components[i].to_string(), node_idx)); + } + } + if components.len() == 1 { + // Pop all directories + while let Some((_, idx)) = self.stack.pop() { + let length = self.nodes.len() as u32; + self.nodes[idx as usize].set_length(length); + } + } + // Add file node + self.add_node(NodeKind::File, components.last().unwrap(), offset, size); + } + + /// Get the byte size of the FST. + pub fn byte_size(&self) -> usize { + size_of_val(self.nodes.as_slice()) + self.string_table.len() + } + + /// Finalize the FST and return the serialized data. + pub fn finalize(mut self) -> Box<[u8]> { + // Finalize directory lengths + let node_count = self.nodes.len() as u32; + while let Some((_, idx)) = self.stack.pop() { + self.nodes[idx as usize].set_length(node_count); + } + self.nodes[0].set_length(node_count); + + // Serialize nodes and string table + let nodes_data = self.nodes.as_bytes(); + let string_table_data = self.string_table.as_bytes(); + let mut data = + <[u8]>::new_box_zeroed_with_elems(nodes_data.len() + string_table_data.len()).unwrap(); + data[..nodes_data.len()].copy_from_slice(self.nodes.as_bytes()); + data[nodes_data.len()..].copy_from_slice(self.string_table.as_bytes()); + data + } + + fn add_node(&mut self, kind: NodeKind, name: &str, offset: u64, length: u32) -> u32 { + let (bytes, _, _) = SHIFT_JIS.encode(name); + // Check if the name already exists in the string table + let mut name_offset = 0; + while name_offset < self.string_table.len() { + let string_buf = &self.string_table[name_offset..]; + let existing = CStr::from_bytes_until_nul(string_buf).unwrap(); + if existing.to_bytes() == bytes.as_ref() { + break; + } + name_offset += existing.to_bytes_with_nul().len(); + } + // Otherwise, add the name to the string table + if name_offset == self.string_table.len() { + self.string_table.extend_from_slice(bytes.as_ref()); + self.string_table.push(0); + } + let idx = self.nodes.len() as u32; + self.nodes.push(Node::new(kind, name_offset as u32, offset, length, self.is_wii)); + idx + } +} diff --git a/nod/src/disc/gcn.rs b/nod/src/disc/gcn.rs index dcc58fc..abf70d4 100644 --- a/nod/src/disc/gcn.rs +++ b/nod/src/disc/gcn.rs @@ -2,190 +2,144 @@ use std::{ io, io::{BufRead, Read, Seek, SeekFrom}, mem::size_of, + sync::Arc, }; -use zerocopy::{FromBytes, FromZeros}; +use zerocopy::FromBytes; -use super::{ - ApploaderHeader, DiscHeader, DolHeader, FileStream, Node, PartitionBase, PartitionHeader, - PartitionMeta, BI2_SIZE, BOOT_SIZE, SECTOR_SIZE, -}; use crate::{ - disc::streams::OwnedFileStream, - io::block::{Block, BlockIO}, - util::read::{read_box, read_box_slice, read_vec}, + disc::{ + preloader::{Preloader, SectorGroup, SectorGroupRequest}, + ApploaderHeader, DiscHeader, DolHeader, PartitionHeader, BI2_SIZE, BOOT_SIZE, + SECTOR_GROUP_SIZE, SECTOR_SIZE, + }, + 
io::block::BlockReader, + read::{PartitionEncryption, PartitionMeta, PartitionReader}, + util::{ + impl_read_for_bufread, + read::{read_arc, read_arc_slice, read_vec}, + }, Result, ResultContext, }; -pub struct PartitionGC { - io: Box, - block: Block, - block_buf: Box<[u8]>, - block_idx: u32, - sector_buf: Box<[u8; SECTOR_SIZE]>, - sector: u32, +pub struct PartitionReaderGC { + io: Box, + preloader: Arc, pos: u64, - disc_header: Box, + disc_size: u64, + sector_group: Option, + meta: Option, } -impl Clone for PartitionGC { +impl Clone for PartitionReaderGC { fn clone(&self) -> Self { Self { io: self.io.clone(), - block: Block::default(), - block_buf: <[u8]>::new_box_zeroed_with_elems(self.block_buf.len()).unwrap(), - block_idx: u32::MAX, - sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed().unwrap(), - sector: u32::MAX, + preloader: self.preloader.clone(), pos: 0, - disc_header: self.disc_header.clone(), + disc_size: self.disc_size, + sector_group: None, + meta: self.meta.clone(), } } } -impl PartitionGC { - pub fn new(inner: Box, disc_header: Box) -> Result> { - let block_size = inner.block_size(); +impl PartitionReaderGC { + pub fn new( + inner: Box, + preloader: Arc, + disc_size: u64, + ) -> Result> { Ok(Box::new(Self { io: inner, - block: Block::default(), - block_buf: <[u8]>::new_box_zeroed_with_elems(block_size as usize).unwrap(), - block_idx: u32::MAX, - sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed().unwrap(), - sector: u32::MAX, + preloader, pos: 0, - disc_header, + disc_size, + sector_group: None, + meta: None, })) } - - pub fn into_inner(self) -> Box { self.io } } -impl BufRead for PartitionGC { +impl BufRead for PartitionReaderGC { fn fill_buf(&mut self) -> io::Result<&[u8]> { - let sector = (self.pos / SECTOR_SIZE as u64) as u32; - let block_idx = (sector as u64 * SECTOR_SIZE as u64 / self.block_buf.len() as u64) as u32; - - // Read new block if necessary - if block_idx != self.block_idx { - self.block = self.io.read_block(self.block_buf.as_mut(), block_idx, None)?; - self.block_idx = block_idx; + if self.pos >= self.disc_size { + return Ok(&[]); } - // Copy sector if necessary - if sector != self.sector { - self.block.copy_raw( - self.sector_buf.as_mut(), - self.block_buf.as_ref(), - sector, - &self.disc_header, - )?; - self.sector = sector; - } + let abs_sector = (self.pos / SECTOR_SIZE as u64) as u32; + let group_idx = abs_sector / 64; + let abs_group_sector = group_idx * 64; + let max_groups = self.disc_size.div_ceil(SECTOR_GROUP_SIZE as u64) as u32; + let request = SectorGroupRequest { + group_idx, + partition_idx: None, + mode: PartitionEncryption::Original, + }; - let offset = (self.pos % SECTOR_SIZE as u64) as usize; - Ok(&self.sector_buf[offset..]) + let sector_group = if matches!(&self.sector_group, Some(sector_group) if sector_group.request == request) + { + // We can improve this in Rust 2024 with `if_let_rescope` + // https://github.com/rust-lang/rust/issues/124085 + self.sector_group.as_ref().unwrap() + } else { + self.sector_group.insert(self.preloader.fetch(request, max_groups)?) 
+ }; + + // Calculate the number of consecutive sectors in the group + let group_sector = abs_sector - abs_group_sector; + let consecutive_sectors = sector_group.consecutive_sectors(group_sector); + if consecutive_sectors == 0 { + return Ok(&[]); + } + let num_sectors = group_sector + consecutive_sectors; + + // Read from sector group buffer + let group_start = abs_group_sector as u64 * SECTOR_SIZE as u64; + let offset = (self.pos - group_start) as usize; + let end = + (num_sectors as u64 * SECTOR_SIZE as u64).min(self.disc_size - group_start) as usize; + Ok(§or_group.data[offset..end]) } #[inline] fn consume(&mut self, amt: usize) { self.pos += amt as u64; } } -impl Read for PartitionGC { - #[inline] - fn read(&mut self, out: &mut [u8]) -> io::Result { - let buf = self.fill_buf()?; - let len = buf.len().min(out.len()); - out[..len].copy_from_slice(&buf[..len]); - self.consume(len); - Ok(len) - } -} +impl_read_for_bufread!(PartitionReaderGC); -impl Seek for PartitionGC { +impl Seek for PartitionReaderGC { fn seek(&mut self, pos: SeekFrom) -> io::Result { self.pos = match pos { SeekFrom::Start(v) => v, - SeekFrom::End(_) => { - return Err(io::Error::new( - io::ErrorKind::Unsupported, - "GCPartitionReader: SeekFrom::End is not supported".to_string(), - )); - } + SeekFrom::End(v) => self.disc_size.saturating_add_signed(v), SeekFrom::Current(v) => self.pos.saturating_add_signed(v), }; Ok(self.pos) } + + fn stream_position(&mut self) -> io::Result { Ok(self.pos) } } -impl PartitionBase for PartitionGC { - fn meta(&mut self) -> Result> { - self.seek(SeekFrom::Start(0)).context("Seeking to partition metadata")?; - read_part_meta(self, false) - } +impl PartitionReader for PartitionReaderGC { + fn is_wii(&self) -> bool { false } - fn open_file(&mut self, node: Node) -> io::Result { - if !node.is_file() { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - "Node is not a file".to_string(), - )); + fn meta(&mut self) -> Result { + if let Some(meta) = &self.meta { + Ok(meta.clone()) + } else { + let meta = read_part_meta(self, false)?; + self.meta = Some(meta.clone()); + Ok(meta) } - FileStream::new(self, node.offset(false), node.length()) - } - - fn into_open_file(self: Box, node: Node) -> io::Result { - if !node.is_file() { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - "Node is not a file".to_string(), - )); - } - OwnedFileStream::new(self, node.offset(false), node.length()) } } -pub(crate) fn read_part_meta( - reader: &mut dyn PartitionBase, +pub(crate) fn read_dol( + reader: &mut dyn PartitionReader, + partition_header: &PartitionHeader, is_wii: bool, -) -> Result> { - // boot.bin - let raw_boot: Box<[u8; BOOT_SIZE]> = read_box(reader).context("Reading boot.bin")?; - let partition_header = - PartitionHeader::ref_from_bytes(&raw_boot[size_of::()..]).unwrap(); - - // bi2.bin - let raw_bi2: Box<[u8; BI2_SIZE]> = read_box(reader).context("Reading bi2.bin")?; - - // apploader.bin - let mut raw_apploader: Vec = - read_vec(reader, size_of::()).context("Reading apploader header")?; - let apploader_header = ApploaderHeader::ref_from_bytes(raw_apploader.as_slice()).unwrap(); - raw_apploader.resize( - size_of::() - + apploader_header.size.get() as usize - + apploader_header.trailer_size.get() as usize, - 0, - ); - reader - .read_exact(&mut raw_apploader[size_of::()..]) - .context("Reading apploader")?; - let raw_apploader = raw_apploader.into_boxed_slice(); - - // fst.bin - reader - .seek(SeekFrom::Start(partition_header.fst_offset(is_wii))) - .context("Seeking to FST offset")?; - 
let raw_fst: Box<[u8]> = read_box_slice(reader, partition_header.fst_size(is_wii) as usize) - .with_context(|| { - format!( - "Reading partition FST (offset {}, size {})", - partition_header.fst_offset(is_wii), - partition_header.fst_size(is_wii) - ) - })?; - - // main.dol +) -> Result> { reader .seek(SeekFrom::Start(partition_header.dol_offset(is_wii))) .context("Seeking to DOL offset")?; @@ -208,9 +162,65 @@ pub(crate) fn read_part_meta( .unwrap_or(size_of::() as u32); raw_dol.resize(dol_size as usize, 0); reader.read_exact(&mut raw_dol[size_of::()..]).context("Reading DOL")?; - let raw_dol = raw_dol.into_boxed_slice(); + Ok(Arc::from(raw_dol.as_slice())) +} - Ok(Box::new(PartitionMeta { +pub(crate) fn read_fst( + reader: &mut R, + partition_header: &PartitionHeader, + is_wii: bool, +) -> Result> +where + R: Read + Seek + ?Sized, +{ + reader + .seek(SeekFrom::Start(partition_header.fst_offset(is_wii))) + .context("Seeking to FST offset")?; + let raw_fst: Arc<[u8]> = read_arc_slice(reader, partition_header.fst_size(is_wii) as usize) + .with_context(|| { + format!( + "Reading partition FST (offset {}, size {})", + partition_header.fst_offset(is_wii), + partition_header.fst_size(is_wii) + ) + })?; + Ok(raw_fst) +} + +pub(crate) fn read_part_meta( + reader: &mut dyn PartitionReader, + is_wii: bool, +) -> Result { + // boot.bin + let raw_boot: Arc<[u8; BOOT_SIZE]> = read_arc(reader).context("Reading boot.bin")?; + let partition_header = + PartitionHeader::ref_from_bytes(&raw_boot[size_of::()..]).unwrap(); + + // bi2.bin + let raw_bi2: Arc<[u8; BI2_SIZE]> = read_arc(reader).context("Reading bi2.bin")?; + + // apploader.bin + let mut raw_apploader: Vec = + read_vec(reader, size_of::()).context("Reading apploader header")?; + let apploader_header = ApploaderHeader::ref_from_bytes(raw_apploader.as_slice()).unwrap(); + raw_apploader.resize( + size_of::() + + apploader_header.size.get() as usize + + apploader_header.trailer_size.get() as usize, + 0, + ); + reader + .read_exact(&mut raw_apploader[size_of::()..]) + .context("Reading apploader")?; + let raw_apploader = Arc::from(raw_apploader.as_slice()); + + // fst.bin + let raw_fst = read_fst(reader, partition_header, is_wii)?; + + // main.dol + let raw_dol = read_dol(reader, partition_header, is_wii)?; + + Ok(PartitionMeta { raw_boot, raw_bi2, raw_apploader, @@ -220,5 +230,5 @@ pub(crate) fn read_part_meta( raw_tmd: None, raw_cert_chain: None, raw_h3_table: None, - })) + }) } diff --git a/nod/src/disc/hashes.rs b/nod/src/disc/hashes.rs index 5605623..5998b18 100644 --- a/nod/src/disc/hashes.rs +++ b/nod/src/disc/hashes.rs @@ -1,202 +1,92 @@ -use std::{ - io::{Read, Seek, SeekFrom}, - sync::{Arc, Mutex}, - time::Instant, -}; - -use rayon::iter::{IntoParallelIterator, ParallelIterator}; -use sha1::{Digest, Sha1}; -use zerocopy::FromZeros; +use tracing::instrument; +use zerocopy::{FromZeros, IntoBytes}; use crate::{ - array_ref, array_ref_mut, + common::HashBytes, disc::{ - reader::DiscReader, wii::{HASHES_SIZE, SECTOR_DATA_SIZE}, + SECTOR_GROUP_SIZE, SECTOR_SIZE, }, - io::HashBytes, - util::read::read_box_slice, - PartitionOptions, Result, ResultContext, SECTOR_SIZE, + util::{array_ref, array_ref_mut}, }; -/// In a sector, following the 0x400 byte block of hashes, each 0x400 bytes of decrypted data is -/// hashed, yielding 31 H0 hashes. -/// Then, 8 sectors are aggregated into a subgroup, and the 31 H0 hashes for each sector are hashed, -/// yielding 8 H1 hashes. 
-/// Then, 8 subgroups are aggregated into a group, and the 8 H1 hashes for each subgroup are hashed, -/// yielding 8 H2 hashes. -/// Finally, the 8 H2 hashes for each group are hashed, yielding 1 H3 hash. -/// The H3 hashes for each group are stored in the partition's H3 table. -#[derive(Clone, Debug)] -pub struct HashTable { - /// SHA-1 hash of each 0x400 byte block of decrypted data. - pub h0_hashes: Box<[HashBytes]>, - /// SHA-1 hash of the 31 H0 hashes for each sector. - pub h1_hashes: Box<[HashBytes]>, - /// SHA-1 hash of the 8 H1 hashes for each subgroup. - pub h2_hashes: Box<[HashBytes]>, - /// SHA-1 hash of the 8 H2 hashes for each group. - pub h3_hashes: Box<[HashBytes]>, -} - +/// Hashes for a single sector group (64 sectors). #[derive(Clone, FromZeros)] -struct HashResult { - h0_hashes: [HashBytes; 1984], - h1_hashes: [HashBytes; 64], - h2_hashes: [HashBytes; 8], - h3_hash: HashBytes, +pub struct GroupHashes { + pub h3_hash: HashBytes, + pub h2_hashes: [HashBytes; 8], + pub h1_hashes: [HashBytes; 64], + pub h0_hashes: [HashBytes; 1984], } -impl HashTable { - fn new(num_sectors: u32) -> Self { - let num_sectors = num_sectors.next_multiple_of(64) as usize; - let num_data_hashes = num_sectors * 31; - let num_subgroups = num_sectors / 8; - let num_groups = num_subgroups / 8; - Self { - h0_hashes: <[HashBytes]>::new_box_zeroed_with_elems(num_data_hashes).unwrap(), - h1_hashes: <[HashBytes]>::new_box_zeroed_with_elems(num_sectors).unwrap(), - h2_hashes: <[HashBytes]>::new_box_zeroed_with_elems(num_subgroups).unwrap(), - h3_hashes: <[HashBytes]>::new_box_zeroed_with_elems(num_groups).unwrap(), - } +impl GroupHashes { + #[inline] + pub fn hashes_for_sector( + &self, + sector: usize, + ) -> (&[HashBytes; 31], &[HashBytes; 8], &[HashBytes; 8]) { + let h1_hashes = array_ref![self.h1_hashes, sector & !7, 8]; + let h0_hashes = array_ref![self.h0_hashes, sector * 31, 31]; + (h0_hashes, h1_hashes, &self.h2_hashes) } - fn extend(&mut self, group_index: usize, result: &HashResult) { - *array_ref_mut![self.h0_hashes, group_index * 1984, 1984] = result.h0_hashes; - *array_ref_mut![self.h1_hashes, group_index * 64, 64] = result.h1_hashes; - *array_ref_mut![self.h2_hashes, group_index * 8, 8] = result.h2_hashes; - self.h3_hashes[group_index] = result.h3_hash; + #[inline] + pub fn apply(&self, sector_data: &mut [u8; SECTOR_SIZE], sector: usize) { + let (h0_hashes, h1_hashes, h2_hashes) = self.hashes_for_sector(sector); + array_ref_mut![sector_data, 0, 0x26C].copy_from_slice(h0_hashes.as_bytes()); + array_ref_mut![sector_data, 0x280, 0xA0].copy_from_slice(h1_hashes.as_bytes()); + array_ref_mut![sector_data, 0x340, 0xA0].copy_from_slice(h2_hashes.as_bytes()); } } -pub fn rebuild_hashes(reader: &mut DiscReader) -> Result<()> { - const NUM_H0_HASHES: usize = SECTOR_DATA_SIZE / HASHES_SIZE; +pub const NUM_H0_HASHES: usize = SECTOR_DATA_SIZE / HASHES_SIZE; - log::info!( - "Rebuilding hashes for Wii partition data (using {} threads)", - rayon::current_num_threads() - ); - - let start = Instant::now(); - - // Precompute hashes for zeroed sectors. 
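The hash hierarchy described above is fixed-arity throughout, so the array sizes in `GroupHashes` follow directly from the constants. A self-contained sanity check of that arithmetic (constants mirrored locally):

```rust
fn main() {
    const SECTOR_SIZE: usize = 0x8000; // 32 KiB: hash block + data
    const HASHES_SIZE: usize = 0x400; // per-sector hash block
    const SECTOR_DATA_SIZE: usize = SECTOR_SIZE - HASHES_SIZE; // 0x7C00

    // 31 H0 hashes per sector: one per 0x400-byte data block.
    assert_eq!(SECTOR_DATA_SIZE / HASHES_SIZE, 31);
    // 8 sectors per subgroup (H1), 8 subgroups per group (H2), 1 H3 per group.
    let sectors_per_group = 8 * 8;
    assert_eq!(sectors_per_group, 64);
    // Per group: 64 * 31 H0 hashes and 64 H1 hashes, matching GroupHashes.
    assert_eq!(sectors_per_group * 31, 1984);
}
```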
- const ZERO_H0_BYTES: &[u8] = &[0u8; HASHES_SIZE]; - let zero_h0_hash = sha1_hash(ZERO_H0_BYTES); - - let partitions = reader.partitions(); - let mut hash_tables = Vec::with_capacity(partitions.len()); - for part in partitions { - let part_sectors = part.data_end_sector - part.data_start_sector; - let hash_table = HashTable::new(part_sectors); - log::debug!( - "Rebuilding hashes: {} sectors, {} subgroups, {} groups", - hash_table.h1_hashes.len(), - hash_table.h2_hashes.len(), - hash_table.h3_hashes.len() - ); - - let group_count = hash_table.h3_hashes.len(); - let mutex = Arc::new(Mutex::new(hash_table)); - let partition_options = PartitionOptions { validate_hashes: false }; - (0..group_count).into_par_iter().try_for_each_with( - (reader.open_partition(part.index, &partition_options)?, mutex.clone()), - |(stream, mutex), h3_index| -> Result<()> { - let mut result = HashResult::new_box_zeroed()?; - let mut data_buf = <[u8]>::new_box_zeroed_with_elems(SECTOR_DATA_SIZE)?; - let mut h3_hasher = Sha1::new(); - for h2_index in 0..8 { - let mut h2_hasher = Sha1::new(); - for h1_index in 0..8 { - let sector = h1_index + h2_index * 8; - let part_sector = sector as u32 + h3_index as u32 * 64; - let mut h1_hasher = Sha1::new(); - if part_sector >= part_sectors { - for h0_index in 0..NUM_H0_HASHES { - result.h0_hashes[h0_index + sector * 31] = zero_h0_hash; - h1_hasher.update(zero_h0_hash); - } - } else { - stream - .seek(SeekFrom::Start(part_sector as u64 * SECTOR_DATA_SIZE as u64)) - .with_context(|| format!("Seeking to sector {}", part_sector))?; - stream - .read_exact(&mut data_buf) - .with_context(|| format!("Reading sector {}", part_sector))?; - for h0_index in 0..NUM_H0_HASHES { - let h0_hash = sha1_hash(array_ref![ - data_buf, - h0_index * HASHES_SIZE, - HASHES_SIZE - ]); - result.h0_hashes[h0_index + sector * 31] = h0_hash; - h1_hasher.update(h0_hash); - } - }; - let h1_hash = h1_hasher.finalize().into(); - result.h1_hashes[sector] = h1_hash; - h2_hasher.update(h1_hash); - } - let h2_hash = h2_hasher.finalize().into(); - result.h2_hashes[h2_index] = h2_hash; - h3_hasher.update(h2_hash); +#[instrument(skip_all)] +pub fn hash_sector_group(sector_group: &[u8; SECTOR_GROUP_SIZE]) -> Box { + let mut result = GroupHashes::new_box_zeroed().unwrap(); + for (h2_index, h2_hash) in result.h2_hashes.iter_mut().enumerate() { + let out_h1_hashes = array_ref_mut![result.h1_hashes, h2_index * 8, 8]; + for (h1_index, h1_hash) in out_h1_hashes.iter_mut().enumerate() { + let sector = h1_index + h2_index * 8; + let out_h0_hashes = + array_ref_mut![result.h0_hashes, sector * NUM_H0_HASHES, NUM_H0_HASHES]; + if array_ref![sector_group, sector * SECTOR_SIZE, 20].iter().any(|&v| v != 0) { + // Hash block already present, use it + out_h0_hashes.as_mut_bytes().copy_from_slice(array_ref![ + sector_group, + sector * SECTOR_SIZE, + 0x26C + ]); + } else { + for (h0_index, h0_hash) in out_h0_hashes.iter_mut().enumerate() { + *h0_hash = sha1_hash(array_ref![ + sector_group, + sector * SECTOR_SIZE + HASHES_SIZE + h0_index * HASHES_SIZE, + HASHES_SIZE + ]); } - result.h3_hash = h3_hasher.finalize().into(); - let mut hash_table = mutex.lock().map_err(|_| "Failed to lock mutex")?; - hash_table.extend(h3_index, &result); - Ok(()) - }, - )?; - - let hash_table = Arc::try_unwrap(mutex) - .map_err(|_| "Failed to unwrap Arc")? 
- .into_inner() - .map_err(|_| "Failed to lock mutex")?; - hash_tables.push(hash_table); - } - - // Verify against H3 table - for (part, hash_table) in reader.partitions.clone().iter().zip(hash_tables.iter()) { - log::debug!( - "Verifying H3 table for partition {} (count {})", - part.index, - hash_table.h3_hashes.len() - ); - reader - .seek(SeekFrom::Start( - part.start_sector as u64 * SECTOR_SIZE as u64 + part.header.h3_table_off(), - )) - .context("Seeking to H3 table")?; - let h3_table: Box<[HashBytes]> = - read_box_slice(reader, hash_table.h3_hashes.len()).context("Reading H3 table")?; - let mut mismatches = 0; - for (idx, (expected_hash, h3_hash)) in - h3_table.iter().zip(hash_table.h3_hashes.iter()).enumerate() - { - if expected_hash != h3_hash { - let mut got_bytes = [0u8; 40]; - let got = base16ct::lower::encode_str(h3_hash, &mut got_bytes).unwrap(); - let mut expected_bytes = [0u8; 40]; - let expected = - base16ct::lower::encode_str(expected_hash, &mut expected_bytes).unwrap(); - log::debug!( - "Partition {} H3 table does not match:\n\tindex {}\n\texpected: {}\n\tgot: {}", - part.index, idx, expected, got - ); - mismatches += 1; } + *h1_hash = sha1_hash(out_h0_hashes.as_bytes()); } - if mismatches > 0 { - log::warn!("Partition {} H3 table has {} hash mismatches", part.index, mismatches); - } + *h2_hash = sha1_hash(out_h1_hashes.as_bytes()); } - - for (part, hash_table) in reader.partitions.iter_mut().zip(hash_tables) { - part.hash_table = Some(hash_table); - } - log::info!("Rebuilt hashes in {:?}", start.elapsed()); - Ok(()) + result.h3_hash = sha1_hash(result.h2_hashes.as_bytes()); + result } /// Hashes a byte slice with SHA-1. -#[inline] -pub fn sha1_hash(buf: &[u8]) -> HashBytes { HashBytes::from(Sha1::digest(buf)) } +#[instrument(skip_all)] +pub fn sha1_hash(buf: &[u8]) -> HashBytes { + #[cfg(feature = "openssl")] + { + // The one-shot openssl::sha::sha1 ends up being much slower + let mut hasher = openssl::sha::Sha1::new(); + hasher.update(buf); + hasher.finish() + } + #[cfg(not(feature = "openssl"))] + { + use sha1::Digest; + HashBytes::from(sha1::Sha1::digest(buf)) + } +} diff --git a/nod/src/disc/mod.rs b/nod/src/disc/mod.rs index 44ea033..60c2bcf 100644 --- a/nod/src/disc/mod.rs +++ b/nod/src/disc/mod.rs @@ -1,40 +1,54 @@ -//! Disc type related logic (GameCube, Wii) +//! GameCube/Wii disc format types. -use std::{ - borrow::Cow, - ffi::CStr, - fmt::{Debug, Display, Formatter}, - io, - io::{BufRead, Seek}, - mem::size_of, - str::from_utf8, -}; +use std::{ffi::CStr, str::from_utf8}; -use dyn_clone::DynClone; use zerocopy::{big_endian::*, FromBytes, Immutable, IntoBytes, KnownLayout}; -use crate::{io::MagicBytes, static_assert, Result}; +use crate::{common::MagicBytes, util::static_assert}; -pub(crate) mod fst; +pub(crate) mod direct; +pub mod fst; pub(crate) mod gcn; pub(crate) mod hashes; +pub(crate) mod preloader; pub(crate) mod reader; -pub(crate) mod streams; -pub(crate) mod wii; - -pub use fst::{Fst, Node, NodeKind}; -pub use streams::{FileStream, OwnedFileStream, WindowedStream}; -pub use wii::{ContentMetadata, SignedHeader, Ticket, TicketLimit, TmdHeader, REGION_SIZE}; +pub mod wii; +pub(crate) mod writer; /// Size in bytes of a disc sector. (32 KiB) pub const SECTOR_SIZE: usize = 0x8000; +/// Size in bytes of a Wii partition sector group. (32 KiB * 64, 2 MiB) +pub const SECTOR_GROUP_SIZE: usize = SECTOR_SIZE * 64; + /// Magic bytes for Wii discs. Located at offset 0x18. pub const WII_MAGIC: MagicBytes = [0x5D, 0x1C, 0x9E, 0xA3]; /// Magic bytes for GameCube discs. 
Located at offset 0x1C.
 pub const GCN_MAGIC: MagicBytes = [0xC2, 0x33, 0x9F, 0x3D];
 
+/// Size in bytes of the disc header and partition header. (boot.bin)
+pub const BOOT_SIZE: usize = size_of::<DiscHeader>() + size_of::<PartitionHeader>();
+
+/// Size in bytes of the debug and region information. (bi2.bin)
+pub const BI2_SIZE: usize = 0x2000;
+
+/// The size of a single-layer MiniDVD. (1.4 GB)
+///
+/// GameCube games and some third-party Wii discs (Datel) use this format.
+pub const MINI_DVD_SIZE: u64 = 1_459_978_240;
+
+/// The size of a single-layer DVD. (4.7 GB)
+///
+/// The vast majority of Wii games use this format.
+pub const SL_DVD_SIZE: u64 = 4_699_979_776;
+
+/// The size of a dual-layer DVD. (8.5 GB)
+///
+/// A few larger Wii games use this format.
+/// (Super Smash Bros. Brawl, Metroid Prime Trilogy, etc.)
+pub const DL_DVD_SIZE: u64 = 8_511_160_320;
+
 /// Shared GameCube & Wii disc header.
 ///
 /// This header is always at the start of the disc image and within each Wii partition.
@@ -53,7 +67,7 @@ pub struct DiscHeader {
     pub audio_stream_buf_size: u8,
     /// Padding
     _pad1: [u8; 14],
     /// If this is a Wii disc, this will be 0x5D1C9EA3
     pub wii_magic: MagicBytes,
     /// If this is a GameCube disc, this will be 0xC2339F3D
     pub gcn_magic: MagicBytes,
@@ -112,7 +126,7 @@ pub struct PartitionHeader {
     pub debug_mon_offset: U32,
     /// Debug monitor load address
     pub debug_load_address: U32,
     /// Padding
     _pad1: [u8; 0x18],
     /// Offset to main DOL (Wii: >> 2)
     pub dol_offset: U32,
@@ -145,6 +159,16 @@ impl PartitionHeader {
         }
     }
 
+    /// Set the offset within the partition to the main DOL.
+    #[inline]
+    pub fn set_dol_offset(&mut self, offset: u64, is_wii: bool) {
+        if is_wii {
+            self.dol_offset.set((offset / 4) as u32);
+        } else {
+            self.dol_offset.set(offset as u32);
+        }
+    }
+
     /// Offset within the partition to the file system table (FST).
     #[inline]
     pub fn fst_offset(&self, is_wii: bool) -> u64 {
@@ -155,6 +179,16 @@ impl PartitionHeader {
         }
     }
 
+    /// Set the offset within the partition to the file system table (FST).
+    #[inline]
+    pub fn set_fst_offset(&mut self, offset: u64, is_wii: bool) {
+        if is_wii {
+            self.fst_offset.set((offset / 4) as u32);
+        } else {
+            self.fst_offset.set(offset as u32);
+        }
+    }
+
     /// Size of the file system table (FST).
     #[inline]
     pub fn fst_size(&self, is_wii: bool) -> u64 {
@@ -165,6 +199,16 @@ impl PartitionHeader {
         }
     }
 
+    /// Set the size of the file system table (FST).
+    #[inline]
+    pub fn set_fst_size(&mut self, size: u64, is_wii: bool) {
+        if is_wii {
+            self.fst_size.set((size / 4) as u32);
+        } else {
+            self.fst_size.set(size as u32);
+        }
+    }
+
     /// Maximum size of the file system table (FST) across multi-disc games.
     #[inline]
     pub fn fst_max_size(&self, is_wii: bool) -> u64 {
@@ -174,6 +218,16 @@ impl PartitionHeader {
             self.fst_max_size.get() as u64
         }
     }
+
+    /// Set the maximum size of the file system table (FST) across multi-disc games.
+    #[inline]
+    pub fn set_fst_max_size(&mut self, size: u64, is_wii: bool) {
+        if is_wii {
+            self.fst_max_size.set((size / 4) as u32);
+        } else {
+            self.fst_max_size.set(size as u32);
+        }
+    }
 }
 
 /// Apploader header.
@@ -231,225 +285,3 @@ pub struct DolHeader {
 }
 
 static_assert!(size_of::<DolHeader>() == 0x100);
-
-/// The kind of disc partition.
-#[derive(Debug, PartialEq, Eq, Copy, Clone)]
-pub enum PartitionKind {
-    /// Data partition.
-    Data,
-    /// Update partition.
-    Update,
-    /// Channel partition.
-    Channel,
-    /// Other partition kind.
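Every field marked `(Wii: >> 2)` uses the same encoding: the 32-bit field stores the byte value divided by 4, which extends the addressable range to 16 GiB at the cost of requiring 4-byte alignment. A standalone round-trip of that encoding (the sample offset is illustrative):

```rust
fn main() {
    // Encode: offset / 4 stored in a u32 (Wii); decode: stored * 4.
    let offset: u64 = 0x1_2345_6780; // past the 4 GiB limit of a plain u32
    let stored = (offset / 4) as u32;
    assert_eq!(stored as u64 * 4, offset); // round-trips while 4-byte aligned
    // Maximum addressable offset: u32::MAX * 4, just under 16 GiB.
    assert_eq!(u32::MAX as u64 * 4, 0x3_FFFF_FFFC);
}
```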
- Other(u32), -} - -impl Display for PartitionKind { - #[inline] - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - match self { - Self::Data => write!(f, "Data"), - Self::Update => write!(f, "Update"), - Self::Channel => write!(f, "Channel"), - Self::Other(v) => { - let bytes = v.to_be_bytes(); - write!(f, "Other ({:08X}, {})", v, String::from_utf8_lossy(&bytes)) - } - } - } -} - -impl PartitionKind { - /// Returns the directory name for the partition kind. - #[inline] - pub fn dir_name(&self) -> Cow { - match self { - Self::Data => Cow::Borrowed("DATA"), - Self::Update => Cow::Borrowed("UPDATE"), - Self::Channel => Cow::Borrowed("CHANNEL"), - Self::Other(v) => { - let bytes = v.to_be_bytes(); - Cow::Owned(format!("P-{}", String::from_utf8_lossy(&bytes))) - } - } - } -} - -impl From for PartitionKind { - #[inline] - fn from(v: u32) -> Self { - match v { - 0 => Self::Data, - 1 => Self::Update, - 2 => Self::Channel, - v => Self::Other(v), - } - } -} - -/// An open disc partition. -pub trait PartitionBase: DynClone + BufRead + Seek + Send + Sync { - /// Reads the partition header and file system table. - fn meta(&mut self) -> Result>; - - /// Seeks the partition stream to the specified file system node - /// and returns a windowed stream. - /// - /// # Examples - /// - /// Basic usage: - /// ```no_run - /// use std::io::Read; - /// - /// use nod::{Disc, PartitionKind}; - /// - /// fn main() -> nod::Result<()> { - /// let disc = Disc::new("path/to/file.iso")?; - /// let mut partition = disc.open_partition_kind(PartitionKind::Data)?; - /// let meta = partition.meta()?; - /// let fst = meta.fst()?; - /// if let Some((_, node)) = fst.find("/MP3/Worlds.txt") { - /// let mut s = String::new(); - /// partition - /// .open_file(node) - /// .expect("Failed to open file stream") - /// .read_to_string(&mut s) - /// .expect("Failed to read file"); - /// println!("{}", s); - /// } - /// Ok(()) - /// } - /// ``` - fn open_file(&mut self, node: Node) -> io::Result; - - /// Consumes the partition instance and returns a windowed stream. - /// - /// # Examples - /// - /// ```no_run - /// use std::io::Read; - /// - /// use nod::{Disc, PartitionKind, OwnedFileStream}; - /// - /// fn main() -> nod::Result<()> { - /// let disc = Disc::new("path/to/file.iso")?; - /// let mut partition = disc.open_partition_kind(PartitionKind::Data)?; - /// let meta = partition.meta()?; - /// let fst = meta.fst()?; - /// if let Some((_, node)) = fst.find("/disc.tgc") { - /// let file: OwnedFileStream = partition - /// .clone() // Clone the Box - /// .into_open_file(node) // Get an OwnedFileStream - /// .expect("Failed to open file stream"); - /// // Open the inner disc image using the owned stream - /// let inner_disc = Disc::new_stream(Box::new(file)) - /// .expect("Failed to open inner disc"); - /// // ... - /// } - /// Ok(()) - /// } - /// ``` - fn into_open_file(self: Box, node: Node) -> io::Result; -} - -dyn_clone::clone_trait_object!(PartitionBase); - -/// Size of the disc header and partition header (boot.bin) -pub const BOOT_SIZE: usize = size_of::() + size_of::(); -/// Size of the debug and region information (bi2.bin) -pub const BI2_SIZE: usize = 0x2000; - -/// Extra disc partition data. (DOL, FST, etc.) 
-#[derive(Clone, Debug)] -pub struct PartitionMeta { - /// Disc and partition header (boot.bin) - pub raw_boot: Box<[u8; BOOT_SIZE]>, - /// Debug and region information (bi2.bin) - pub raw_bi2: Box<[u8; BI2_SIZE]>, - /// Apploader (apploader.bin) - pub raw_apploader: Box<[u8]>, - /// Main binary (main.dol) - pub raw_dol: Box<[u8]>, - /// File system table (fst.bin) - pub raw_fst: Box<[u8]>, - /// Ticket (ticket.bin, Wii only) - pub raw_ticket: Option>, - /// TMD (tmd.bin, Wii only) - pub raw_tmd: Option>, - /// Certificate chain (cert.bin, Wii only) - pub raw_cert_chain: Option>, - /// H3 hash table (h3.bin, Wii only) - pub raw_h3_table: Option>, -} - -impl PartitionMeta { - /// A view into the disc header. - #[inline] - pub fn header(&self) -> &DiscHeader { - DiscHeader::ref_from_bytes(&self.raw_boot[..size_of::()]) - .expect("Invalid header alignment") - } - - /// A view into the partition header. - #[inline] - pub fn partition_header(&self) -> &PartitionHeader { - PartitionHeader::ref_from_bytes(&self.raw_boot[size_of::()..]) - .expect("Invalid partition header alignment") - } - - /// A view into the apploader header. - #[inline] - pub fn apploader_header(&self) -> &ApploaderHeader { - ApploaderHeader::ref_from_prefix(&self.raw_apploader) - .expect("Invalid apploader alignment") - .0 - } - - /// A view into the file system table (FST). - #[inline] - pub fn fst(&self) -> Result { Fst::new(&self.raw_fst) } - - /// A view into the DOL header. - #[inline] - pub fn dol_header(&self) -> &DolHeader { - DolHeader::ref_from_prefix(&self.raw_dol).expect("Invalid DOL alignment").0 - } - - /// A view into the ticket. (Wii only) - #[inline] - pub fn ticket(&self) -> Option<&Ticket> { - let raw_ticket = self.raw_ticket.as_deref()?; - Some(Ticket::ref_from_bytes(raw_ticket).expect("Invalid ticket alignment")) - } - - /// A view into the TMD. (Wii only) - #[inline] - pub fn tmd_header(&self) -> Option<&TmdHeader> { - let raw_tmd = self.raw_tmd.as_deref()?; - Some(TmdHeader::ref_from_prefix(raw_tmd).expect("Invalid TMD alignment").0) - } - - /// A view into the TMD content metadata. (Wii only) - #[inline] - pub fn content_metadata(&self) -> Option<&[ContentMetadata]> { - let raw_cmd = &self.raw_tmd.as_deref()?[size_of::()..]; - Some(<[ContentMetadata]>::ref_from_bytes(raw_cmd).expect("Invalid CMD alignment")) - } -} - -/// The size of a single-layer MiniDVD. (1.4 GB) -/// -/// GameCube games and some third-party Wii discs (Datel) use this format. -pub const MINI_DVD_SIZE: u64 = 1_459_978_240; - -/// The size of a single-layer DVD. (4.7 GB) -/// -/// The vast majority of Wii games use this format. -pub const SL_DVD_SIZE: u64 = 4_699_979_776; - -/// The size of a dual-layer DVD. (8.5 GB) -/// -/// A few larger Wii games use this format. -/// (Super Smash Bros. Brawl, Metroid Prime Trilogy, etc.) 
-pub const DL_DVD_SIZE: u64 = 8_511_160_320;
diff --git a/nod/src/disc/preloader.rs b/nod/src/disc/preloader.rs
new file mode 100644
index 0000000..e51c6bf
--- /dev/null
+++ b/nod/src/disc/preloader.rs
@@ -0,0 +1,558 @@
+use std::{
+    collections::HashMap,
+    io,
+    num::NonZeroUsize,
+    sync::{Arc, Mutex},
+    thread::JoinHandle,
+    time::{Duration, Instant},
+};
+
+use bytes::{Bytes, BytesMut};
+use crossbeam_channel::{Receiver, Sender};
+use crossbeam_utils::sync::WaitGroup;
+use lru::LruCache;
+use simple_moving_average::{SingleSumSMA, SMA};
+use tracing::{debug, error, instrument, span, Level};
+use zerocopy::FromZeros;
+
+use crate::{
+    common::PartitionInfo,
+    disc::{
+        hashes::hash_sector_group, wii::HASHES_SIZE, DiscHeader, SECTOR_GROUP_SIZE, SECTOR_SIZE,
+    },
+    io::{
+        block::{Block, BlockKind, BlockReader},
+        wia::WIAException,
+    },
+    read::PartitionEncryption,
+    util::{
+        aes::{decrypt_sector, encrypt_sector},
+        array_ref_mut,
+    },
+};
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub struct SectorGroupRequest {
+    pub group_idx: u32,
+    pub partition_idx: Option<u8>,
+    pub mode: PartitionEncryption,
+}
+
+#[derive(Clone)]
+pub struct SectorGroup {
+    pub request: SectorGroupRequest,
+    pub data: Bytes,
+    pub sector_bitmap: u64,
+    pub io_duration: Option<Duration>,
+}
+
+impl SectorGroup {
+    /// Calculate the number of consecutive sectors starting from `start`.
+    #[inline]
+    pub fn consecutive_sectors(&self, start: u32) -> u32 {
+        (self.sector_bitmap >> start).trailing_ones()
+    }
+}
+
+pub type SectorGroupResult = io::Result<SectorGroup>;
+
+#[allow(unused)]
+pub struct Preloader {
+    request_tx: Sender<SectorGroupRequest>,
+    request_rx: Receiver<SectorGroupRequest>,
+    stat_tx: Sender<PreloaderThreadStats>,
+    stat_rx: Receiver<PreloaderThreadStats>,
+    threads: Mutex<PreloaderThreads>,
+    cache: Arc<Mutex<PreloaderCache>>,
+    // Fallback single-threaded loader
+    loader: Mutex<SectorGroupLoader>,
+}
+
+#[allow(unused)]
+struct PreloaderThreads {
+    join_handles: Vec<JoinHandle<()>>,
+    last_adjust: Instant,
+    num_samples: usize,
+    wait_time_avg: SingleSumSMA<Duration, u32, 100>,
+    req_time_avg: SingleSumSMA<Duration, u32, 100>,
+    io_time_avg: SingleSumSMA<Duration, u32, 100>,
+}
+
+impl PreloaderThreads {
+    fn new(join_handles: Vec<JoinHandle<()>>) -> Self {
+        Self {
+            join_handles,
+            last_adjust: Instant::now(),
+            num_samples: 0,
+            wait_time_avg: SingleSumSMA::<_, _, 100>::from_zero(Duration::default()),
+            req_time_avg: SingleSumSMA::<_, _, 100>::from_zero(Duration::default()),
+            io_time_avg: SingleSumSMA::<_, _, 100>::from_zero(Duration::default()),
+        }
+    }
+
+    fn push_stats(&mut self, stat: PreloaderThreadStats, _outer: &Preloader) {
+        self.wait_time_avg.add_sample(stat.wait_time);
+        self.req_time_avg.add_sample(stat.req_time);
+        self.io_time_avg.add_sample(stat.io_time);
+        self.num_samples += 1;
+        if self.num_samples % 100 == 0 {
+            let avg_wait = self.wait_time_avg.get_average();
+            let avg_req = self.req_time_avg.get_average();
+            let avg_io = self.io_time_avg.get_average();
+            let utilization =
+                avg_req.as_secs_f64() / (avg_req.as_secs_f64() + avg_wait.as_secs_f64());
+            let io_time = avg_io.as_secs_f64() / avg_req.as_secs_f64();
+            debug!(
+                "Preloader stats: count {}, wait: {:?}, req: {:?}, util: {:.2}%, io: {:.2}%",
+                self.num_samples,
+                avg_wait,
+                avg_req,
+                utilization * 100.0,
+                io_time * 100.0
+            );
+            // if self.last_adjust.elapsed() > Duration::from_secs(2) {
+            //     if utilization > 0.9 && io_time < 0.1 {
+            //         println!("Preloader is CPU-bound, increasing thread count");
+            //         let id = self.join_handles.len();
+            //         self.join_handles.push(preloader_thread(
+            //             id,
+            //             outer.request_rx.clone(),
+            //             outer.cache.clone(),
+            //             outer.loader.lock().unwrap().clone(),
+            //             outer.stat_tx.clone(),
+            //         ));
+            //         self.last_adjust =
Instant::now();
+            //     } /*else if io_time > 0.9 {
+            //         println!("Preloader is I/O-bound, decreasing thread count");
+            //         if self.join_handles.len() > 1 {
+            //             let handle = self.join_handles.pop().unwrap();
+            //
+            //         }
+            //     }*/
+            // }
+        }
+    }
+}
+
+struct PreloaderCache {
+    inflight: HashMap<SectorGroupRequest, WaitGroup>,
+    lru_cache: LruCache<SectorGroupRequest, SectorGroup>,
+}
+
+impl Default for PreloaderCache {
+    fn default() -> Self {
+        Self {
+            inflight: Default::default(),
+            lru_cache: LruCache::new(NonZeroUsize::new(64).unwrap()),
+        }
+    }
+}
+
+impl PreloaderCache {
+    fn push(&mut self, request: SectorGroupRequest, group: SectorGroup) {
+        self.lru_cache.push(request, group);
+        self.inflight.remove(&request);
+    }
+
+    fn remove(&mut self, request: &SectorGroupRequest) { self.inflight.remove(request); }
+
+    fn contains(&self, request: &SectorGroupRequest) -> bool {
+        self.lru_cache.contains(request) || self.inflight.contains_key(request)
+    }
+}
+
+#[allow(unused)]
+struct PreloaderThreadStats {
+    thread_id: usize,
+    wait_time: Duration,
+    req_time: Duration,
+    io_time: Duration,
+}
+
+fn preloader_thread(
+    thread_id: usize,
+    request_rx: Receiver<SectorGroupRequest>,
+    cache: Arc<Mutex<PreloaderCache>>,
+    mut loader: SectorGroupLoader,
+    stat_tx: Sender<PreloaderThreadStats>,
+) -> JoinHandle<()> {
+    std::thread::Builder::new()
+        .name(format!("Preloader {thread_id}"))
+        .spawn(move || {
+            let mut last_request_end: Option<Instant> = None;
+            while let Ok(request) = request_rx.recv() {
+                let wait_time = if let Some(last_request) = last_request_end {
+                    last_request.elapsed()
+                } else {
+                    Duration::default()
+                };
+                let start = Instant::now();
+                let mut io_time = Duration::default();
+                match loader.load(request) {
+                    Ok(group) => {
+                        let Ok(mut cache_guard) = cache.lock() else {
+                            break;
+                        };
+                        io_time = group.io_duration.unwrap_or_default();
+                        cache_guard.push(request, group);
+                    }
+                    Err(_) => {
+                        let Ok(mut cache_guard) = cache.lock() else {
+                            break;
+                        };
+                        // Just drop the request if it failed
+                        cache_guard.remove(&request);
+                    }
+                }
+                let end = Instant::now();
+                last_request_end = Some(end);
+                let req_time = end - start;
+                stat_tx
+                    .send(PreloaderThreadStats { thread_id, wait_time, req_time, io_time })
+                    .expect("Failed to send preloader stats");
+            }
+        })
+        .expect("Failed to spawn preloader thread")
+}
+
+impl Preloader {
+    pub fn new(loader: SectorGroupLoader, num_threads: usize) -> Arc<Self> {
+        debug!("Creating preloader with {} threads", num_threads);
+
+        let (request_tx, request_rx) = crossbeam_channel::unbounded();
+        let (stat_tx, stat_rx) = crossbeam_channel::unbounded();
+        let cache = Arc::new(Mutex::new(PreloaderCache::default()));
+        let mut join_handles = Vec::with_capacity(num_threads);
+        for i in 0..num_threads {
+            join_handles.push(preloader_thread(
+                i,
+                request_rx.clone(),
+                cache.clone(),
+                loader.clone(),
+                stat_tx.clone(),
+            ));
+        }
+        let threads = Mutex::new(PreloaderThreads::new(join_handles));
+        let loader = Mutex::new(loader);
+        Arc::new(Self { request_tx, request_rx, stat_tx, stat_rx, threads, cache, loader })
+    }
+
+    #[allow(unused)]
+    pub fn shutdown(self) {
+        let guard = self.threads.into_inner().unwrap();
+        for handle in guard.join_handles {
+            handle.join().unwrap();
+        }
+    }
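`fetch` below deduplicates concurrent requests for the same sector group: the first caller registers a `WaitGroup` in `inflight`, and later callers clone it and block until the worker thread drops its handle. A minimal sketch of that signaling pattern using crossbeam-utils (already in the dependency tree); the sleep stands in for the real block read:

```rust
use std::{thread, time::Duration};

use crossbeam_utils::sync::WaitGroup;

fn main() {
    let wg = WaitGroup::new();
    let worker_handle = wg.clone();
    thread::spawn(move || {
        thread::sleep(Duration::from_millis(50)); // simulate loading the group
        drop(worker_handle); // signals completion to all waiters
    });
    wg.wait(); // a second fetch() for the same group would block here
    println!("sector group ready");
}
```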
self.cache.lock().map_err(map_poisoned)?; + // Preload n groups ahead + for i in 0..num_threads as u32 { + let group_idx = request.group_idx + i; + if group_idx >= max_groups { + break; + } + let request = SectorGroupRequest { group_idx, ..request }; + if cache_guard.contains(&request) { + continue; + } + if self.request_tx.send(request).is_ok() { + cache_guard.inflight.insert(request, WaitGroup::new()); + } + } + if let Some(cached) = cache_guard.lru_cache.get(&request) { + return Ok(cached.clone()); + } + if let Some(wg) = cache_guard.inflight.get(&request) { + // Wait for inflight request to finish + let wg = wg.clone(); + drop(cache_guard); + { + let _span = span!(Level::TRACE, "wg.wait").entered(); + wg.wait(); + } + let mut cache_guard = self.cache.lock().map_err(map_poisoned)?; + if let Some(cached) = cache_guard.lru_cache.get(&request) { + return Ok(cached.clone()); + } + } else { + drop(cache_guard); + } + // No threads are running, fallback to single-threaded loader + let result = { + let mut loader = self.loader.lock().map_err(map_poisoned)?; + loader.load(request) + }; + match result { + Ok(group) => { + let mut cache_guard = self.cache.lock().map_err(map_poisoned)?; + cache_guard.push(request, group.clone()); + Ok(group) + } + Err(e) => Err(e), + } + } +} + +fn map_poisoned(_: std::sync::PoisonError) -> io::Error { + io::Error::new(io::ErrorKind::Other, "Mutex poisoned") +} + +pub struct SectorGroupLoader { + io: Box, + disc_header: Arc, + partitions: Arc<[PartitionInfo]>, + block: Block, + block_buf: Box<[u8]>, +} + +impl Clone for SectorGroupLoader { + fn clone(&self) -> Self { + let block_size = self.io.block_size() as usize; + Self { + io: self.io.clone(), + disc_header: self.disc_header.clone(), + partitions: self.partitions.clone(), + block: Block::default(), + block_buf: <[u8]>::new_box_zeroed_with_elems(block_size).unwrap(), + } + } +} + +impl SectorGroupLoader { + pub fn new( + io: Box, + disc_header: Arc, + partitions: Arc<[PartitionInfo]>, + ) -> Self { + let block_buf = <[u8]>::new_box_zeroed_with_elems(io.block_size() as usize).unwrap(); + Self { io, disc_header, partitions, block: Block::default(), block_buf } + } + + #[instrument(name = "SectorGroupLoader::load", skip_all)] + pub fn load(&mut self, request: SectorGroupRequest) -> SectorGroupResult { + let mut sector_group_buf = BytesMut::zeroed(SECTOR_GROUP_SIZE); + + let out = array_ref_mut![sector_group_buf, 0, SECTOR_GROUP_SIZE]; + let (sector_bitmap, io_duration) = if request.partition_idx.is_some() { + self.load_partition_group(request, out)? + } else { + self.load_raw_group(request, out)? + }; + + Ok(SectorGroup { request, data: sector_group_buf.freeze(), sector_bitmap, io_duration }) + } + + /// Load a sector group from a partition. + /// + /// This will handle encryption, decryption, and hash recovery as needed. 
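`Preloader::fetch` above coalesces duplicate requests through the `inflight` map: the first caller registers a `WaitGroup`, later callers clone it and block until the loader publishes the result and drops the registered instance. A reduced sketch of that handshake (assuming the `crossbeam-utils` crate; key `7` and the sleep are illustrative):

use std::{collections::HashMap, sync::{Arc, Mutex}, thread, time::Duration};

use crossbeam_utils::sync::WaitGroup;

fn main() {
    let inflight: Arc<Mutex<HashMap<u32, WaitGroup>>> = Arc::new(Mutex::new(HashMap::new()));

    // Loader: registers the key, "loads" it, then publishes by removing the
    // stored WaitGroup, which wakes every waiter.
    inflight.lock().unwrap().insert(7, WaitGroup::new());
    let map = inflight.clone();
    let loader = thread::spawn(move || {
        thread::sleep(Duration::from_millis(50)); // pretend I/O
        map.lock().unwrap().remove(&7); // drop -> waiters resume
    });

    // Waiter: clone the WaitGroup, then wait *without* holding the lock.
    let wg = inflight.lock().unwrap().get(&7).cloned();
    if let Some(wg) = wg {
        wg.wait(); // returns once all other clones are dropped
    }
    loader.join().unwrap();
}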
+ fn load_partition_group( + &mut self, + request: SectorGroupRequest, + sector_group_buf: &mut [u8; SECTOR_GROUP_SIZE], + ) -> io::Result<(u64, Option)> { + let Some(partition) = + request.partition_idx.and_then(|idx| self.partitions.get(idx as usize)) + else { + return Ok((0, None)); + }; + + let abs_group_sector = partition.data_start_sector + request.group_idx * 64; + if abs_group_sector >= partition.data_end_sector { + return Ok((0, None)); + } + + // Bitmap of sectors that were read + let mut sector_bitmap = 0u64; + // Bitmap of sectors that are decrypted + let mut decrypted_sectors = 0u64; + // Bitmap of sectors that need hash recovery + let mut hash_recovery_sectors = 0u64; + // Hash exceptions + let mut hash_exceptions = Vec::::new(); + // Total duration of I/O operations + let mut io_duration = None; + + // Read sector group + for sector in 0..64 { + let sector_data = + array_ref_mut![sector_group_buf, sector as usize * SECTOR_SIZE, SECTOR_SIZE]; + let abs_sector = abs_group_sector + sector; + if abs_sector >= partition.data_end_sector { + // Already zeroed + decrypted_sectors |= 1 << sector; + hash_recovery_sectors |= 1 << sector; + continue; + } + + // Read new block + if !self.block.contains(abs_sector) { + self.block = self.io.read_block(self.block_buf.as_mut(), abs_sector)?; + if let Some(duration) = self.block.io_duration { + *io_duration.get_or_insert_with(Duration::default) += duration; + } + if self.block.kind == BlockKind::None { + error!("Failed to read block for sector {}", abs_sector); + break; + } + } + + // Add hash exceptions + self.block.append_hash_exceptions(abs_sector, sector, &mut hash_exceptions)?; + + // Read new sector into buffer + let (encrypted, has_hashes) = self.block.copy_sector( + sector_data, + self.block_buf.as_mut(), + abs_sector, + &partition.disc_header, + Some(partition), + )?; + if !encrypted { + decrypted_sectors |= 1 << sector; + } + if !has_hashes && partition.has_hashes { + hash_recovery_sectors |= 1 << sector; + } + sector_bitmap |= 1 << sector; + } + + // Recover hashes + if request.mode != PartitionEncryption::ForceDecryptedNoHashes && hash_recovery_sectors != 0 + { + // Decrypt any encrypted sectors + if decrypted_sectors != u64::MAX { + for sector in 0..64 { + let sector_data = + array_ref_mut![sector_group_buf, sector * SECTOR_SIZE, SECTOR_SIZE]; + if (decrypted_sectors >> sector) & 1 == 0 { + decrypt_sector(sector_data, &partition.key); + } + } + decrypted_sectors = u64::MAX; + } + + // Recover hashes + let group_hashes = hash_sector_group(sector_group_buf); + + // Apply hashes + for sector in 0..64 { + let sector_data = + array_ref_mut![sector_group_buf, sector * SECTOR_SIZE, SECTOR_SIZE]; + if (hash_recovery_sectors >> sector) & 1 == 1 { + group_hashes.apply(sector_data, sector); + } + } + } + + // Apply hash exceptions + if request.mode != PartitionEncryption::ForceDecryptedNoHashes + && !hash_exceptions.is_empty() + { + for exception in hash_exceptions { + let offset = exception.offset.get(); + let sector = offset / HASHES_SIZE as u16; + + // Decrypt sector if needed + let sector_data = + array_ref_mut![sector_group_buf, sector as usize * SECTOR_SIZE, SECTOR_SIZE]; + if (decrypted_sectors >> sector) & 1 == 0 { + decrypt_sector(sector_data, &partition.key); + decrypted_sectors |= 1 << sector; + } + + let sector_offset = (offset - (sector * HASHES_SIZE as u16)) as usize; + *array_ref_mut![sector_data, sector_offset, 20] = exception.hash; + } + } + + // Encrypt/decrypt sectors + if match request.mode { + 
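The hash-exception patching above addresses bytes through a compact 16-bit offset: exceptions index into the group's concatenated hash blocks, 0x400 bytes per sector across 64 sectors, exactly filling the u16 range. A standalone sketch of the address split (values in `main` are illustrative):

const HASHES_SIZE: u16 = 0x400;

// Splits an exception offset into (sector index, byte offset within that
// sector's hash block), mirroring the division above.
fn exception_target(offset: u16) -> (u16, usize) {
    let sector = offset / HASHES_SIZE;
    let sector_offset = (offset % HASHES_SIZE) as usize;
    (sector, sector_offset)
}

fn main() {
    // Offset 0x0E14 patches sector 3's hash block at byte 0x214.
    assert_eq!(exception_target(0x0E14), (3, 0x214));
    // Offsets below 0x400 patch sector 0's hashes.
    assert_eq!(exception_target(0x03FF), (0, 0x3FF));
}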
PartitionEncryption::Original => partition.has_encryption, + PartitionEncryption::ForceEncrypted => true, + PartitionEncryption::ForceDecrypted | PartitionEncryption::ForceDecryptedNoHashes => { + false + } + } { + // Encrypt any decrypted sectors + if decrypted_sectors != 0 { + for sector in 0..64 { + let sector_data = array_ref_mut![ + sector_group_buf, + sector as usize * SECTOR_SIZE, + SECTOR_SIZE + ]; + if (decrypted_sectors >> sector) & 1 == 1 { + encrypt_sector(sector_data, &partition.key); + } + } + } + } else if decrypted_sectors != u64::MAX { + // Decrypt any encrypted sectors + for sector in 0..64 { + let sector_data = + array_ref_mut![sector_group_buf, sector as usize * SECTOR_SIZE, SECTOR_SIZE]; + if (decrypted_sectors >> sector) & 1 == 0 { + decrypt_sector(sector_data, &partition.key); + } + } + } + + Ok((sector_bitmap, io_duration)) + } + + /// Loads a non-partition sector group. + fn load_raw_group( + &mut self, + request: SectorGroupRequest, + sector_group_buf: &mut [u8; SECTOR_GROUP_SIZE], + ) -> io::Result<(u64, Option)> { + let abs_group_sector = request.group_idx * 64; + + // Bitmap of sectors that were read + let mut sector_bitmap = 0u64; + // Total duration of I/O operations + let mut io_duration = None; + + for sector in 0..64 { + let sector_data = + array_ref_mut![sector_group_buf, sector as usize * SECTOR_SIZE, SECTOR_SIZE]; + let abs_sector = abs_group_sector + sector; + if self.partitions.iter().any(|p| p.data_contains_sector(abs_sector)) { + continue; + } + + // Read new block + if !self.block.contains(abs_sector) { + self.block = self.io.read_block(self.block_buf.as_mut(), abs_sector)?; + if let Some(duration) = self.block.io_duration { + *io_duration.get_or_insert_with(Duration::default) += duration; + } + if self.block.kind == BlockKind::None { + break; + } + } + + // Read new sector into buffer + self.block.copy_sector( + sector_data, + self.block_buf.as_mut(), + abs_sector, + self.disc_header.as_ref(), + None, + )?; + sector_bitmap |= 1 << sector; + } + + Ok((sector_bitmap, io_duration)) + } +} diff --git a/nod/src/disc/reader.rs b/nod/src/disc/reader.rs index 645ba55..ea7e61f 100644 --- a/nod/src/disc/reader.rs +++ b/nod/src/disc/reader.rs @@ -1,130 +1,163 @@ use std::{ io, - io::{BufRead, Read, Seek, SeekFrom}, + io::{BufRead, Seek, SeekFrom}, + sync::Arc, }; -use zerocopy::{FromBytes, FromZeros}; +use bytes::Bytes; +use tracing::warn; +use zerocopy::IntoBytes; -use super::{ - gcn::PartitionGC, - hashes::{rebuild_hashes, HashTable}, - wii::{PartitionWii, WiiPartEntry, WiiPartGroup, WiiPartitionHeader, WII_PART_GROUP_OFF}, - DiscHeader, PartitionBase, PartitionHeader, PartitionKind, DL_DVD_SIZE, MINI_DVD_SIZE, - REGION_SIZE, SL_DVD_SIZE, -}; use crate::{ - disc::wii::REGION_OFFSET, - io::block::{Block, BlockIO, PartitionInfo}, - util::read::{read_box, read_from, read_vec}, - DiscMeta, Error, OpenOptions, PartitionEncryptionMode, PartitionOptions, Result, ResultContext, - SECTOR_SIZE, + common::{PartitionInfo, PartitionKind}, + disc::{ + direct::{DirectDiscReader, DirectDiscReaderMode}, + fst::{Fst, NodeKind}, + gcn::{read_fst, PartitionReaderGC}, + preloader::{Preloader, SectorGroup, SectorGroupLoader, SectorGroupRequest}, + wii::{ + PartitionReaderWii, WiiPartEntry, WiiPartGroup, WiiPartitionHeader, REGION_OFFSET, + REGION_SIZE, WII_PART_GROUP_OFF, + }, + DiscHeader, DL_DVD_SIZE, MINI_DVD_SIZE, SECTOR_GROUP_SIZE, SECTOR_SIZE, SL_DVD_SIZE, + }, + io::block::BlockReader, + read::{DiscMeta, DiscOptions, PartitionEncryption, PartitionOptions, 
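The `match request.mode` above is the single place that decides whether output sectors end up encrypted. Written out as a standalone truth table (the enum mirrors this crate's `PartitionEncryption`; the helper name is made up for the sketch):

#[allow(dead_code)]
#[derive(Clone, Copy)]
enum PartitionEncryption {
    Original,
    ForceEncrypted,
    ForceDecrypted,
    ForceDecryptedNoHashes,
}

// True when sectors must be (re-)encrypted before returning them.
fn should_encrypt(mode: PartitionEncryption, partition_has_encryption: bool) -> bool {
    match mode {
        PartitionEncryption::Original => partition_has_encryption, // keep source state
        PartitionEncryption::ForceEncrypted => true,
        PartitionEncryption::ForceDecrypted
        | PartitionEncryption::ForceDecryptedNoHashes => false,
    }
}

fn main() {
    assert!(should_encrypt(PartitionEncryption::ForceEncrypted, false));
    assert!(should_encrypt(PartitionEncryption::Original, true));
    assert!(!should_encrypt(PartitionEncryption::ForceDecrypted, true));
}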
PartitionReader}, + util::{ + impl_read_for_bufread, + read::{read_arc, read_from, read_vec}, + }, + Error, Result, ResultContext, }; pub struct DiscReader { - io: Box, - block: Block, - block_buf: Box<[u8]>, - block_idx: u32, - sector_buf: Box<[u8; SECTOR_SIZE]>, - sector_idx: u32, + io: Box, pos: u64, - mode: PartitionEncryptionMode, - disc_header: Box, - pub(crate) partitions: Vec, - hash_tables: Vec, + size: u64, + mode: PartitionEncryption, + disc_header: Arc, + partitions: Arc<[PartitionInfo]>, region: Option<[u8; REGION_SIZE]>, + sector_group: Option, + preloader: Arc, + alt_disc_header: Option>, + alt_partitions: Option>, } impl Clone for DiscReader { fn clone(&self) -> Self { Self { io: self.io.clone(), - block: Block::default(), - block_buf: <[u8]>::new_box_zeroed_with_elems(self.block_buf.len()).unwrap(), - block_idx: u32::MAX, - sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed().unwrap(), - sector_idx: u32::MAX, pos: 0, + size: self.size, mode: self.mode, disc_header: self.disc_header.clone(), partitions: self.partitions.clone(), - hash_tables: self.hash_tables.clone(), region: self.region, + sector_group: None, + preloader: self.preloader.clone(), + alt_disc_header: self.alt_disc_header.clone(), + alt_partitions: self.alt_partitions.clone(), } } } impl DiscReader { - pub fn new(inner: Box, options: &OpenOptions) -> Result { - let block_size = inner.block_size(); - let meta = inner.meta(); - let mut reader = Self { - io: inner, - block: Block::default(), - block_buf: <[u8]>::new_box_zeroed_with_elems(block_size as usize)?, - block_idx: u32::MAX, - sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed()?, - sector_idx: u32::MAX, - pos: 0, - mode: options.partition_encryption, - disc_header: DiscHeader::new_box_zeroed()?, - partitions: vec![], - hash_tables: vec![], - region: None, - }; - let disc_header: Box = read_box(&mut reader).context("Reading disc header")?; - reader.disc_header = disc_header; - if reader.disc_header.is_wii() { - if reader.disc_header.has_partition_encryption() - && !reader.disc_header.has_partition_hashes() - { + pub fn new(inner: Box, options: &DiscOptions) -> Result { + let mut reader = DirectDiscReader::new(inner)?; + + let disc_header: Arc = read_arc(&mut reader).context("Reading disc header")?; + let mut alt_disc_header = None; + let mut region = None; + let mut partitions = Arc::<[PartitionInfo]>::default(); + let mut alt_partitions = None; + if disc_header.is_wii() { + // Sanity check + if disc_header.has_partition_encryption() && !disc_header.has_partition_hashes() { return Err(Error::DiscFormat( "Wii disc is encrypted but has no partition hashes".to_string(), )); } - if !reader.disc_header.has_partition_hashes() - && options.partition_encryption == PartitionEncryptionMode::ForceEncrypted + if !disc_header.has_partition_hashes() + && options.partition_encryption == PartitionEncryption::ForceEncrypted { return Err(Error::Other( "Unsupported: Rebuilding encryption for Wii disc without hashes".to_string(), )); } + + // Read region info reader.seek(SeekFrom::Start(REGION_OFFSET)).context("Seeking to region info")?; - reader.region = Some(read_from(&mut reader).context("Reading region info")?); - reader.partitions = read_partition_info(&mut reader)?; - // Rebuild hashes if the format requires it - if options.partition_encryption != PartitionEncryptionMode::AsIs - && meta.needs_hash_recovery - && reader.disc_header.has_partition_hashes() - { - rebuild_hashes(&mut reader)?; + region = Some(read_from(&mut reader).context("Reading region info")?); + + // Read 
partition info + partitions = Arc::from(read_partition_info(&mut reader, disc_header.clone())?); + + // Update disc header with encryption mode + if matches!( + options.partition_encryption, + PartitionEncryption::ForceDecrypted | PartitionEncryption::ForceEncrypted + ) { + let mut disc_header = Box::new(disc_header.as_ref().clone()); + let mut partitions = Box::<[PartitionInfo]>::from(partitions.as_ref()); + disc_header.no_partition_encryption = match options.partition_encryption { + PartitionEncryption::ForceDecrypted => 1, + PartitionEncryption::ForceEncrypted => 0, + _ => unreachable!(), + }; + for partition in &mut partitions { + partition.has_encryption = disc_header.has_partition_encryption(); + } + alt_disc_header = Some(Arc::from(disc_header)); + alt_partitions = Some(Arc::from(partitions)); } + } else if !disc_header.is_gamecube() { + return Err(Error::DiscFormat("Invalid disc header".to_string())); } - reader.reset(); - Ok(reader) - } - pub fn reset(&mut self) { - self.block = Block::default(); - self.block_buf.fill(0); - self.block_idx = u32::MAX; - self.sector_buf.fill(0); - self.sector_idx = u32::MAX; - self.pos = 0; - } - - pub fn disc_size(&self) -> u64 { - self.io.meta().disc_size.unwrap_or_else(|| guess_disc_size(&self.partitions)) + // Calculate disc size + let io = reader.into_inner(); + let size = io.meta().disc_size.unwrap_or_else(|| guess_disc_size(&partitions)); + let preloader = Preloader::new( + SectorGroupLoader::new(io.clone(), disc_header.clone(), partitions.clone()), + options.preloader_threads, + ); + Ok(Self { + io, + pos: 0, + size, + mode: options.partition_encryption, + disc_header, + partitions, + region, + sector_group: None, + preloader, + alt_disc_header, + alt_partitions, + }) } #[inline] - pub fn header(&self) -> &DiscHeader { &self.disc_header } + pub fn reset(&mut self) { self.pos = 0; } + + #[inline] + pub fn position(&self) -> u64 { self.pos } + + #[inline] + pub fn disc_size(&self) -> u64 { self.size } + + #[inline] + pub fn header(&self) -> &DiscHeader { + self.alt_disc_header.as_ref().unwrap_or(&self.disc_header) + } #[inline] pub fn region(&self) -> Option<&[u8; REGION_SIZE]> { self.region.as_ref() } #[inline] - pub fn partitions(&self) -> &[PartitionInfo] { &self.partitions } + pub fn partitions(&self) -> &[PartitionInfo] { + self.alt_partitions.as_deref().unwrap_or(&self.partitions) + } #[inline] pub fn meta(&self) -> DiscMeta { self.io.meta() } @@ -134,15 +167,19 @@ impl DiscReader { &self, index: usize, options: &PartitionOptions, - ) -> Result> { + ) -> Result> { if self.disc_header.is_gamecube() { if index == 0 { - Ok(PartitionGC::new(self.io.clone(), self.disc_header.clone())?) + Ok(PartitionReaderGC::new( + self.io.clone(), + self.preloader.clone(), + self.disc_size(), + )?) } else { Err(Error::DiscFormat("GameCube discs only have one partition".to_string())) } } else if let Some(part) = self.partitions.get(index) { - Ok(PartitionWii::new(self.io.clone(), self.disc_header.clone(), part, options)?) + Ok(PartitionReaderWii::new(self.io.clone(), self.preloader.clone(), part, options)?) } else { Err(Error::DiscFormat(format!("Partition {index} not found"))) } @@ -154,108 +191,151 @@ impl DiscReader { &self, kind: PartitionKind, options: &PartitionOptions, - ) -> Result> { + ) -> Result> { if self.disc_header.is_gamecube() { if kind == PartitionKind::Data { - Ok(PartitionGC::new(self.io.clone(), self.disc_header.clone())?) + Ok(PartitionReaderGC::new( + self.io.clone(), + self.preloader.clone(), + self.disc_size(), + )?) 
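A design note on the `Box` to `Arc` migration visible above: `DiscReader` now shares its header and partition table with the preloader threads, so clones must be reference-count bumps rather than deep copies. A minimal sketch of the pattern (`DiscMetaShared` and the `u32` element type are stand-ins):

use std::sync::Arc;

#[derive(Clone)]
struct DiscMetaShared {
    partitions: Arc<[u32]>, // stand-in for Arc<[PartitionInfo]>
}

fn main() {
    let meta = DiscMetaShared { partitions: Arc::from(vec![10, 20, 30]) };
    let cloned = meta.clone(); // O(1): bumps the refcount, copies no data
    assert!(Arc::ptr_eq(&meta.partitions, &cloned.partitions));
}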
} else { Err(Error::DiscFormat("GameCube discs only have a data partition".to_string())) } } else if let Some(part) = self.partitions.iter().find(|v| v.kind == kind) { - Ok(PartitionWii::new(self.io.clone(), self.disc_header.clone(), part, options)?) + Ok(PartitionReaderWii::new(self.io.clone(), self.preloader.clone(), part, options)?) } else { Err(Error::DiscFormat(format!("Partition type {kind} not found"))) } } + + pub fn fill_buf_internal(&mut self) -> io::Result { + if self.pos >= self.size { + return Ok(Bytes::new()); + } + + // Read from modified disc header + if self.pos < size_of::() as u64 { + if let Some(alt_disc_header) = &self.alt_disc_header { + return Ok(Bytes::copy_from_slice( + &alt_disc_header.as_bytes()[self.pos as usize..], + )); + } + } + + // Build sector group request + let abs_sector = (self.pos / SECTOR_SIZE as u64) as u32; + let (request, abs_group_sector, max_groups) = if let Some(partition) = + self.partitions.iter().find(|part| part.data_contains_sector(abs_sector)) + { + let group_idx = (abs_sector - partition.data_start_sector) / 64; + let abs_group_sector = partition.data_start_sector + group_idx * 64; + let max_groups = (partition.data_end_sector - partition.data_start_sector).div_ceil(64); + let request = SectorGroupRequest { + group_idx, + partition_idx: Some(partition.index as u8), + mode: self.mode, + }; + (request, abs_group_sector, max_groups) + } else { + let group_idx = abs_sector / 64; + let abs_group_sector = group_idx * 64; + let max_groups = self.size.div_ceil(SECTOR_GROUP_SIZE as u64) as u32; + let request = SectorGroupRequest { group_idx, partition_idx: None, mode: self.mode }; + (request, abs_group_sector, max_groups) + }; + + // Load sector group + let sector_group = if matches!(&self.sector_group, Some(sector_group) if sector_group.request == request) + { + // We can improve this in Rust 2024 with `if_let_rescope` + // https://github.com/rust-lang/rust/issues/124085 + self.sector_group.as_ref().unwrap() + } else { + self.sector_group.insert(self.preloader.fetch(request, max_groups)?) 
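The request construction above reduces to a little sector arithmetic: groups are 64 sectors, partition groups are numbered from the partition's first data sector, and the last group may be partial. A standalone sketch with a worked example (partition bounds in `main` are illustrative):

const SECTOR_SIZE: u64 = 0x8000;

fn partition_request(abs_sector: u32, data_start: u32, data_end: u32) -> (u32, u32, u32) {
    let group_idx = (abs_sector - data_start) / 64;
    let abs_group_sector = data_start + group_idx * 64; // first sector of the group
    let max_groups = (data_end - data_start).div_ceil(64);
    (group_idx, abs_group_sector, max_groups)
}

fn main() {
    // A partition spanning sectors 96..432: 336 sectors, so 6 groups (last partial).
    let (group_idx, first_sector, max_groups) = partition_request(200, 96, 432);
    assert_eq!((group_idx, first_sector, max_groups), (1, 160, 6));
    // Sector 200 is the 40th sector of its group; byte offsets scale by 0x8000.
    assert_eq!(200 - first_sector, 40);
    assert_eq!(first_sector as u64 * SECTOR_SIZE, 0x50_0000);
}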
+ }; + + // Calculate the number of consecutive sectors in the group + let group_sector = abs_sector - abs_group_sector; + let consecutive_sectors = sector_group.consecutive_sectors(group_sector); + if consecutive_sectors == 0 { + return Ok(Bytes::new()); + } + let num_sectors = group_sector + consecutive_sectors; + + // Read from sector group buffer + let group_start = abs_group_sector as u64 * SECTOR_SIZE as u64; + let offset = (self.pos - group_start) as usize; + let end = (num_sectors as u64 * SECTOR_SIZE as u64).min(self.size - group_start) as usize; + Ok(sector_group.data.slice(offset..end)) + } } impl BufRead for DiscReader { fn fill_buf(&mut self) -> io::Result<&[u8]> { - let block_idx = (self.pos / self.block_buf.len() as u64) as u32; - let abs_sector = (self.pos / SECTOR_SIZE as u64) as u32; + if self.pos >= self.size { + return Ok(&[]); + } - let partition = if self.disc_header.is_wii() { - self.partitions.iter().find(|part| { - abs_sector >= part.data_start_sector && abs_sector < part.data_end_sector - }) + // Read from modified disc header + if self.pos < size_of::() as u64 { + if let Some(alt_disc_header) = &self.alt_disc_header { + return Ok(&alt_disc_header.as_bytes()[self.pos as usize..]); + } + } + + // Build sector group request + let abs_sector = (self.pos / SECTOR_SIZE as u64) as u32; + let (request, abs_group_sector, max_groups) = if let Some(partition) = + self.partitions.iter().find(|part| part.data_contains_sector(abs_sector)) + { + let group_idx = (abs_sector - partition.data_start_sector) / 64; + let abs_group_sector = partition.data_start_sector + group_idx * 64; + let max_groups = (partition.data_end_sector - partition.data_start_sector).div_ceil(64); + let request = SectorGroupRequest { + group_idx, + partition_idx: Some(partition.index as u8), + mode: self.mode, + }; + (request, abs_group_sector, max_groups) } else { - None + let group_idx = abs_sector / 64; + let abs_group_sector = group_idx * 64; + let max_groups = self.size.div_ceil(SECTOR_GROUP_SIZE as u64) as u32; + let request = SectorGroupRequest { group_idx, partition_idx: None, mode: self.mode }; + (request, abs_group_sector, max_groups) }; - // Read new block - if block_idx != self.block_idx { - self.block = self.io.read_block(self.block_buf.as_mut(), block_idx, partition)?; - self.block_idx = block_idx; + // Load sector group + let sector_group = if matches!(&self.sector_group, Some(sector_group) if sector_group.request == request) + { + // We can improve this in Rust 2024 with `if_let_rescope` + // https://github.com/rust-lang/rust/issues/124085 + self.sector_group.as_ref().unwrap() + } else { + self.sector_group.insert(self.preloader.fetch(request, max_groups)?) 
+ }; + + // Calculate the number of consecutive sectors in the group + let group_sector = abs_sector - abs_group_sector; + let consecutive_sectors = sector_group.consecutive_sectors(group_sector); + if consecutive_sectors == 0 { + return Ok(&[]); } + let num_sectors = group_sector + consecutive_sectors; - // Read new sector into buffer - if abs_sector != self.sector_idx { - match (self.mode, partition, self.disc_header.has_partition_encryption()) { - (PartitionEncryptionMode::Original, Some(partition), true) - | (PartitionEncryptionMode::ForceEncrypted, Some(partition), _) => { - self.block.encrypt( - self.sector_buf.as_mut(), - self.block_buf.as_ref(), - abs_sector, - partition, - )?; - } - (PartitionEncryptionMode::ForceDecrypted, Some(partition), _) => { - self.block.decrypt( - self.sector_buf.as_mut(), - self.block_buf.as_ref(), - abs_sector, - partition, - )?; - } - (PartitionEncryptionMode::AsIs, _, _) | (_, None, _) | (_, _, false) => { - self.block.copy_raw( - self.sector_buf.as_mut(), - self.block_buf.as_ref(), - abs_sector, - &self.disc_header, - )?; - } - } - self.sector_idx = abs_sector; - - if self.sector_idx == 0 - && self.disc_header.is_wii() - && matches!( - self.mode, - PartitionEncryptionMode::ForceDecrypted - | PartitionEncryptionMode::ForceEncrypted - ) - { - let (disc_header, _) = DiscHeader::mut_from_prefix(self.sector_buf.as_mut()) - .expect("Invalid disc header alignment"); - disc_header.no_partition_encryption = match self.mode { - PartitionEncryptionMode::ForceDecrypted => 1, - PartitionEncryptionMode::ForceEncrypted => 0, - _ => unreachable!(), - }; - } - } - - // Read from sector buffer - let offset = (self.pos % SECTOR_SIZE as u64) as usize; - Ok(&self.sector_buf[offset..]) + // Read from sector group buffer + let group_start = abs_group_sector as u64 * SECTOR_SIZE as u64; + let offset = (self.pos - group_start) as usize; + let end = (num_sectors as u64 * SECTOR_SIZE as u64).min(self.size - group_start) as usize; + Ok(§or_group.data[offset..end]) } #[inline] fn consume(&mut self, amt: usize) { self.pos += amt as u64; } } -impl Read for DiscReader { - #[inline] - fn read(&mut self, out: &mut [u8]) -> io::Result { - let buf = self.fill_buf()?; - let len = buf.len().min(out.len()); - out[..len].copy_from_slice(&buf[..len]); - self.consume(len); - Ok(len) - } -} +impl_read_for_bufread!(DiscReader); impl Seek for DiscReader { fn seek(&mut self, pos: SeekFrom) -> io::Result { @@ -273,7 +353,10 @@ impl Seek for DiscReader { } } -fn read_partition_info(reader: &mut DiscReader) -> Result> { +fn read_partition_info( + reader: &mut DirectDiscReader, + disc_header: Arc, +) -> Result> { reader.seek(SeekFrom::Start(WII_PART_GROUP_OFF)).context("Seeking to partition groups")?; let part_groups: [WiiPartGroup; 4] = read_from(reader).context("Reading partition groups")?; let mut part_info = Vec::new(); @@ -292,7 +375,7 @@ fn read_partition_info(reader: &mut DiscReader) -> Result> { reader .seek(SeekFrom::Start(offset)) .with_context(|| format!("Seeking to partition data {group_idx}:{part_idx}"))?; - let header: Box = read_box(reader) + let header: Arc = read_arc(reader) .with_context(|| format!("Reading partition header {group_idx}:{part_idx}"))?; let key = header.ticket.decrypt_title_key()?; @@ -303,17 +386,8 @@ fn read_partition_info(reader: &mut DiscReader) -> Result> { ))); } - let disc_header = reader.header(); let data_start_offset = entry.offset() + header.data_off(); - let mut data_size = header.data_size(); - if data_size == 0 { - // Read until next partition or end of 
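`impl_read_for_bufread!` replaces the hand-written `Read` impls that previously duplicated the same fill/copy/consume dance. Presumably the macro expands to something like the following, which matches the removed code above (a sketch, not necessarily the crate's exact definition):

#[allow(unused_macros)]
macro_rules! impl_read_for_bufread {
    ($ty:ty) => {
        impl std::io::Read for $ty {
            #[inline]
            fn read(&mut self, out: &mut [u8]) -> std::io::Result<usize> {
                // Delegate to BufRead: expose a buffer, copy what fits, consume it.
                let buf = std::io::BufRead::fill_buf(self)?;
                let len = buf.len().min(out.len());
                out[..len].copy_from_slice(&buf[..len]);
                std::io::BufRead::consume(self, len);
                Ok(len)
            }
        }
    };
}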
disc - // TODO: handle multiple partition groups - data_size = entries - .get(part_idx + 1) - .map(|part| part.offset() - data_start_offset) - .unwrap_or(reader.disc_size() - data_start_offset); - } + let data_size = header.data_size(); let data_end_offset = data_start_offset + data_size; if data_start_offset % SECTOR_SIZE as u64 != 0 || data_end_offset % SECTOR_SIZE as u64 != 0 @@ -322,32 +396,58 @@ fn read_partition_info(reader: &mut DiscReader) -> Result> { "Partition {group_idx}:{part_idx} data is not sector aligned", ))); } - let mut info = PartitionInfo { + let start_sector = (start_offset / SECTOR_SIZE as u64) as u32; + let data_start_sector = (data_start_offset / SECTOR_SIZE as u64) as u32; + let mut data_end_sector = (data_end_offset / SECTOR_SIZE as u64) as u32; + + reader.reset(DirectDiscReaderMode::Partition { + disc_header: disc_header.clone(), + data_start_sector, + key, + }); + let partition_disc_header: Arc = + read_arc(reader).context("Reading partition disc header")?; + let partition_header = read_arc(reader).context("Reading partition header")?; + if partition_disc_header.is_wii() { + let raw_fst = read_fst(reader, &partition_header, true)?; + let fst = Fst::new(&raw_fst)?; + let max_fst_offset = fst + .nodes + .iter() + .filter_map(|n| match n.kind() { + NodeKind::File => Some(n.offset(true) + n.length() as u64), + _ => None, + }) + .max() + .unwrap_or(0); + if max_fst_offset > data_size { + if data_size == 0 { + // Guess data size for decrypted partitions + data_end_sector = max_fst_offset.div_ceil(SECTOR_SIZE as u64) as u32; + } else { + return Err(Error::DiscFormat(format!( + "Partition {group_idx}:{part_idx} FST exceeds data size", + ))); + } + } + } else { + warn!("Partition {group_idx}:{part_idx} is not valid"); + } + reader.reset(DirectDiscReaderMode::Raw); + + part_info.push(PartitionInfo { index: part_info.len(), kind: entry.kind.get().into(), - start_sector: (start_offset / SECTOR_SIZE as u64) as u32, - data_start_sector: (data_start_offset / SECTOR_SIZE as u64) as u32, - data_end_sector: (data_end_offset / SECTOR_SIZE as u64) as u32, + start_sector, + data_start_sector, + data_end_sector, key, header, - disc_header: DiscHeader::new_box_zeroed()?, - partition_header: PartitionHeader::new_box_zeroed()?, - hash_table: None, + disc_header: partition_disc_header, + partition_header, has_encryption: disc_header.has_partition_encryption(), has_hashes: disc_header.has_partition_hashes(), - }; - - let mut partition_reader = PartitionWii::new( - reader.io.clone(), - reader.disc_header.clone(), - &info, - &PartitionOptions { validate_hashes: false }, - )?; - info.disc_header = read_box(&mut partition_reader).context("Reading disc header")?; - info.partition_header = - read_box(&mut partition_reader).context("Reading partition header")?; - - part_info.push(info); + }); } } Ok(part_info) @@ -356,18 +456,9 @@ fn read_partition_info(reader: &mut DiscReader) -> Result> { fn guess_disc_size(part_info: &[PartitionInfo]) -> u64 { let max_offset = part_info .iter() - .flat_map(|v| { - let offset = v.start_sector as u64 * SECTOR_SIZE as u64; - [ - offset + v.header.tmd_off() + v.header.tmd_size(), - offset + v.header.cert_chain_off() + v.header.cert_chain_size(), - offset + v.header.h3_table_off() + v.header.h3_table_size(), - offset + v.header.data_off() + v.header.data_size(), - ] - }) + .map(|v| v.data_end_sector as u64 * SECTOR_SIZE as u64) .max() .unwrap_or(0x50000); - // TODO add FST offsets (decrypted partitions) if max_offset <= MINI_DVD_SIZE && !part_info.iter().any(|v| 
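The FST walk above recovers a usable end-of-data for decrypted partitions that report `data_size == 0`: the furthest byte reached by any file node, rounded up to a whole sector. A simplified sketch (plain `FileNode` stands in for the crate's FST node type, which additionally applies Wii offset shifting):

const SECTOR_SIZE: u64 = 0x8000;

struct FileNode {
    offset: u64, // file data offset within the partition
    length: u64,
}

fn guessed_end_sector(files: &[FileNode]) -> u32 {
    let max_offset = files.iter().map(|n| n.offset + n.length).max().unwrap_or(0);
    max_offset.div_ceil(SECTOR_SIZE) as u32
}

fn main() {
    let files = [
        FileNode { offset: 0x2_0000, length: 0x1234 },
        FileNode { offset: 0x9_8000, length: 0x8001 }, // ends at 0xA0001
    ];
    // 0xA0001 bytes round up to 21 sectors (20 full sectors plus one byte).
    assert_eq!(guessed_end_sector(&files), 21);
}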
v.kind == PartitionKind::Data) { // Datel disc MINI_DVD_SIZE diff --git a/nod/src/disc/streams.rs b/nod/src/disc/streams.rs deleted file mode 100644 index fbb8277..0000000 --- a/nod/src/disc/streams.rs +++ /dev/null @@ -1,101 +0,0 @@ -//! Partition file read stream. - -use std::{ - io, - io::{BufRead, Read, Seek, SeekFrom}, -}; - -use super::PartitionBase; - -/// A file read stream borrowing a [`PartitionBase`]. -pub type FileStream<'a> = WindowedStream<&'a mut dyn PartitionBase>; - -/// A file read stream owning a [`PartitionBase`]. -pub type OwnedFileStream = WindowedStream>; - -/// A read stream with a fixed window. -#[derive(Clone)] -pub struct WindowedStream -where T: BufRead + Seek -{ - base: T, - pos: u64, - begin: u64, - end: u64, -} - -impl WindowedStream -where T: BufRead + Seek -{ - /// Creates a new windowed stream with offset and size. - /// - /// Seeks underlying stream immediately. - #[inline] - pub fn new(mut base: T, offset: u64, size: u64) -> io::Result { - base.seek(SeekFrom::Start(offset))?; - Ok(Self { base, pos: offset, begin: offset, end: offset + size }) - } - - /// Returns the length of the window. - #[inline] - #[allow(clippy::len_without_is_empty)] - pub fn len(&self) -> u64 { self.end - self.begin } -} - -impl Read for WindowedStream -where T: BufRead + Seek -{ - #[inline] - fn read(&mut self, out: &mut [u8]) -> io::Result { - let buf = self.fill_buf()?; - let len = buf.len().min(out.len()); - out[..len].copy_from_slice(&buf[..len]); - self.consume(len); - Ok(len) - } -} - -impl BufRead for WindowedStream -where T: BufRead + Seek -{ - #[inline] - fn fill_buf(&mut self) -> io::Result<&[u8]> { - let limit = self.end.saturating_sub(self.pos); - if limit == 0 { - return Ok(&[]); - } - let buf = self.base.fill_buf()?; - let max = (buf.len() as u64).min(limit) as usize; - Ok(&buf[..max]) - } - - #[inline] - fn consume(&mut self, amt: usize) { - self.base.consume(amt); - self.pos += amt as u64; - } -} - -impl Seek for WindowedStream -where T: BufRead + Seek -{ - #[inline] - fn seek(&mut self, pos: SeekFrom) -> io::Result { - let mut pos = match pos { - SeekFrom::Start(p) => self.begin + p, - SeekFrom::End(p) => self.end.saturating_add_signed(p), - SeekFrom::Current(p) => self.pos.saturating_add_signed(p), - }; - if pos < self.begin { - pos = self.begin; - } else if pos > self.end { - pos = self.end; - } - let result = self.base.seek(SeekFrom::Start(pos))?; - self.pos = result; - Ok(result - self.begin) - } - - #[inline] - fn stream_position(&mut self) -> io::Result { Ok(self.pos) } -} diff --git a/nod/src/disc/wii.rs b/nod/src/disc/wii.rs index 9aca343..7915c53 100644 --- a/nod/src/disc/wii.rs +++ b/nod/src/disc/wii.rs @@ -1,46 +1,53 @@ +//! Wii disc types. 
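Before the Wii types below, a layout refresher that the constants encode: each 0x8000-byte sector begins with a 0x400-byte hash block, leaving 0x7C00 bytes of user data, so logical (hash-less) positions map to raw sectors via `SECTOR_DATA_SIZE`. A standalone sketch of the translation the partition reader performs:

const SECTOR_SIZE: usize = 0x8000;
const HASHES_SIZE: usize = 0x400;
const SECTOR_DATA_SIZE: usize = SECTOR_SIZE - HASHES_SIZE; // 0x7C00

// Maps a logical data position to (sector index, raw offset past the hashes).
fn logical_to_raw(pos: u64) -> (u32, usize) {
    let sector = (pos / SECTOR_DATA_SIZE as u64) as u32;
    let offset = (pos % SECTOR_DATA_SIZE as u64) as usize;
    (sector, HASHES_SIZE + offset)
}

fn main() {
    assert_eq!(logical_to_raw(0), (0, 0x400)); // first data byte follows the hashes
    assert_eq!(logical_to_raw(0x7C00), (1, 0x400)); // next sector's first data byte
    assert_eq!(logical_to_raw(0x7C05), (1, 0x405));
}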
+ use std::{ ffi::CStr, io, - io::{BufRead, Read, Seek, SeekFrom}, + io::{BufRead, Seek, SeekFrom}, mem::size_of, + sync::Arc, }; -use sha1::{Digest, Sha1}; -use zerocopy::{big_endian::*, FromBytes, FromZeros, Immutable, IntoBytes, KnownLayout}; +use zerocopy::{big_endian::*, FromBytes, Immutable, IntoBytes, KnownLayout}; -use super::{ - gcn::{read_part_meta, PartitionGC}, - DiscHeader, FileStream, Node, PartitionBase, PartitionMeta, SECTOR_SIZE, -}; use crate::{ - array_ref, - disc::streams::OwnedFileStream, - io::{ - aes_cbc_decrypt, - block::{Block, BlockIO, PartitionInfo}, - HashBytes, KeyBytes, + common::{HashBytes, KeyBytes, PartitionInfo}, + disc::{ + gcn::{read_part_meta, PartitionReaderGC}, + hashes::sha1_hash, + preloader::{Preloader, SectorGroup, SectorGroupRequest}, + SECTOR_GROUP_SIZE, SECTOR_SIZE, }, - static_assert, - util::{div_rem, read::read_box_slice}, - Error, PartitionOptions, Result, ResultContext, + io::block::BlockReader, + read::{PartitionEncryption, PartitionMeta, PartitionOptions, PartitionReader}, + util::{ + aes::aes_cbc_decrypt, + array_ref, div_rem, impl_read_for_bufread, + read::{read_arc, read_arc_slice}, + static_assert, + }, + Error, Result, ResultContext, }; /// Size in bytes of the hashes block in a Wii disc sector -pub(crate) const HASHES_SIZE: usize = 0x400; +pub const HASHES_SIZE: usize = 0x400; /// Size in bytes of the data block in a Wii disc sector (excluding hashes) -pub(crate) const SECTOR_DATA_SIZE: usize = SECTOR_SIZE - HASHES_SIZE; // 0x7C00 +pub const SECTOR_DATA_SIZE: usize = SECTOR_SIZE - HASHES_SIZE; // 0x7C00 -/// Size of the disc region info (region.bin) +/// Size in bytes of the disc region info (region.bin) pub const REGION_SIZE: usize = 0x20; +/// Size in bytes of the H3 table (h3.bin) +pub const H3_TABLE_SIZE: usize = 0x18000; + /// Offset of the disc region info pub const REGION_OFFSET: u64 = 0x4E000; // ppki (Retail) -const RVL_CERT_ISSUER_PPKI_TICKET: &str = "Root-CA00000001-XS00000003"; +pub(crate) const RVL_CERT_ISSUER_PPKI_TICKET: &str = "Root-CA00000001-XS00000003"; #[rustfmt::skip] -const RETAIL_COMMON_KEYS: [KeyBytes; 3] = [ +pub(crate) const RETAIL_COMMON_KEYS: [KeyBytes; 3] = [ /* RVL_KEY_RETAIL */ [0xeb, 0xe4, 0x2a, 0x22, 0x5e, 0x85, 0x93, 0xe4, 0x48, 0xd9, 0xc5, 0x45, 0x73, 0x81, 0xaa, 0xf7], /* RVL_KEY_KOREAN */ @@ -50,9 +57,9 @@ const RETAIL_COMMON_KEYS: [KeyBytes; 3] = [ ]; // dpki (Debug) -const RVL_CERT_ISSUER_DPKI_TICKET: &str = "Root-CA00000002-XS00000006"; +pub(crate) const RVL_CERT_ISSUER_DPKI_TICKET: &str = "Root-CA00000002-XS00000006"; #[rustfmt::skip] -const DEBUG_COMMON_KEYS: [KeyBytes; 3] = [ +pub(crate) const DEBUG_COMMON_KEYS: [KeyBytes; 3] = [ /* RVL_KEY_DEBUG */ [0xa1, 0x60, 0x4a, 0x6a, 0x71, 0x23, 0xb5, 0x29, 0xae, 0x8b, 0xec, 0x32, 0xc8, 0x16, 0xfc, 0xaa], /* RVL_KEY_KOREAN_DEBUG */ @@ -159,7 +166,6 @@ static_assert!(size_of::() == 0x2A4); impl Ticket { /// Decrypts the ticket title key using the appropriate common key - #[allow(clippy::missing_inline_in_public_items)] pub fn decrypt_title_key(&self) -> Result { let mut iv: KeyBytes = [0; 16]; iv[..8].copy_from_slice(&self.title_id); @@ -249,11 +255,11 @@ pub struct ContentMetadata { static_assert!(size_of::() == 0x24); -pub const H3_TABLE_SIZE: usize = 0x18000; - +/// Wii partition header. 
#[derive(Debug, Clone, PartialEq, FromBytes, IntoBytes, Immutable, KnownLayout)] #[repr(C, align(4))] pub struct WiiPartitionHeader { + /// Ticket pub ticket: Ticket, tmd_size: U32, tmd_off: U32, @@ -267,172 +273,146 @@ pub struct WiiPartitionHeader { static_assert!(size_of::() == 0x2C0); impl WiiPartitionHeader { + /// TMD size in bytes pub fn tmd_size(&self) -> u64 { self.tmd_size.get() as u64 } + /// TMD offset in bytes (relative to the partition start) pub fn tmd_off(&self) -> u64 { (self.tmd_off.get() as u64) << 2 } + /// Certificate chain size in bytes pub fn cert_chain_size(&self) -> u64 { self.cert_chain_size.get() as u64 } + /// Certificate chain offset in bytes (relative to the partition start) pub fn cert_chain_off(&self) -> u64 { (self.cert_chain_off.get() as u64) << 2 } + /// H3 table offset in bytes (relative to the partition start) pub fn h3_table_off(&self) -> u64 { (self.h3_table_off.get() as u64) << 2 } + /// H3 table size in bytes (always H3_TABLE_SIZE) pub fn h3_table_size(&self) -> u64 { H3_TABLE_SIZE as u64 } + /// Data offset in bytes (relative to the partition start) pub fn data_off(&self) -> u64 { (self.data_off.get() as u64) << 2 } + /// Data size in bytes pub fn data_size(&self) -> u64 { (self.data_size.get() as u64) << 2 } } -pub struct PartitionWii { - io: Box, +pub(crate) struct PartitionReaderWii { + io: Box, + preloader: Arc, partition: PartitionInfo, - block: Block, - block_buf: Box<[u8]>, - block_idx: u32, - sector_buf: Box<[u8; SECTOR_SIZE]>, - sector: u32, pos: u64, options: PartitionOptions, - raw_tmd: Option>, - raw_cert_chain: Option>, - raw_h3_table: Option>, + sector_group: Option, + meta: Option, } -impl Clone for PartitionWii { +impl Clone for PartitionReaderWii { fn clone(&self) -> Self { Self { io: self.io.clone(), + preloader: self.preloader.clone(), partition: self.partition.clone(), - block: Block::default(), - block_buf: <[u8]>::new_box_zeroed_with_elems(self.block_buf.len()).unwrap(), - block_idx: u32::MAX, - sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed().unwrap(), - sector: u32::MAX, pos: 0, options: self.options.clone(), - raw_tmd: self.raw_tmd.clone(), - raw_cert_chain: self.raw_cert_chain.clone(), - raw_h3_table: self.raw_h3_table.clone(), + sector_group: None, + meta: self.meta.clone(), } } } -impl PartitionWii { +impl PartitionReaderWii { pub fn new( - inner: Box, - disc_header: Box, + io: Box, + preloader: Arc, partition: &PartitionInfo, options: &PartitionOptions, ) -> Result> { - let block_size = inner.block_size(); - let mut reader = PartitionGC::new(inner, disc_header)?; - - // Read TMD, cert chain, and H3 table - let offset = partition.start_sector as u64 * SECTOR_SIZE as u64; - let raw_tmd = if partition.header.tmd_size() != 0 { - reader - .seek(SeekFrom::Start(offset + partition.header.tmd_off())) - .context("Seeking to TMD offset")?; - Some( - read_box_slice::(&mut reader, partition.header.tmd_size() as usize) - .context("Reading TMD")?, - ) - } else { - None - }; - let raw_cert_chain = if partition.header.cert_chain_size() != 0 { - reader - .seek(SeekFrom::Start(offset + partition.header.cert_chain_off())) - .context("Seeking to cert chain offset")?; - Some( - read_box_slice::(&mut reader, partition.header.cert_chain_size() as usize) - .context("Reading cert chain")?, - ) - } else { - None - }; - let raw_h3_table = if partition.has_hashes { - reader - .seek(SeekFrom::Start(offset + partition.header.h3_table_off())) - .context("Seeking to H3 table offset")?; - Some(read_box_slice::(&mut reader, 
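The `<< 2` in the accessors above is the Wii "shifted offset" convention: offsets are stored as 32-bit word counts, so a u32 field can address up to 16 GiB. A tiny worked example:

fn stored_to_bytes(stored: u32) -> u64 {
    (stored as u64) << 2 // multiply by 4
}

fn main() {
    // A TMD offset stored as 0xB4 addresses byte 0x2D0 of the partition.
    assert_eq!(stored_to_bytes(0xB4), 0x2D0);
    // Maximum addressable offset: (2^32 - 1) * 4, just under 16 GiB.
    assert_eq!(stored_to_bytes(u32::MAX), 0x3_FFFF_FFFC);
}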
H3_TABLE_SIZE).context("Reading H3 table")?) - } else { - None - }; - - Ok(Box::new(Self { - io: reader.into_inner(), + let mut reader = Self { + io, + preloader, partition: partition.clone(), - block: Block::default(), - block_buf: <[u8]>::new_box_zeroed_with_elems(block_size as usize)?, - block_idx: u32::MAX, - sector_buf: <[u8; SECTOR_SIZE]>::new_box_zeroed()?, - sector: u32::MAX, pos: 0, options: options.clone(), - raw_tmd, - raw_cert_chain, - raw_h3_table, - })) + sector_group: None, + meta: None, + }; + if options.validate_hashes { + // Ensure we cache the H3 table + reader.meta()?; + } + Ok(Box::new(reader)) } + + #[inline] + pub fn len(&self) -> u64 { self.partition.data_size() } } -impl BufRead for PartitionWii { +impl BufRead for PartitionReaderWii { fn fill_buf(&mut self) -> io::Result<&[u8]> { - let part_sector = if self.partition.has_hashes { - (self.pos / SECTOR_DATA_SIZE as u64) as u32 + let (part_sector, sector_offset) = if self.partition.has_hashes { + ( + (self.pos / SECTOR_DATA_SIZE as u64) as u32, + (self.pos % SECTOR_DATA_SIZE as u64) as usize, + ) } else { - (self.pos / SECTOR_SIZE as u64) as u32 + ((self.pos / SECTOR_SIZE as u64) as u32, (self.pos % SECTOR_SIZE as u64) as usize) }; let abs_sector = self.partition.data_start_sector + part_sector; if abs_sector >= self.partition.data_end_sector { return Ok(&[]); } - // Read new block if necessary - let block_idx = - (abs_sector as u64 * SECTOR_SIZE as u64 / self.block_buf.len() as u64) as u32; - if block_idx != self.block_idx { - self.block = self.io.read_block( - self.block_buf.as_mut(), - block_idx, - self.partition.has_encryption.then_some(&self.partition), - )?; - self.block_idx = block_idx; - } + let group_idx = part_sector / 64; + let group_sector = part_sector % 64; - // Decrypt sector if necessary - if abs_sector != self.sector { - if self.partition.has_encryption { - self.block.decrypt( - self.sector_buf.as_mut(), - self.block_buf.as_ref(), - abs_sector, - &self.partition, - )?; + let max_groups = + (self.partition.data_end_sector - self.partition.data_start_sector).div_ceil(64); + let request = SectorGroupRequest { + group_idx, + partition_idx: Some(self.partition.index as u8), + mode: if self.options.validate_hashes { + PartitionEncryption::ForceDecrypted } else { - self.block.copy_raw( - self.sector_buf.as_mut(), - self.block_buf.as_ref(), - abs_sector, - &self.partition.disc_header, - )?; - } + PartitionEncryption::ForceDecryptedNoHashes + }, + }; + let sector_group = if matches!(&self.sector_group, Some(sector_group) if sector_group.request == request) + { + // We can improve this in Rust 2024 with `if_let_rescope` + // https://github.com/rust-lang/rust/issues/124085 + self.sector_group.as_ref().unwrap() + } else { + let sector_group = self.preloader.fetch(request, max_groups)?; if self.options.validate_hashes { - if let Some(h3_table) = self.raw_h3_table.as_deref() { - verify_hashes(self.sector_buf.as_ref(), part_sector, h3_table)?; + if let Some(h3_table) = self.meta.as_ref().and_then(|m| m.raw_h3_table.as_deref()) { + verify_hashes( + array_ref![sector_group.data, 0, SECTOR_GROUP_SIZE], + group_idx, + h3_table, + )?; } } - self.sector = abs_sector; - } + self.sector_group.insert(sector_group) + }; + // Read from sector group buffer + let consecutive_sectors = sector_group.consecutive_sectors(group_sector); + if consecutive_sectors == 0 { + return Ok(&[]); + } + let group_sector_offset = group_sector as usize * SECTOR_SIZE; if self.partition.has_hashes { - let offset = (self.pos % SECTOR_DATA_SIZE as 
u64) as usize; - Ok(&self.sector_buf[HASHES_SIZE + offset..]) + // Read until end of sector (avoid the next hash block) + let offset = group_sector_offset + HASHES_SIZE + sector_offset; + let end = group_sector_offset + SECTOR_SIZE; + Ok(§or_group.data[offset..end]) } else { - let offset = (self.pos % SECTOR_SIZE as u64) as usize; - Ok(&self.sector_buf[offset..]) + // Read until end of sector group (no hashes) + let offset = group_sector_offset + sector_offset; + let end = (group_sector + consecutive_sectors) as usize * SECTOR_SIZE; + Ok(§or_group.data[offset..end]) } } @@ -440,133 +420,130 @@ impl BufRead for PartitionWii { fn consume(&mut self, amt: usize) { self.pos += amt as u64; } } -impl Read for PartitionWii { - #[inline] - fn read(&mut self, out: &mut [u8]) -> io::Result { - let buf = self.fill_buf()?; - let len = buf.len().min(out.len()); - out[..len].copy_from_slice(&buf[..len]); - self.consume(len); - Ok(len) - } -} +impl_read_for_bufread!(PartitionReaderWii); -impl Seek for PartitionWii { +impl Seek for PartitionReaderWii { fn seek(&mut self, pos: SeekFrom) -> io::Result { self.pos = match pos { SeekFrom::Start(v) => v, - SeekFrom::End(_) => { - return Err(io::Error::new( - io::ErrorKind::Unsupported, - "WiiPartitionReader: SeekFrom::End is not supported".to_string(), - )); - } + SeekFrom::End(v) => self.len().saturating_add_signed(v), SeekFrom::Current(v) => self.pos.saturating_add_signed(v), }; Ok(self.pos) } + + fn stream_position(&mut self) -> io::Result { Ok(self.pos) } } -#[inline(always)] -pub(crate) fn as_digest(slice: &[u8; 20]) -> digest::Output { (*slice).into() } +fn verify_hashes(buf: &[u8; SECTOR_GROUP_SIZE], group_idx: u32, h3_table: &[u8]) -> io::Result<()> { + for sector in 0..64 { + let buf = array_ref![buf, sector * SECTOR_SIZE, SECTOR_SIZE]; + let part_sector = group_idx * 64 + sector as u32; + let (cluster, sector) = div_rem(part_sector as usize, 8); + let (group, sub_group) = div_rem(cluster, 8); -fn verify_hashes(buf: &[u8; SECTOR_SIZE], part_sector: u32, h3_table: &[u8]) -> io::Result<()> { - let (cluster, sector) = div_rem(part_sector as usize, 8); - let (group, sub_group) = div_rem(cluster, 8); - - // H0 hashes - for i in 0..31 { - let mut hash = Sha1::new(); - hash.update(array_ref![buf, (i + 1) * 0x400, 0x400]); - let expected = as_digest(array_ref![buf, i * 20, 20]); - let output = hash.finalize(); - if output != expected { - return Err(io::Error::new( - io::ErrorKind::InvalidData, - format!("Invalid H0 hash! (block {:?}) {:x}\n\texpected {:x}", i, output, expected), - )); + // H0 hashes + for i in 0..31 { + let expected = array_ref![buf, i * 20, 20]; + let output = sha1_hash(array_ref![buf, (i + 1) * 0x400, 0x400]); + if output != *expected { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!("Invalid H0 hash! (block {i})"), + )); + } } - } - // H1 hash - { - let mut hash = Sha1::new(); - hash.update(array_ref![buf, 0, 0x26C]); - let expected = as_digest(array_ref![buf, 0x280 + sector * 20, 20]); - let output = hash.finalize(); - if output != expected { - return Err(io::Error::new( - io::ErrorKind::InvalidData, - format!( - "Invalid H1 hash! (subgroup {:?}) {:x}\n\texpected {:x}", - sector, output, expected - ), - )); + // H1 hash + { + let expected = array_ref![buf, 0x280 + sector * 20, 20]; + let output = sha1_hash(array_ref![buf, 0, 0x26C]); + if output != *expected { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!("Invalid H1 hash! 
(sector {sector})",), + )); + } } - } - // H2 hash - { - let mut hash = Sha1::new(); - hash.update(array_ref![buf, 0x280, 0xA0]); - let expected = as_digest(array_ref![buf, 0x340 + sub_group * 20, 20]); - let output = hash.finalize(); - if output != expected { - return Err(io::Error::new( - io::ErrorKind::InvalidData, - format!( - "Invalid H2 hash! (group {:?}) {:x}\n\texpected {:x}", - sub_group, output, expected - ), - )); + // H2 hash + { + let expected = array_ref![buf, 0x340 + sub_group * 20, 20]; + let output = sha1_hash(array_ref![buf, 0x280, 0xA0]); + if output != *expected { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!("Invalid H2 hash! (subgroup {sub_group})"), + )); + } } - } - // H3 hash - { - let mut hash = Sha1::new(); - hash.update(array_ref![buf, 0x340, 0xA0]); - let expected = as_digest(array_ref![h3_table, group * 20, 20]); - let output = hash.finalize(); - if output != expected { - return Err(io::Error::new( - io::ErrorKind::InvalidData, - format!("Invalid H3 hash! {:x}\n\texpected {:x}", output, expected), - )); + // H3 hash + { + let expected = array_ref![h3_table, group * 20, 20]; + let output = sha1_hash(array_ref![buf, 0x340, 0xA0]); + if output != *expected { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!("Invalid H3 hash! (group {group})"), + )); + } } } Ok(()) } -impl PartitionBase for PartitionWii { - fn meta(&mut self) -> Result> { +impl PartitionReader for PartitionReaderWii { + fn is_wii(&self) -> bool { true } + + fn meta(&mut self) -> Result { + if let Some(meta) = &self.meta { + return Ok(meta.clone()); + } self.seek(SeekFrom::Start(0)).context("Seeking to partition header")?; let mut meta = read_part_meta(self, true)?; - meta.raw_ticket = Some(Box::from(self.partition.header.ticket.as_bytes())); - meta.raw_tmd = self.raw_tmd.clone(); - meta.raw_cert_chain = self.raw_cert_chain.clone(); - meta.raw_h3_table = self.raw_h3_table.clone(); + meta.raw_ticket = Some(Arc::from(self.partition.header.ticket.as_bytes())); + + // Read TMD, cert chain, and H3 table + let mut reader = PartitionReaderGC::new(self.io.clone(), self.preloader.clone(), u64::MAX)?; + let offset = self.partition.start_sector as u64 * SECTOR_SIZE as u64; + meta.raw_tmd = if self.partition.header.tmd_size() != 0 { + reader + .seek(SeekFrom::Start(offset + self.partition.header.tmd_off())) + .context("Seeking to TMD offset")?; + Some( + read_arc_slice::(&mut reader, self.partition.header.tmd_size() as usize) + .context("Reading TMD")?, + ) + } else { + None + }; + meta.raw_cert_chain = if self.partition.header.cert_chain_size() != 0 { + reader + .seek(SeekFrom::Start(offset + self.partition.header.cert_chain_off())) + .context("Seeking to cert chain offset")?; + Some( + read_arc_slice::( + &mut reader, + self.partition.header.cert_chain_size() as usize, + ) + .context("Reading cert chain")?, + ) + } else { + None + }; + meta.raw_h3_table = if self.partition.has_hashes { + reader + .seek(SeekFrom::Start(offset + self.partition.header.h3_table_off())) + .context("Seeking to H3 table offset")?; + + Some(read_arc::<[u8; H3_TABLE_SIZE], _>(&mut reader).context("Reading H3 table")?) 
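The index juggling in `verify_hashes` above comes from the three-level Wii hash tree: 8 sectors form an H1 subgroup, 8 subgroups form an H2 group, and each group owns one 20-byte entry in the partition's H3 table. A standalone sketch of the coordinate math:

fn div_rem(x: usize, d: usize) -> (usize, usize) {
    (x / d, x % d)
}

// For a partition-relative sector: which H3 entry, which H2 slot inside it,
// and which H1 slot inside that.
fn hash_coords(part_sector: usize) -> (usize, usize, usize) {
    let (cluster, sector) = div_rem(part_sector, 8);
    let (group, sub_group) = div_rem(cluster, 8);
    (group, sub_group, sector)
}

fn main() {
    // Sector 0x47: H1 hash at byte 0x280 + 7 * 20 of the sector, H2 hash at
    // 0x340 + 0 * 20, H3 hash at entry 1 of the H3 table.
    assert_eq!(hash_coords(0x47), (1, 0, 7));
}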
+ } else { + None + }; + + self.meta = Some(meta.clone()); Ok(meta) } - - fn open_file(&mut self, node: Node) -> io::Result { - if !node.is_file() { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - "Node is not a file".to_string(), - )); - } - FileStream::new(self, node.offset(true), node.length()) - } - - fn into_open_file(self: Box, node: Node) -> io::Result { - if !node.is_file() { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - "Node is not a file".to_string(), - )); - } - OwnedFileStream::new(self, node.offset(true), node.length()) - } } diff --git a/nod/src/disc/writer.rs b/nod/src/disc/writer.rs new file mode 100644 index 0000000..3c86a28 --- /dev/null +++ b/nod/src/disc/writer.rs @@ -0,0 +1,273 @@ +use std::{ + io, + io::{BufRead, Read}, +}; + +use bytes::{Bytes, BytesMut}; +use dyn_clone::DynClone; +use rayon::prelude::*; + +use crate::{ + common::PartitionInfo, + disc::{ + reader::DiscReader, + wii::{HASHES_SIZE, SECTOR_DATA_SIZE}, + SECTOR_SIZE, + }, + util::{aes::decrypt_sector_b2b, array_ref, array_ref_mut, lfg::LaggedFibonacci}, + write::{DiscFinalization, DiscWriterWeight, ProcessOptions}, + Error, Result, ResultContext, +}; + +/// A callback for writing disc data. +/// +/// The callback should write all data to the output stream before returning, or return an error if +/// writing fails. The second and third arguments are the current bytes processed and the total +/// bytes to process, respectively. For most formats, this has no relation to the written disc size, +/// but can be used to display progress. +pub type DataCallback<'a> = dyn FnMut(Bytes, u64, u64) -> io::Result<()> + Send + 'a; + +/// A trait for writing disc images. +pub trait DiscWriter: DynClone { + /// Processes the disc writer to completion. + /// + /// The data callback will be called, in order, for each block of data to write to the output + /// file. The callback should write all data before returning, or return an error if writing + /// fails. + fn process( + &self, + data_callback: &mut DataCallback, + options: &ProcessOptions, + ) -> Result; + + /// Returns the progress upper bound for the disc writer. + /// + /// For most formats, this has no relation to the written disc size, but can be used to display + /// progress. + fn progress_bound(&self) -> u64; + + /// Returns the weight of the disc writer. + /// + /// This can help determine the number of threads to dedicate for output processing, and may + /// differ based on the format's configuration, such as whether compression is enabled. + fn weight(&self) -> DiscWriterWeight; +} + +dyn_clone::clone_trait_object!(DiscWriter); + +#[derive(Default)] +pub struct BlockResult { + /// Input block index + pub block_idx: u32, + /// Input disc data (before processing) + pub disc_data: Bytes, + /// Output block data (after processing). If None, the disc data is used. 
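A hypothetical driver for the `DiscWriter`/`DataCallback` API above, to show the intended call shape: the callback writes each chunk in full before returning and uses the (processed, total) pair only for progress display. The output path, progress formatting, and error glue here are illustrative, not part of the crate:

use std::{fs::File, io::Write};

fn write_image(
    writer: &dyn DiscWriter,
    options: &ProcessOptions,
) -> Result<DiscFinalization, Box<dyn std::error::Error>> {
    let mut out = File::create("disc.out")?; // illustrative output path
    let finalization = writer.process(
        &mut |data, processed, total| {
            out.write_all(&data)?; // write everything before returning
            eprint!("\r{:5.1}%", processed as f64 * 100.0 / total.max(1) as f64);
            Ok(())
        },
        options,
    )?;
    Ok(finalization)
}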
+ pub block_data: Bytes, + /// Output metadata + pub meta: T, +} + +pub trait BlockProcessor: Clone + Send + Sync { + type BlockMeta; + + fn process_block(&mut self, block_idx: u32) -> io::Result>; +} + +pub fn read_block(reader: &mut DiscReader, block_size: usize) -> io::Result<(Bytes, Bytes)> { + let initial_block = reader.fill_buf_internal()?; + if initial_block.len() >= block_size { + // Happy path: we have a full block that we can cheaply slice + let data = initial_block.slice(0..block_size); + reader.consume(block_size); + return Ok((data.clone(), data)); + } else if initial_block.is_empty() { + return Err(io::Error::from(io::ErrorKind::UnexpectedEof)); + } + reader.consume(initial_block.len()); + + // Combine smaller blocks into a new buffer + let mut buf = BytesMut::zeroed(block_size); + let mut len = initial_block.len(); + buf[..len].copy_from_slice(initial_block.as_ref()); + drop(initial_block); + while len < block_size { + let read = reader.read(&mut buf[len..])?; + if read == 0 { + break; + } + len += read; + } + // The block data is full size, padded with zeroes + let block_data = buf.freeze(); + // The disc data is the actual data read, without padding + let disc_data = block_data.slice(0..len); + Ok((block_data, disc_data)) +} + +/// Process blocks in parallel, ensuring that they are written in order. +pub(crate) fn par_process( + create_processor: impl Fn() -> P + Sync, + block_count: u32, + num_threads: usize, + mut callback: impl FnMut(BlockResult) -> Result<()> + Send, +) -> Result<()> +where + T: Send, + P: BlockProcessor, +{ + if num_threads == 0 { + // Fall back to single-threaded processing + let mut processor = create_processor(); + for block_idx in 0..block_count { + let block = processor + .process_block(block_idx) + .with_context(|| format!("Failed to process block {block_idx}"))?; + callback(block)?; + } + return Ok(()); + } + + let (block_tx, block_rx) = crossbeam_channel::bounded(block_count as usize); + for block_idx in 0..block_count { + block_tx.send(block_idx).unwrap(); + } + drop(block_tx); // Disconnect channel + + let (result_tx, result_rx) = crossbeam_channel::bounded(0); + let mut process_error = None; + let mut write_error = None; + rayon::join( + || { + if let Err(e) = (0..num_threads).into_par_iter().try_for_each_init( + || (block_rx.clone(), result_tx.clone(), create_processor()), + |(receiver, block_tx, processor), _| { + while let Ok(block_idx) = receiver.recv() { + let block = processor + .process_block(block_idx) + .with_context(|| format!("Failed to process block {block_idx}"))?; + if block_tx.send(block).is_err() { + break; + } + } + Ok::<_, Error>(()) + }, + ) { + process_error = Some(e); + } + drop(result_tx); // Disconnect channel + }, + || { + let mut current_block = 0; + let mut out_of_order = Vec::>::new(); + 'outer: while let Ok(result) = result_rx.recv() { + if result.block_idx == current_block { + if let Err(e) = callback(result) { + write_error = Some(e); + break; + } + current_block += 1; + // Check if any out of order blocks can be written + while out_of_order.first().is_some_and(|r| r.block_idx == current_block) { + let result = out_of_order.remove(0); + if let Err(e) = callback(result) { + write_error = Some(e); + break 'outer; + } + current_block += 1; + } + } else { + out_of_order.push(result); + out_of_order.sort_unstable_by_key(|r| r.block_idx); + } + } + }, + ); + if let Some(e) = process_error { + return Err(e); + } + if let Some(e) = write_error { + return Err(e); + } + + Ok(()) +} + +/// The determined block type. 
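The consumer half of `par_process` above enforces ordered output with nothing more than a counter and a sorted side buffer; a binary heap keyed on block index would avoid the repeated sorts, but for a handful of in-flight blocks the Vec is simpler. The same logic reduced to plain data, runnable on its own:

fn reassemble(results: Vec<(u32, &str)>) -> Vec<&str> {
    let mut current = 0u32;
    let mut pending: Vec<(u32, &str)> = Vec::new(); // early arrivals, kept sorted
    let mut out = Vec::new();
    for result in results {
        if result.0 == current {
            out.push(result.1);
            current += 1;
            // Flush buffered successors that are now in order.
            while pending.first().is_some_and(|r| r.0 == current) {
                out.push(pending.remove(0).1);
                current += 1;
            }
        } else {
            pending.push(result);
            pending.sort_unstable_by_key(|r| r.0);
        }
    }
    out
}

fn main() {
    let out = reassemble(vec![(1, "b"), (0, "a"), (3, "d"), (2, "c")]);
    assert_eq!(out, ["a", "b", "c", "d"]);
}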
+pub enum CheckBlockResult { + Normal, + Zeroed, + Junk, +} + +/// Check if a block is zeroed or junk data. +pub(crate) fn check_block( + buf: &[u8], + decrypted_block: &mut [u8], + input_position: u64, + partition_info: &[PartitionInfo], + lfg: &mut LaggedFibonacci, + disc_id: [u8; 4], + disc_num: u8, +) -> io::Result { + let start_sector = (input_position / SECTOR_SIZE as u64) as u32; + let end_sector = ((input_position + buf.len() as u64) / SECTOR_SIZE as u64) as u32; + if let Some(partition) = partition_info.iter().find(|p| { + p.has_hashes && start_sector >= p.data_start_sector && end_sector < p.data_end_sector + }) { + if input_position % SECTOR_SIZE as u64 != 0 { + return Err(io::Error::new( + io::ErrorKind::Other, + "Partition block not aligned to sector boundary", + )); + } + if buf.len() % SECTOR_SIZE != 0 { + return Err(io::Error::new( + io::ErrorKind::Other, + "Partition block not a multiple of sector size", + )); + } + let block = if partition.has_encryption { + if decrypted_block.len() < buf.len() { + return Err(io::Error::new( + io::ErrorKind::Other, + "Decrypted block buffer too small", + )); + } + for i in 0..buf.len() / SECTOR_SIZE { + decrypt_sector_b2b( + array_ref![buf, SECTOR_SIZE * i, SECTOR_SIZE], + array_ref_mut![decrypted_block, SECTOR_SIZE * i, SECTOR_SIZE], + &partition.key, + ); + } + &decrypted_block[..buf.len()] + } else { + buf + }; + if sector_data_iter(block).all(|sector_data| sector_data.iter().all(|&b| b == 0)) { + return Ok(CheckBlockResult::Zeroed); + } + let partition_start = partition.data_start_sector as u64 * SECTOR_SIZE as u64; + let partition_offset = + ((input_position - partition_start) / SECTOR_SIZE as u64) * SECTOR_DATA_SIZE as u64; + if sector_data_iter(block).enumerate().all(|(i, sector_data)| { + let sector_offset = partition_offset + i as u64 * SECTOR_DATA_SIZE as u64; + lfg.check_sector_chunked(sector_data, disc_id, disc_num, sector_offset) + }) { + return Ok(CheckBlockResult::Junk); + } + } else { + if buf.iter().all(|&b| b == 0) { + return Ok(CheckBlockResult::Zeroed); + } + if lfg.check_sector_chunked(buf, disc_id, disc_num, input_position) { + return Ok(CheckBlockResult::Junk); + } + } + Ok(CheckBlockResult::Normal) +} + +#[inline] +fn sector_data_iter(buf: &[u8]) -> impl Iterator { + buf.chunks_exact(SECTOR_SIZE).map(|chunk| (&chunk[HASHES_SIZE..]).try_into().unwrap()) +} diff --git a/nod/src/io/block.rs b/nod/src/io/block.rs index a56e699..336cc06 100644 --- a/nod/src/io/block.rs +++ b/nod/src/io/block.rs @@ -1,106 +1,45 @@ -use std::{ - fs, io, - io::{Read, Seek}, - path::Path, -}; +use std::{fs, io, io::Read, path::Path}; use dyn_clone::DynClone; -use zerocopy::transmute_ref; use crate::{ - array_ref, + common::{Format, KeyBytes, MagicBytes, PartitionInfo}, disc::{ - hashes::HashTable, - wii::{WiiPartitionHeader, HASHES_SIZE, SECTOR_DATA_SIZE}, - DiscHeader, PartitionHeader, PartitionKind, GCN_MAGIC, SECTOR_SIZE, WII_MAGIC, + wii::{HASHES_SIZE, SECTOR_DATA_SIZE}, + DiscHeader, GCN_MAGIC, SECTOR_SIZE, WII_MAGIC, }, io::{ - aes_cbc_decrypt, aes_cbc_encrypt, split::SplitFileReader, DiscMeta, Format, KeyBytes, - MagicBytes, + split::SplitFileReader, + wia::{WIAException, WIAExceptionList}, }, - util::{lfg::LaggedFibonacci, read::read_from}, + read::{DiscMeta, DiscStream}, + util::{aes::decrypt_sector, array_ref, array_ref_mut, lfg::LaggedFibonacci, read::read_from}, Error, Result, ResultContext, }; -/// Required trait bounds for reading disc images. 
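`check_block` above classifies blocks so writers can store zeroed and junk-filled regions without payload. For hashed partition data only the 0x7C00 data bytes of each sector are compared, which is what `sector_data_iter` expresses. A simplified sketch of the zero test (the real junk test walks the same iterator but compares against the GameCube/Wii lagged-Fibonacci junk stream instead of zero, and yields fixed-size array refs):

const SECTOR_SIZE: usize = 0x8000;
const HASHES_SIZE: usize = 0x400;

fn sector_data_iter(buf: &[u8]) -> impl Iterator<Item = &[u8]> + '_ {
    buf.chunks_exact(SECTOR_SIZE).map(|chunk| &chunk[HASHES_SIZE..])
}

fn is_zeroed(block: &[u8]) -> bool {
    sector_data_iter(block).all(|data| data.iter().all(|&b| b == 0))
}

fn main() {
    let mut block = vec![0u8; SECTOR_SIZE * 2];
    block[0x100] = 0xAA; // inside the first sector's hash area: ignored
    assert!(is_zeroed(&block));
    block[HASHES_SIZE] = 0x01; // first data byte: detected
    assert!(!is_zeroed(&block));
}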
-/// Required trait bounds for reading disc images.
-pub trait DiscStream: Read + Seek + DynClone + Send + Sync {}
-
-impl<T> DiscStream for T where T: Read + Seek + DynClone + Send + Sync + ?Sized {}
-
-dyn_clone::clone_trait_object!(DiscStream);
-
-/// Block I/O trait for reading disc images.
-pub trait BlockIO: DynClone + Send + Sync {
-    /// Reads a block from the disc image.
-    fn read_block_internal(
-        &mut self,
-        out: &mut [u8],
-        block: u32,
-        partition: Option<&PartitionInfo>,
-    ) -> io::Result<Block>;
-
-    /// Reads a full block from the disc image, combining smaller blocks if necessary.
-    fn read_block(
-        &mut self,
-        out: &mut [u8],
-        block: u32,
-        partition: Option<&PartitionInfo>,
-    ) -> io::Result<Block> {
-        let block_size_internal = self.block_size_internal();
-        let block_size = self.block_size();
-        if block_size_internal == block_size {
-            self.read_block_internal(out, block, partition)
-        } else {
-            let mut offset = 0usize;
-            let mut result = None;
-            let mut block_idx =
-                ((block as u64 * block_size as u64) / block_size_internal as u64) as u32;
-            while offset < block_size as usize {
-                let block = self.read_block_internal(
-                    &mut out[offset..offset + block_size_internal as usize],
-                    block_idx,
-                    partition,
-                )?;
-                if result.is_none() {
-                    result = Some(block);
-                } else if result != Some(block) {
-                    if block == Block::Zero {
-                        out[offset..offset + block_size_internal as usize].fill(0);
-                    } else {
-                        return Err(io::Error::new(
-                            io::ErrorKind::InvalidData,
-                            "Inconsistent block types in split block",
-                        ));
-                    }
-                }
-                offset += block_size_internal as usize;
-                block_idx += 1;
-            }
-            Ok(result.unwrap_or_default())
-        }
-    }
-
-    /// The format's block size in bytes. Can be smaller than the sector size (0x8000).
-    fn block_size_internal(&self) -> u32;
+/// Block reader trait for reading disc images.
+pub trait BlockReader: DynClone + Send + Sync {
+    /// Reads a block from the disc image containing the specified sector.
+    fn read_block(&mut self, out: &mut [u8], sector: u32) -> io::Result<Block>;
 
     /// The block size used for processing. Must be a multiple of the sector size (0x8000).
-    fn block_size(&self) -> u32 { self.block_size_internal().max(SECTOR_SIZE as u32) }
+    fn block_size(&self) -> u32;
 
     /// Returns extra metadata included in the disc file format, if any.
     fn meta(&self) -> DiscMeta;
 }
 
-dyn_clone::clone_trait_object!(BlockIO);
+dyn_clone::clone_trait_object!(BlockReader);
 
-/// Creates a new [`BlockIO`] instance from a stream.
-pub fn new(mut stream: Box<dyn DiscStream>) -> Result<Box<dyn BlockIO>> {
-    let io: Box<dyn BlockIO> = match detect(stream.as_mut()).context("Detecting file type")? {
-        Some(Format::Iso) => crate::io::iso::DiscIOISO::new(stream)?,
-        Some(Format::Ciso) => crate::io::ciso::DiscIOCISO::new(stream)?,
+/// Creates a new [`BlockReader`] instance from a stream.
+pub fn new(mut stream: Box<dyn DiscStream>) -> Result<Box<dyn BlockReader>> {
+    let io: Box<dyn BlockReader> = match detect(stream.as_mut()).context("Detecting file type")? {
+        Some(Format::Iso) => crate::io::iso::BlockReaderISO::new(stream)?,
+        Some(Format::Ciso) => crate::io::ciso::BlockReaderCISO::new(stream)?,
         Some(Format::Gcz) => {
             #[cfg(feature = "compress-zlib")]
             {
-                crate::io::gcz::DiscIOGCZ::new(stream)?
} #[cfg(not(feature = "compress-zlib"))] return Err(Error::DiscFormat("GCZ support is disabled".to_string())); @@ -108,17 +47,17 @@ pub fn new(mut stream: Box) -> Result> { Some(Format::Nfs) => { return Err(Error::DiscFormat("NFS requires a filesystem path".to_string())) } - Some(Format::Wbfs) => crate::io::wbfs::DiscIOWBFS::new(stream)?, - Some(Format::Wia | Format::Rvz) => crate::io::wia::DiscIOWIA::new(stream)?, - Some(Format::Tgc) => crate::io::tgc::DiscIOTGC::new(stream)?, + Some(Format::Wbfs) => crate::io::wbfs::BlockReaderWBFS::new(stream)?, + Some(Format::Wia | Format::Rvz) => crate::io::wia::BlockReaderWIA::new(stream)?, + Some(Format::Tgc) => crate::io::tgc::BlockReaderTGC::new(stream)?, None => return Err(Error::DiscFormat("Unknown disc format".to_string())), }; check_block_size(io.as_ref())?; Ok(io) } -/// Creates a new [`BlockIO`] instance from a filesystem path. -pub fn open(filename: &Path) -> Result> { +/// Creates a new [`BlockReader`] instance from a filesystem path. +pub fn open(filename: &Path) -> Result> { let path_result = fs::canonicalize(filename); if let Err(err) = path_result { return Err(Error::Io(format!("Failed to open {}", filename.display()), err)); @@ -132,28 +71,28 @@ pub fn open(filename: &Path) -> Result> { return Err(Error::DiscFormat(format!("Input is not a file: {}", filename.display()))); } let mut stream = Box::new(SplitFileReader::new(filename)?); - let io: Box = match detect(stream.as_mut()).context("Detecting file type")? { - Some(Format::Iso) => crate::io::iso::DiscIOISO::new(stream)?, - Some(Format::Ciso) => crate::io::ciso::DiscIOCISO::new(stream)?, + let io: Box = match detect(stream.as_mut()).context("Detecting file type")? { + Some(Format::Iso) => crate::io::iso::BlockReaderISO::new(stream)?, + Some(Format::Ciso) => crate::io::ciso::BlockReaderCISO::new(stream)?, Some(Format::Gcz) => { #[cfg(feature = "compress-zlib")] { - crate::io::gcz::DiscIOGCZ::new(stream)? + crate::io::gcz::BlockReaderGCZ::new(stream)? } #[cfg(not(feature = "compress-zlib"))] return Err(Error::DiscFormat("GCZ support is disabled".to_string())); } Some(Format::Nfs) => match path.parent() { Some(parent) if parent.is_dir() => { - crate::io::nfs::DiscIONFS::new(path.parent().unwrap())? + crate::io::nfs::BlockReaderNFS::new(path.parent().unwrap())? 
} _ => { return Err(Error::DiscFormat("Failed to locate NFS parent directory".to_string())); } }, - Some(Format::Tgc) => crate::io::tgc::DiscIOTGC::new(stream)?, - Some(Format::Wbfs) => crate::io::wbfs::DiscIOWBFS::new(stream)?, - Some(Format::Wia | Format::Rvz) => crate::io::wia::DiscIOWIA::new(stream)?, + Some(Format::Tgc) => crate::io::tgc::BlockReaderTGC::new(stream)?, + Some(Format::Wbfs) => crate::io::wbfs::BlockReaderWBFS::new(stream)?, + Some(Format::Wia | Format::Rvz) => crate::io::wia::BlockReaderWIA::new(stream)?, None => return Err(Error::DiscFormat("Unknown disc format".to_string())), }; check_block_size(io.as_ref())?; @@ -163,7 +102,7 @@ pub fn open(filename: &Path) -> Result> { pub const CISO_MAGIC: MagicBytes = *b"CISO"; pub const GCZ_MAGIC: MagicBytes = [0x01, 0xC0, 0x0B, 0xB1]; pub const NFS_MAGIC: MagicBytes = *b"EGGS"; -pub const TGC_MAGIC: MagicBytes = [0xae, 0x0f, 0x38, 0xa2]; +pub const TGC_MAGIC: MagicBytes = [0xAE, 0x0F, 0x38, 0xA2]; pub const WBFS_MAGIC: MagicBytes = *b"WBFS"; pub const WIA_MAGIC: MagicBytes = *b"WIA\x01"; pub const RVZ_MAGIC: MagicBytes = *b"RVZ\x01"; @@ -190,16 +129,7 @@ pub fn detect(stream: &mut R) -> io::Result> { Ok(out) } -fn check_block_size(io: &dyn BlockIO) -> Result<()> { - if io.block_size_internal() < SECTOR_SIZE as u32 - && SECTOR_SIZE as u32 % io.block_size_internal() != 0 - { - return Err(Error::DiscFormat(format!( - "Sector size {} is not divisible by block size {}", - SECTOR_SIZE, - io.block_size_internal(), - ))); - } +fn check_block_size(io: &dyn BlockReader) -> Result<()> { if io.block_size() % SECTOR_SIZE as u32 != 0 { return Err(Error::DiscFormat(format!( "Block size {} is not a multiple of sector size {}", @@ -210,182 +140,263 @@ fn check_block_size(io: &dyn BlockIO) -> Result<()> { Ok(()) } -/// Wii partition information. -#[derive(Debug, Clone)] -pub struct PartitionInfo { - /// The partition index. - pub index: usize, - /// The kind of disc partition. - pub kind: PartitionKind, - /// The start sector of the partition. - pub start_sector: u32, - /// The start sector of the partition's data. - pub data_start_sector: u32, - /// The end sector of the partition's data. - pub data_end_sector: u32, - /// The AES key for the partition, also known as the "title key". - pub key: KeyBytes, - /// The Wii partition header. - pub header: Box, - /// The disc header within the partition. - pub disc_header: Box, - /// The partition header within the partition. - pub partition_header: Box, - /// The hash table for the partition, if rebuilt. - pub hash_table: Option, - /// Whether the partition data is encrypted - pub has_encryption: bool, - /// Whether the partition data hashes are present - pub has_hashes: bool, -} - -/// The block kind returned by [`BlockIO::read_block`]. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] -pub enum Block { - /// Raw data or encrypted Wii partition data - Raw, - /// Encrypted Wii partition data - PartEncrypted, - /// Decrypted Wii partition data - PartDecrypted { - /// Whether the sector has its hash block intact - has_hashes: bool, - }, - /// Wii partition junk data - Junk, - /// All zeroes - #[default] - Zero, +/// A block of sectors within a disc image. +#[derive(Debug, Clone, Default)] +pub struct Block { + /// The starting sector of the block. + pub sector: u32, + /// The number of sectors in the block. + pub count: u32, + /// The block kind. + pub kind: BlockKind, + /// Any hash exceptions for the block. 
+    pub hash_exceptions: Box<[WIAExceptionList]>,
+    /// The duration of I/O operations, if available.
+    pub io_duration: Option<std::time::Duration>,
+}
 
 impl Block {
-    /// Decrypts the block's data (if necessary) and writes it to the output buffer.
-    pub(crate) fn decrypt(
-        self,
-        out: &mut [u8; SECTOR_SIZE],
-        data: &[u8],
-        abs_sector: u32,
-        partition: &PartitionInfo,
-    ) -> io::Result<()> {
-        let part_sector = abs_sector - partition.data_start_sector;
-        match self {
-            Block::Raw => {
-                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
-            }
-            Block::PartEncrypted => {
-                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
-                decrypt_sector(out, partition);
-            }
-            Block::PartDecrypted { has_hashes } => {
-                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
-                if !has_hashes {
-                    rebuild_hash_block(out, part_sector, partition);
-                }
-            }
-            Block::Junk => {
-                generate_junk(out, part_sector, Some(partition), &partition.disc_header);
-                rebuild_hash_block(out, part_sector, partition);
-            }
-            Block::Zero => {
-                out.fill(0);
-                rebuild_hash_block(out, part_sector, partition);
-            }
+    /// Creates a new block from a block of sectors.
+    #[inline]
+    pub fn new(block_idx: u32, block_size: u32, kind: BlockKind) -> Self {
+        let sectors_per_block = block_size / SECTOR_SIZE as u32;
+        Self {
+            sector: block_idx * sectors_per_block,
+            count: sectors_per_block,
+            kind,
+            hash_exceptions: Default::default(),
+            io_duration: None,
+        }
+    }
+
+    /// Creates a new block from a single sector.
+    #[inline]
+    pub fn sector(sector: u32, kind: BlockKind) -> Self {
+        Self { sector, count: 1, kind, hash_exceptions: Default::default(), io_duration: None }
+    }
+
+    /// Creates a new block from a range of sectors.
+    #[inline]
+    pub fn sectors(sector: u32, count: u32, kind: BlockKind) -> Self {
+        Self { sector, count, kind, hash_exceptions: Default::default(), io_duration: None }
+    }
+
+    /// Returns whether the block contains the specified sector.
+    #[inline]
+    pub fn contains(&self, sector: u32) -> bool {
+        sector >= self.sector && sector < self.sector + self.count
+    }
+
+    /// Returns an error if the block does not contain the specified sector.
+    pub fn ensure_contains(&self, sector: u32) -> io::Result<()> {
+        if !self.contains(sector) {
+            return Err(io::Error::new(
+                io::ErrorKind::Other,
+                format!(
+                    "Sector {} not in block range {}-{}",
+                    sector,
+                    self.sector,
+                    self.sector + self.count
+                ),
+            ));
         }
         Ok(())
     }
 
-    /// Encrypts the block's data (if necessary) and writes it to the output buffer.
-    pub(crate) fn encrypt(
-        self,
-        out: &mut [u8; SECTOR_SIZE],
-        data: &[u8],
-        abs_sector: u32,
-        partition: &PartitionInfo,
-    ) -> io::Result<()> {
-        let part_sector = abs_sector - partition.data_start_sector;
-        match self {
-            Block::Raw => {
-                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
-                encrypt_sector(out, partition);
-            }
-            Block::PartEncrypted => {
-                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
-            }
-            Block::PartDecrypted { has_hashes } => {
-                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
-                if !has_hashes {
-                    rebuild_hash_block(out, part_sector, partition);
+    /// Decrypts block data in-place. The decrypted data can be accessed using
+    /// [`partition_data`](Block::partition_data).
+    pub(crate) fn decrypt_block(&self, data: &mut [u8], key: Option<KeyBytes>) -> io::Result<()> {
+        match self.kind {
+            BlockKind::None => {}
+            BlockKind::Raw => {
+                if let Some(key) = key {
+                    for i in 0..self.count as usize {
+                        decrypt_sector(array_ref_mut![data, i * SECTOR_SIZE, SECTOR_SIZE], &key);
+                    }
                 }
-                encrypt_sector(out, partition);
             }
-            Block::Junk => {
-                generate_junk(out, part_sector, Some(partition), &partition.disc_header);
-                rebuild_hash_block(out, part_sector, partition);
-                encrypt_sector(out, partition);
+            BlockKind::PartDecrypted { .. } => {
+                // no-op
             }
-            Block::Zero => {
-                out.fill(0);
-                rebuild_hash_block(out, part_sector, partition);
-                encrypt_sector(out, partition);
+            BlockKind::Junk => {
+                // unsupported, used for DirectDiscReader
+                data.fill(0);
             }
+            BlockKind::Zero => data.fill(0),
         }
         Ok(())
     }
 
-    /// Copies the block's raw data to the output buffer.
-    pub(crate) fn copy_raw(
-        self,
+    /// Copies a sector's raw data to the output buffer. Returns whether the sector is encrypted
+    /// and whether it has hashes.
+    pub(crate) fn copy_sector(
+        &self,
         out: &mut [u8; SECTOR_SIZE],
         data: &[u8],
         abs_sector: u32,
         disc_header: &DiscHeader,
-    ) -> io::Result<()> {
-        match self {
-            Block::Raw | Block::PartEncrypted | Block::PartDecrypted { .. } => {
-                out.copy_from_slice(block_sector::<SECTOR_SIZE>(data, abs_sector)?);
+        partition: Option<&PartitionInfo>,
+    ) -> io::Result<(bool, bool)> {
+        let mut encrypted = false;
+        let mut has_hashes = false;
+        match self.kind {
+            BlockKind::None => {}
+            BlockKind::Raw => {
+                *out = *self.sector_buf(data, abs_sector)?;
+                if partition.is_some_and(|p| p.has_encryption) {
+                    encrypted = true;
+                }
+                if partition.is_some_and(|p| p.has_hashes) {
+                    has_hashes = true;
+                }
             }
-            Block::Junk => generate_junk(out, abs_sector, None, disc_header),
-            Block::Zero => out.fill(0),
+            BlockKind::PartDecrypted { hash_block } => {
+                if hash_block {
+                    *out = *self.sector_buf(data, abs_sector)?;
+                    has_hashes = partition.is_some_and(|p| p.has_hashes);
+                } else {
+                    *array_ref_mut![out, HASHES_SIZE, SECTOR_DATA_SIZE] =
+                        *self.sector_data_buf(data, abs_sector)?;
+                }
+            }
+            BlockKind::Junk => generate_junk_sector(out, abs_sector, partition, disc_header),
+            BlockKind::Zero => out.fill(0),
         }
+        Ok((encrypted, has_hashes))
+    }
+
+    /// Returns a sector's data from the block buffer.
+    pub(crate) fn sector_buf<'a>(
+        &self,
+        data: &'a [u8],
+        abs_sector: u32,
+    ) -> io::Result<&'a [u8; SECTOR_SIZE]> {
+        self.ensure_contains(abs_sector)?;
+        let block_offset = ((abs_sector - self.sector) * SECTOR_SIZE as u32) as usize;
+        Ok(array_ref!(data, block_offset, SECTOR_SIZE))
+    }
+
+    /// Returns a sector's partition data (excluding hashes) from the block buffer.
+    pub(crate) fn sector_data_buf<'a>(
+        &self,
+        data: &'a [u8],
+        abs_sector: u32,
+    ) -> io::Result<&'a [u8; SECTOR_DATA_SIZE]> {
+        self.ensure_contains(abs_sector)?;
+        let block_offset = ((abs_sector - self.sector) * SECTOR_DATA_SIZE as u32) as usize;
+        Ok(array_ref!(data, block_offset, SECTOR_DATA_SIZE))
+    }
+
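`sector_buf` and `sector_data_buf` reduce to simple offset arithmetic once `ensure_contains` has validated the sector. A standalone sketch of the same computation (the function name is hypothetical):

```rust
const SECTOR_SIZE: usize = 0x8000; // 32 KiB Wii/GameCube sector

/// Mirrors the `sector_buf` arithmetic: translate an absolute sector number
/// into a byte range within a block that starts at `block_start_sector`.
fn sector_range(block_start_sector: u32, abs_sector: u32) -> std::ops::Range<usize> {
    let block_offset = (abs_sector - block_start_sector) as usize * SECTOR_SIZE;
    block_offset..block_offset + SECTOR_SIZE
}

fn main() {
    // A block holding sectors 10..14 (4 sectors, 128 KiB)
    let range = sector_range(10, 12);
    assert_eq!(range.start, 2 * SECTOR_SIZE);
    println!("sector 12 occupies bytes {:#x}..{:#x} of the block", range.start, range.end);
}
```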
+    /// Returns raw data from the block buffer, starting at the specified position.
+    pub(crate) fn data<'a>(&self, data: &'a [u8], pos: u64) -> io::Result<&'a [u8]> {
+        if self.kind == BlockKind::None {
+            return Ok(&[]);
+        }
+        self.ensure_contains((pos / SECTOR_SIZE as u64) as u32)?;
+        let offset = (pos - self.sector as u64 * SECTOR_SIZE as u64) as usize;
+        let end = self.count as usize * SECTOR_SIZE;
+        Ok(&data[offset..end])
+    }
+
+    /// Returns partition data (excluding hashes) from the block buffer, starting at the specified
+    /// position within the partition.
+    ///
+    /// If the block does not contain hashes, this will return the full block data. Otherwise, this
+    /// will return only the corresponding sector's data, ending at the sector boundary, to avoid
+    /// reading into the next sector's hash block.
+    pub(crate) fn partition_data<'a>(
+        &self,
+        data: &'a [u8],
+        pos: u64,
+        data_start_sector: u32,
+        partition_has_hashes: bool,
+    ) -> io::Result<&'a [u8]> {
+        let block_has_hashes = match self.kind {
+            BlockKind::Raw => partition_has_hashes,
+            BlockKind::PartDecrypted { hash_block, .. } => hash_block && partition_has_hashes,
+            BlockKind::Junk | BlockKind::Zero => false,
+            BlockKind::None => return Ok(&[]),
+        };
+        let (part_sector, sector_offset) = if partition_has_hashes {
+            ((pos / SECTOR_DATA_SIZE as u64) as u32, (pos % SECTOR_DATA_SIZE as u64) as usize)
+        } else {
+            ((pos / SECTOR_SIZE as u64) as u32, (pos % SECTOR_SIZE as u64) as usize)
+        };
+        let abs_sector = part_sector + data_start_sector;
+        self.ensure_contains(abs_sector)?;
+        let block_sector = (abs_sector - self.sector) as usize;
+        if block_has_hashes {
+            let offset = block_sector * SECTOR_SIZE + HASHES_SIZE + sector_offset;
+            let end = (block_sector + 1) * SECTOR_SIZE; // end of sector
+            Ok(&data[offset..end])
+        } else if partition_has_hashes {
+            let offset = block_sector * SECTOR_DATA_SIZE + sector_offset;
+            let end = self.count as usize * SECTOR_DATA_SIZE; // end of block
+            Ok(&data[offset..end])
+        } else {
+            let offset = block_sector * SECTOR_SIZE + sector_offset;
+            let end = self.count as usize * SECTOR_SIZE; // end of block
+            Ok(&data[offset..end])
+        }
+    }
+
+    pub(crate) fn append_hash_exceptions(
+        &self,
+        abs_sector: u32,
+        group_sector: u32,
+        out: &mut Vec<WIAException>,
+    ) -> io::Result<()> {
+        self.ensure_contains(abs_sector)?;
+        let block_sector = abs_sector - self.sector;
+        let group = (block_sector / 64) as usize;
+        let base_offset = ((block_sector % 64) as usize * HASHES_SIZE) as u16;
+        let new_base_offset = (group_sector * HASHES_SIZE as u32) as u16;
+        out.extend(self.hash_exceptions.get(group).iter().flat_map(|list| {
+            list.iter().filter_map(|exception| {
+                let offset = exception.offset.get();
+                if offset >= base_offset && offset < base_offset + HASHES_SIZE as u16 {
+                    let new_offset = (offset - base_offset) + new_base_offset;
+                    Some(WIAException { offset: new_offset.into(), hash: exception.hash })
+                } else {
+                    None
+                }
+            })
+        }));
         Ok(())
     }
 }
 
-#[inline(always)]
-fn block_sector<const N: usize>(data: &[u8], sector_idx: u32) -> io::Result<&[u8; N]> {
-    if data.len() % N != 0 {
-        return Err(io::Error::new(
-            io::ErrorKind::InvalidData,
-            format!("Expected block size {} to be a multiple of {}", data.len(), N),
-        ));
-    }
-    let rel_sector = sector_idx % (data.len() / N) as u32;
-    let offset = rel_sector as usize * N;
-    data.get(offset..offset + N)
-        .ok_or_else(|| {
-            io::Error::new(
-                io::ErrorKind::InvalidData,
-                format!(
-                    "Sector {} out of range (block size {}, sector size {})",
-                    rel_sector,
-                    data.len(),
-                    N
-                ),
-            )
-        })
-        .map(|v| unsafe { &*(v as *const [u8] as *const [u8; N]) })
-}
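The subtle part of `partition_data` is that a byte position inside a hashed partition advances by 0x7C00 per sector rather than 0x8000. A runnable sketch of that position split, mirroring the `part_sector`/`sector_offset` computation above:

```rust
const SECTOR_SIZE: u64 = 0x8000; // full sector: 0x400 hash block + 0x7C00 data
const SECTOR_DATA_SIZE: u64 = 0x7C00;

/// Mirrors the `partition_data` position math: map a partition-relative byte
/// position to (sector index, offset within that sector's data area).
fn locate(pos: u64, has_hashes: bool) -> (u32, u64) {
    if has_hashes {
        // Hashed partitions address only the 0x7C00-byte data area per sector
        ((pos / SECTOR_DATA_SIZE) as u32, pos % SECTOR_DATA_SIZE)
    } else {
        ((pos / SECTOR_SIZE) as u32, pos % SECTOR_SIZE)
    }
}

fn main() {
    // Byte 0x7C00 is the first byte of the *second* sector's data in a hashed
    // partition, but still inside the first sector of an unhashed one.
    assert_eq!(locate(0x7C00, true), (1, 0));
    assert_eq!(locate(0x7C00, false), (0, 0x7C00));
}
```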
+/// The block kind.
+#[derive(Debug, Copy, Clone, PartialEq, Default)]
+pub enum BlockKind {
+    /// Empty block, likely end of disc
+    #[default]
+    None,
+    /// Raw data or encrypted Wii partition data
+    Raw,
+    /// Decrypted Wii partition data
+    PartDecrypted {
+        /// Whether the sector has its hash block intact
+        hash_block: bool,
+    },
+    /// Wii partition junk data
+    Junk,
+    /// All zeroes
+    Zero,
 }
 
-fn generate_junk(
+/// Generates junk data for a single sector.
+pub fn generate_junk_sector(
     out: &mut [u8; SECTOR_SIZE],
-    sector: u32,
+    abs_sector: u32,
     partition: Option<&PartitionInfo>,
     disc_header: &DiscHeader,
 ) {
-    let (pos, offset) = if partition.is_some() {
+    let (pos, offset) = if partition.is_some_and(|p| p.has_hashes) {
+        let sector = abs_sector - partition.unwrap().data_start_sector;
         (sector as u64 * SECTOR_DATA_SIZE as u64, HASHES_SIZE)
     } else {
-        (sector as u64 * SECTOR_SIZE as u64, 0)
+        (abs_sector as u64 * SECTOR_SIZE as u64, 0)
     };
     out[..offset].fill(0);
     let mut lfg = LaggedFibonacci::default();
@@ -396,33 +407,3 @@ fn generate_junk(
         pos,
     );
 }
-
-fn rebuild_hash_block(out: &mut [u8; SECTOR_SIZE], part_sector: u32, partition: &PartitionInfo) {
-    let Some(hash_table) = partition.hash_table.as_ref() else {
-        return;
-    };
-    let sector_idx = part_sector as usize;
-    let h0_hashes: &[u8; 0x26C] =
-        transmute_ref!(array_ref![hash_table.h0_hashes, sector_idx * 31, 31]);
-    out[0..0x26C].copy_from_slice(h0_hashes);
-    let h1_hashes: &[u8; 0xA0] =
-        transmute_ref!(array_ref![hash_table.h1_hashes, sector_idx & !7, 8]);
-    out[0x280..0x320].copy_from_slice(h1_hashes);
-    let h2_hashes: &[u8; 0xA0] =
-        transmute_ref!(array_ref![hash_table.h2_hashes, (sector_idx / 8) & !7, 8]);
-    out[0x340..0x3E0].copy_from_slice(h2_hashes);
-}
-
-fn encrypt_sector(out: &mut [u8; SECTOR_SIZE], partition: &PartitionInfo) {
-    aes_cbc_encrypt(&partition.key, &[0u8; 16], &mut out[..HASHES_SIZE]);
-    // Data IV from encrypted hash block
-    let iv = *array_ref![out, 0x3D0, 16];
-    aes_cbc_encrypt(&partition.key, &iv, &mut out[HASHES_SIZE..]);
-}
-
-fn decrypt_sector(out: &mut [u8; SECTOR_SIZE], partition: &PartitionInfo) {
-    // Data IV from encrypted hash block
-    let iv = *array_ref![out, 0x3D0, 16];
-    aes_cbc_decrypt(&partition.key, &[0u8; 16], &mut out[..HASHES_SIZE]);
-    aes_cbc_decrypt(&partition.key, &iv, &mut out[HASHES_SIZE..]);
-}
diff --git a/nod/src/io/ciso.rs b/nod/src/io/ciso.rs
index 2f6466e..be7026d 100644
--- a/nod/src/io/ciso.rs
+++ b/nod/src/io/ciso.rs
@@ -2,20 +2,36 @@ use std::{
     io,
     io::{Read, Seek, SeekFrom},
     mem::size_of,
+    sync::Arc,
 };
 
-use zerocopy::{little_endian::*, FromBytes, Immutable, IntoBytes, KnownLayout};
+use bytes::{BufMut, Bytes, BytesMut};
+use zerocopy::{little_endian::*, FromBytes, FromZeros, Immutable, IntoBytes, KnownLayout};
 
 use crate::{
-    disc::SECTOR_SIZE,
-    io::{
-        block::{Block, BlockIO, DiscStream, PartitionInfo, CISO_MAGIC},
-        nkit::NKitHeader,
-        Format, MagicBytes,
+    common::{Compression, Format, MagicBytes},
+    disc::{
+        reader::DiscReader,
+        writer::{
+            check_block, par_process, read_block, BlockProcessor, BlockResult, CheckBlockResult,
+            DataCallback, DiscWriter,
+        },
+        SECTOR_SIZE,
     },
-    static_assert,
-    util::read::read_from,
-    DiscMeta, Error, Result, ResultContext,
+    io::{
+        block::{Block, BlockKind, BlockReader, CISO_MAGIC},
+        nkit::{JunkBits, NKitHeader},
+    },
+    read::{DiscMeta, DiscStream},
+    util::{
+        array_ref,
+        digest::DigestManager,
+        lfg::LaggedFibonacci,
+        read::{box_to_bytes, read_arc},
+        static_assert,
+    },
+    write::{DiscFinalization, DiscWriterWeight, FormatOptions, ProcessOptions},
+    Error, Result, ResultContext,
 };
 
 pub const CISO_MAP_SIZE: usize = SECTOR_SIZE - 8;
@@ -32,18 +48,18 @@ struct CISOHeader {
 static_assert!(size_of::<CISOHeader>() == SECTOR_SIZE);
 
 #[derive(Clone)]
-pub struct DiscIOCISO {
+pub struct BlockReaderCISO {
     inner: Box<dyn DiscStream>,
-    header: CISOHeader,
-    block_map: [u16; CISO_MAP_SIZE],
+    header: Arc<CISOHeader>,
+    block_map: Arc<[u16; CISO_MAP_SIZE]>,
     nkit_header: Option<NKitHeader>,
 }
 
-impl DiscIOCISO {
+impl BlockReaderCISO {
     pub fn new(mut inner: Box<dyn DiscStream>) -> Result<Box<Self>> {
         // Read header
         inner.seek(SeekFrom::Start(0)).context("Seeking to start")?;
-        let header: CISOHeader = read_from(inner.as_mut()).context("Reading CISO header")?;
+        let header: Arc<CISOHeader> = read_arc(inner.as_mut()).context("Reading CISO header")?;
         if header.magic != CISO_MAGIC {
             return Err(Error::DiscFormat("Invalid CISO magic".to_string()));
         }
@@ -69,54 +85,47 @@ impl DiscIOCISO {
         }
 
         // Read NKit header if present (after CISO data)
-        let nkit_header = if len > file_size + 4 {
+        let nkit_header = if len > file_size + 12 {
             inner.seek(SeekFrom::Start(file_size)).context("Seeking to NKit header")?;
             NKitHeader::try_read_from(inner.as_mut(), header.block_size.get(), true)
         } else {
             None
         };
 
-        Ok(Box::new(Self { inner, header, block_map, nkit_header }))
+        Ok(Box::new(Self { inner, header, block_map: Arc::new(block_map), nkit_header }))
     }
 }
 
-impl BlockIO for DiscIOCISO {
-    fn read_block_internal(
-        &mut self,
-        out: &mut [u8],
-        block: u32,
-        partition: Option<&PartitionInfo>,
-    ) -> io::Result<Block> {
-        if block >= CISO_MAP_SIZE as u32 {
+impl BlockReader for BlockReaderCISO {
+    fn read_block(&mut self, out: &mut [u8], sector: u32) -> io::Result<Block> {
+        let block_size = self.header.block_size.get();
+        let block_idx = ((sector as u64 * SECTOR_SIZE as u64) / block_size as u64) as u32;
+        if block_idx >= CISO_MAP_SIZE as u32 {
             // Out of bounds
-            return Ok(Block::Zero);
+            return Ok(Block::new(block_idx, block_size, BlockKind::None));
         }
 
         // Find the block in the map
-        let phys_block = self.block_map[block as usize];
+        let phys_block = self.block_map[block_idx as usize];
         if phys_block == u16::MAX {
             // Check if block is junk data
-            if self.nkit_header.as_ref().and_then(|h| h.is_junk_block(block)).unwrap_or(false) {
-                return Ok(Block::Junk);
+            if self.nkit_header.as_ref().and_then(|h| h.is_junk_block(block_idx)).unwrap_or(false) {
+                return Ok(Block::new(block_idx, block_size, BlockKind::Junk));
             };
 
             // Otherwise, read zeroes
-            return Ok(Block::Zero);
+            return Ok(Block::new(block_idx, block_size, BlockKind::Zero));
         }
 
         // Read block
-        let file_offset = size_of::<CISOHeader>() as u64
-            + phys_block as u64 * self.header.block_size.get() as u64;
+        let file_offset = size_of::<CISOHeader>() as u64 + phys_block as u64 * block_size as u64;
         self.inner.seek(SeekFrom::Start(file_offset))?;
         self.inner.read_exact(out)?;
-        match partition {
-            Some(partition) if partition.has_encryption => Ok(Block::PartEncrypted),
-            _ => Ok(Block::Raw),
-        }
+        Ok(Block::new(block_idx, block_size, BlockKind::Raw))
     }
 
-    fn block_size_internal(&self) -> u32 { self.header.block_size.get() }
+    fn block_size(&self) -> u32 { self.header.block_size.get() }
 
     fn meta(&self) -> DiscMeta {
         let mut result = DiscMeta {
@@ -130,3 +139,187 @@ impl BlockIO for DiscIOCISO {
         result
     }
 }
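The CISO block map is a flat table of `u16` physical indices, with `u16::MAX` marking blocks that are not stored (zeroed or junk); stored blocks are packed back to back after the one-sector header. A sketch of the lookup `read_block` performs (the helper name is illustrative):

```rust
const CISO_MAP_SIZE: usize = 0x8000 - 8;
const HEADER_SIZE: u64 = 0x8000; // CISOHeader occupies one full sector

/// Sketch of the read_block lookup: absent blocks (u16::MAX) are zero or
/// junk; present blocks are packed back to back after the header.
fn block_file_offset(block_map: &[u16], block_idx: usize, block_size: u32) -> Option<u64> {
    match block_map.get(block_idx).copied() {
        None | Some(u16::MAX) => None, // not stored in the file
        Some(phys_block) => Some(HEADER_SIZE + phys_block as u64 * block_size as u64),
    }
}

fn main() {
    let mut map = vec![u16::MAX; CISO_MAP_SIZE];
    map[0] = 0; // first logical block stored as first physical block
    assert_eq!(block_file_offset(&map, 0, 0x200000), Some(0x8000));
    assert_eq!(block_file_offset(&map, 1, 0x200000), None);
}
```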
 
+struct BlockProcessorCISO {
+    inner: DiscReader,
+    block_size: u32,
+    decrypted_block: Box<[u8]>,
+    lfg: LaggedFibonacci,
+    disc_id: [u8; 4],
+    disc_num: u8,
+}
+
+impl Clone for BlockProcessorCISO {
+    fn clone(&self) -> Self {
+        Self {
+            inner: self.inner.clone(),
+            block_size: self.block_size,
+            decrypted_block: <[u8]>::new_box_zeroed_with_elems(self.block_size as usize).unwrap(),
+            lfg: LaggedFibonacci::default(),
+            disc_id: self.disc_id,
+            disc_num: self.disc_num,
+        }
+    }
+}
+
+impl BlockProcessor for BlockProcessorCISO {
+    type BlockMeta = CheckBlockResult;
+
+    fn process_block(&mut self, block_idx: u32) -> io::Result<BlockResult<Self::BlockMeta>> {
+        let block_size = self.block_size as usize;
+        let input_position = block_idx as u64 * block_size as u64;
+        self.inner.seek(SeekFrom::Start(input_position))?;
+        let (block_data, disc_data) = read_block(&mut self.inner, block_size)?;
+
+        // Check if block is zeroed or junk
+        let result = match check_block(
+            disc_data.as_ref(),
+            &mut self.decrypted_block,
+            input_position,
+            self.inner.partitions(),
+            &mut self.lfg,
+            self.disc_id,
+            self.disc_num,
+        )? {
+            CheckBlockResult::Normal => {
+                BlockResult { block_idx, disc_data, block_data, meta: CheckBlockResult::Normal }
+            }
+            CheckBlockResult::Zeroed => BlockResult {
+                block_idx,
+                disc_data,
+                block_data: Bytes::new(),
+                meta: CheckBlockResult::Zeroed,
+            },
+            CheckBlockResult::Junk => BlockResult {
+                block_idx,
+                disc_data,
+                block_data: Bytes::new(),
+                meta: CheckBlockResult::Junk,
+            },
+        };
+        Ok(result)
+    }
+}
+
+#[derive(Clone)]
+pub struct DiscWriterCISO {
+    inner: DiscReader,
+    block_size: u32,
+    block_count: u32,
+    disc_size: u64,
+}
+
+pub const DEFAULT_BLOCK_SIZE: u32 = 0x200000; // 2 MiB
+
+impl DiscWriterCISO {
+    pub fn new(inner: DiscReader, options: &FormatOptions) -> Result<Box<dyn DiscWriter>> {
+        if options.format != Format::Ciso {
+            return Err(Error::DiscFormat("Invalid format for CISO writer".to_string()));
+        }
+        if options.compression != Compression::None {
+            return Err(Error::DiscFormat("CISO does not support compression".to_string()));
+        }
+        let block_size = DEFAULT_BLOCK_SIZE;
+
+        let disc_size = inner.disc_size();
+        let block_count = disc_size.div_ceil(block_size as u64) as u32;
+        if block_count > CISO_MAP_SIZE as u32 {
+            return Err(Error::DiscFormat(format!(
+                "CISO block count exceeds maximum: {} > {}",
+                block_count, CISO_MAP_SIZE
+            )));
+        }
+
+        Ok(Box::new(Self { inner, block_size, block_count, disc_size }))
+    }
+}
+
+impl DiscWriter for DiscWriterCISO {
+    fn process(
+        &self,
+        data_callback: &mut DataCallback,
+        options: &ProcessOptions,
+    ) -> Result<DiscFinalization> {
+        data_callback(BytesMut::zeroed(SECTOR_SIZE).freeze(), 0, self.disc_size)
+            .context("Failed to write header")?;
+
+        // Determine junk data values
+        let disc_header = self.inner.header();
+        let disc_id = *array_ref![disc_header.game_id, 0, 4];
+        let disc_num = disc_header.disc_num;
+
+        // Create hashers
+        let digest = DigestManager::new(options);
+        let block_size = self.block_size;
+        let mut junk_bits = JunkBits::new(block_size);
+        let mut input_position = 0;
+
+        let mut block_count = 0;
+        let mut header = CISOHeader::new_box_zeroed()?;
+        header.magic = CISO_MAGIC;
+        header.block_size = block_size.into();
+        par_process(
+            || BlockProcessorCISO {
+                inner: self.inner.clone(),
+                block_size,
+                decrypted_block: <[u8]>::new_box_zeroed_with_elems(block_size as usize).unwrap(),
+                lfg: LaggedFibonacci::default(),
+                disc_id,
+                disc_num,
+            },
+            self.block_count,
+            options.processor_threads,
+            |block| -> Result<()> {
+                // Update hashers
+                let disc_data_len = block.disc_data.len() as u64;
+                digest.send(block.disc_data);
+
+                // Check if block is zeroed or junk
+                match block.meta {
+                    CheckBlockResult::Normal => {
+                        header.block_present[block.block_idx as usize] = 1;
+                        block_count += 1;
+                    }
+                    CheckBlockResult::Zeroed => {}
+                    CheckBlockResult::Junk => {
+                        junk_bits.set(block.block_idx,
true); + } + } + + input_position += disc_data_len; + data_callback(block.block_data, input_position, self.disc_size) + .with_context(|| format!("Failed to write block {}", block.block_idx))?; + Ok(()) + }, + )?; + + // Collect hash results + let digest_results = digest.finish(); + let mut nkit_header = NKitHeader { + version: 2, + size: Some(self.disc_size), + crc32: None, + md5: None, + sha1: None, + xxh64: None, + junk_bits: Some(junk_bits), + encrypted: true, + }; + nkit_header.apply_digests(&digest_results); + + // Write NKit header after data + let mut buffer = BytesMut::new().writer(); + nkit_header.write_to(&mut buffer).context("Writing NKit header")?; + data_callback(buffer.into_inner().freeze(), self.disc_size, self.disc_size) + .context("Failed to write NKit header")?; + + let header = Bytes::from(box_to_bytes(header)); + let mut finalization = DiscFinalization { header, ..Default::default() }; + finalization.apply_digests(&digest_results); + Ok(finalization) + } + + fn progress_bound(&self) -> u64 { self.disc_size } + + fn weight(&self) -> DiscWriterWeight { DiscWriterWeight::Medium } +} diff --git a/nod/src/io/gcz.rs b/nod/src/io/gcz.rs index c12028f..ab17238 100644 --- a/nod/src/io/gcz.rs +++ b/nod/src/io/gcz.rs @@ -2,21 +2,30 @@ use std::{ io, io::{Read, Seek, SeekFrom}, mem::size_of, + sync::Arc, }; use adler::adler32_slice; -use miniz_oxide::{inflate, inflate::core::inflate_flags}; +use bytes::{BufMut, Bytes, BytesMut}; use zerocopy::{little_endian::*, FromBytes, FromZeros, Immutable, IntoBytes, KnownLayout}; -use zstd::zstd_safe::WriteBuf; use crate::{ - io::{ - block::{Block, BlockIO, DiscStream, GCZ_MAGIC}, - MagicBytes, + common::{Compression, Format, MagicBytes}, + disc::{ + reader::DiscReader, + writer::{par_process, read_block, BlockProcessor, BlockResult, DataCallback, DiscWriter}, + SECTOR_SIZE, }, - static_assert, - util::read::{read_box_slice, read_from}, - Compression, DiscMeta, Error, Format, PartitionInfo, Result, ResultContext, + io::block::{Block, BlockKind, BlockReader, GCZ_MAGIC}, + read::{DiscMeta, DiscStream}, + util::{ + compress::{Compressor, DecompressionKind, Decompressor}, + digest::DigestManager, + read::{read_arc_slice, read_from}, + static_assert, + }, + write::{DiscFinalization, DiscWriterWeight, FormatOptions, ProcessOptions}, + Error, Result, ResultContext, }; /// GCZ header (little endian) @@ -33,16 +42,17 @@ struct GCZHeader { static_assert!(size_of::() == 32); -pub struct DiscIOGCZ { +pub struct BlockReaderGCZ { inner: Box, header: GCZHeader, - block_map: Box<[U64]>, - block_hashes: Box<[U32]>, + block_map: Arc<[U64]>, + block_hashes: Arc<[U32]>, block_buf: Box<[u8]>, data_offset: u64, + decompressor: Decompressor, } -impl Clone for DiscIOGCZ { +impl Clone for BlockReaderGCZ { fn clone(&self) -> Self { Self { inner: self.inner.clone(), @@ -51,11 +61,12 @@ impl Clone for DiscIOGCZ { block_hashes: self.block_hashes.clone(), block_buf: <[u8]>::new_box_zeroed_with_elems(self.block_buf.len()).unwrap(), data_offset: self.data_offset, + decompressor: self.decompressor.clone(), } } } -impl DiscIOGCZ { +impl BlockReaderGCZ { pub fn new(mut inner: Box) -> Result> { // Read header inner.seek(SeekFrom::Start(0)).context("Seeking to start")?; @@ -66,41 +77,50 @@ impl DiscIOGCZ { // Read block map and hashes let block_count = header.block_count.get(); - let block_map = read_box_slice(inner.as_mut(), block_count as usize) + let block_map = read_arc_slice(inner.as_mut(), block_count as usize) .context("Reading GCZ block map")?; - let block_hashes = 
read_box_slice(inner.as_mut(), block_count as usize) + let block_hashes = read_arc_slice(inner.as_mut(), block_count as usize) .context("Reading GCZ block hashes")?; // header + block_count * (u64 + u32) let data_offset = size_of::() as u64 + block_count as u64 * 12; let block_buf = <[u8]>::new_box_zeroed_with_elems(header.block_size.get() as usize)?; - Ok(Box::new(Self { inner, header, block_map, block_hashes, block_buf, data_offset })) + let decompressor = Decompressor::new(DecompressionKind::Deflate); + Ok(Box::new(Self { + inner, + header, + block_map, + block_hashes, + block_buf, + data_offset, + decompressor, + })) } } -impl BlockIO for DiscIOGCZ { - fn read_block_internal( - &mut self, - out: &mut [u8], - block: u32, - partition: Option<&PartitionInfo>, - ) -> io::Result { - if block >= self.header.block_count.get() { +impl BlockReader for BlockReaderGCZ { + fn read_block(&mut self, out: &mut [u8], sector: u32) -> io::Result { + let block_size = self.header.block_size.get(); + let block_idx = ((sector as u64 * SECTOR_SIZE as u64) / block_size as u64) as u32; + if block_idx >= self.header.block_count.get() { // Out of bounds - return Ok(Block::Zero); + return Ok(Block::new(block_idx, block_size, BlockKind::None)); } // Find block offset and size - let mut file_offset = self.block_map[block as usize].get(); + let mut file_offset = self.block_map[block_idx as usize].get(); let mut compressed = true; if file_offset & (1 << 63) != 0 { file_offset &= !(1 << 63); compressed = false; } - let compressed_size = - ((self.block_map.get(block as usize + 1).unwrap_or(&self.header.compressed_size).get() - & !(1 << 63)) - - file_offset) as usize; + let compressed_size = ((self + .block_map + .get(block_idx as usize + 1) + .unwrap_or(&self.header.compressed_size) + .get() + & !(1 << 63)) + - file_offset) as usize; if compressed_size > self.block_buf.len() { return Err(io::Error::new( io::ErrorKind::InvalidData, @@ -127,58 +147,43 @@ impl BlockIO for DiscIOGCZ { // Verify block checksum let checksum = adler32_slice(&self.block_buf[..compressed_size]); - let expected_checksum = self.block_hashes[block as usize].get(); + let expected_checksum = self.block_hashes[block_idx as usize].get(); if checksum != expected_checksum { return Err(io::Error::new( io::ErrorKind::InvalidData, format!( - "Block checksum mismatch: {:#010x} != {:#010x}", - checksum, expected_checksum + "Block {} checksum mismatch: {:#010x} != {:#010x}", + block_idx, checksum, expected_checksum ), )); } if compressed { // Decompress block - let mut decompressor = inflate::core::DecompressorOxide::new(); - let input = &self.block_buf[..compressed_size]; - let (status, in_size, out_size) = inflate::core::decompress( - &mut decompressor, - input, - out, - 0, - inflate_flags::TINFL_FLAG_PARSE_ZLIB_HEADER - | inflate_flags::TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF, - ); - if status != inflate::TINFLStatus::Done - || in_size != compressed_size - || out_size != self.block_buf.len() - { + let out_len = self.decompressor.decompress(&self.block_buf[..compressed_size], out)?; + if out_len != block_size as usize { return Err(io::Error::new( io::ErrorKind::InvalidData, format!( - "Deflate decompression failed: {:?} (in: {}, out: {})", - status, in_size, out_size + "Block {} decompression failed: in: {}, out: {}", + block_idx, compressed_size, out_len ), )); } } else { // Copy uncompressed block - out.copy_from_slice(self.block_buf.as_slice()); + out.copy_from_slice(self.block_buf.as_ref()); } - match partition { - Some(partition) if 
partition.has_encryption => Ok(Block::PartEncrypted), - _ => Ok(Block::Raw), - } + Ok(Block::new(block_idx, block_size, BlockKind::Raw)) } - fn block_size_internal(&self) -> u32 { self.header.block_size.get() } + fn block_size(&self) -> u32 { self.header.block_size.get() } fn meta(&self) -> DiscMeta { DiscMeta { format: Format::Gcz, - compression: Compression::Deflate, + compression: Compression::Deflate(0), block_size: Some(self.header.block_size.get()), lossless: true, disc_size: Some(self.header.disc_size.get()), @@ -186,3 +191,174 @@ impl BlockIO for DiscIOGCZ { } } } + +struct BlockProcessorGCZ { + inner: DiscReader, + header: GCZHeader, + compressor: Compressor, +} + +impl Clone for BlockProcessorGCZ { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + header: self.header.clone(), + compressor: self.compressor.clone(), + } + } +} + +struct BlockMetaGCZ { + is_compressed: bool, + block_hash: u32, +} + +impl BlockProcessor for BlockProcessorGCZ { + type BlockMeta = BlockMetaGCZ; + + fn process_block(&mut self, block_idx: u32) -> io::Result> { + let block_size = self.header.block_size.get(); + self.inner.seek(SeekFrom::Start(block_idx as u64 * block_size as u64))?; + let (mut block_data, disc_data) = read_block(&mut self.inner, block_size as usize)?; + + // Try to compress block + let is_compressed = if self.compressor.compress(&block_data)? { + println!("Compressed block {} to {}", block_idx, self.compressor.buffer.len()); + block_data = Bytes::copy_from_slice(self.compressor.buffer.as_slice()); + true + } else { + false + }; + + let block_hash = adler32_slice(block_data.as_ref()); + Ok(BlockResult { + block_idx, + disc_data, + block_data, + meta: BlockMetaGCZ { is_compressed, block_hash }, + }) + } +} + +#[derive(Clone)] +pub struct DiscWriterGCZ { + inner: DiscReader, + header: GCZHeader, + compression: Compression, +} + +pub const DEFAULT_BLOCK_SIZE: u32 = 0x8000; // 32 KiB + +// Level 0 will be converted to the default level in [`Compression::validate_level`] +pub const DEFAULT_COMPRESSION: Compression = Compression::Deflate(0); + +impl DiscWriterGCZ { + pub fn new(inner: DiscReader, options: &FormatOptions) -> Result> { + if options.format != Format::Gcz { + return Err(Error::DiscFormat("Invalid format for GCZ writer".to_string())); + } + if !matches!(options.compression, Compression::Deflate(_)) { + return Err(Error::DiscFormat(format!( + "Unsupported compression for GCZ: {:?}", + options.compression + ))); + } + + let block_size = options.block_size; + if block_size < SECTOR_SIZE as u32 || block_size % SECTOR_SIZE as u32 != 0 { + return Err(Error::DiscFormat("Invalid block size for GCZ".to_string())); + } + + let disc_header = inner.header(); + let disc_size = inner.disc_size(); + let block_count = disc_size.div_ceil(block_size as u64) as u32; + + // Generate header + let header = GCZHeader { + magic: GCZ_MAGIC, + disc_type: if disc_header.is_wii() { 1 } else { 0 }.into(), + compressed_size: 0.into(), // Written when finalized + disc_size: disc_size.into(), + block_size: block_size.into(), + block_count: block_count.into(), + }; + + Ok(Box::new(Self { inner, header, compression: options.compression })) + } +} + +impl DiscWriter for DiscWriterGCZ { + fn process( + &self, + data_callback: &mut DataCallback, + options: &ProcessOptions, + ) -> Result { + let disc_size = self.header.disc_size.get(); + let block_size = self.header.block_size.get(); + let block_count = self.header.block_count.get(); + + // Create hashers + let digest = DigestManager::new(options); + + 
// Generate block map and hashes + let mut block_map = <[U64]>::new_box_zeroed_with_elems(block_count as usize)?; + let mut block_hashes = <[U32]>::new_box_zeroed_with_elems(block_count as usize)?; + + let header_data_size = size_of::() + + size_of_val(block_map.as_ref()) + + size_of_val(block_hashes.as_ref()); + let mut header_data = BytesMut::with_capacity(header_data_size); + header_data.put_slice(self.header.as_bytes()); + header_data.resize(header_data_size, 0); + data_callback(header_data.freeze(), 0, disc_size).context("Failed to write GCZ header")?; + + let mut input_position = 0; + let mut data_position = 0; + par_process( + || BlockProcessorGCZ { + inner: self.inner.clone(), + header: self.header.clone(), + compressor: Compressor::new(self.compression, block_size as usize), + }, + block_count, + options.processor_threads, + |block| { + // Update hashers + let disc_data_len = block.disc_data.len() as u64; + digest.send(block.disc_data); + + // Update block map and hash + if block.meta.is_compressed { + block_map[block.block_idx as usize] = data_position.into(); + } else { + block_map[block.block_idx as usize] = (data_position | (1 << 63)).into(); + } + block_hashes[block.block_idx as usize] = block.meta.block_hash.into(); + + // Write block data + input_position += disc_data_len; + data_position += block.block_data.len() as u64; + data_callback(block.block_data, input_position, disc_size) + .with_context(|| format!("Failed to write block {}", block.block_idx))?; + Ok(()) + }, + )?; + + // Write updated header, block map and hashes + let mut header = self.header.clone(); + header.compressed_size = data_position.into(); + let mut header_data = BytesMut::with_capacity(header_data_size); + header_data.extend_from_slice(header.as_bytes()); + header_data.extend_from_slice(block_map.as_bytes()); + header_data.extend_from_slice(block_hashes.as_bytes()); + + let mut finalization = + DiscFinalization { header: header_data.freeze(), ..Default::default() }; + finalization.apply_digests(&digest.finish()); + Ok(finalization) + } + + fn progress_bound(&self) -> u64 { self.header.disc_size.get() } + + fn weight(&self) -> DiscWriterWeight { DiscWriterWeight::Heavy } +} diff --git a/nod/src/io/iso.rs b/nod/src/io/iso.rs index aea206f..98b6e4f 100644 --- a/nod/src/io/iso.rs +++ b/nod/src/io/iso.rs @@ -1,68 +1,97 @@ use std::{ io, - io::{Read, Seek, SeekFrom}, + io::{BufRead, Read, Seek, SeekFrom}, }; use crate::{ - disc::SECTOR_SIZE, - io::{ - block::{Block, BlockIO, DiscStream, PartitionInfo}, - Format, + common::Format, + disc::{ + reader::DiscReader, + writer::{DataCallback, DiscWriter}, + SECTOR_SIZE, }, - DiscMeta, Result, ResultContext, + io::block::{Block, BlockKind, BlockReader}, + read::{DiscMeta, DiscStream}, + util::digest::DigestManager, + write::{DiscFinalization, DiscWriterWeight, ProcessOptions}, + Result, ResultContext, }; #[derive(Clone)] -pub struct DiscIOISO { +pub struct BlockReaderISO { inner: Box, - stream_len: u64, + disc_size: u64, } -impl DiscIOISO { +impl BlockReaderISO { pub fn new(mut inner: Box) -> Result> { - let stream_len = inner.seek(SeekFrom::End(0)).context("Determining stream length")?; - inner.seek(SeekFrom::Start(0)).context("Seeking to start")?; - Ok(Box::new(Self { inner, stream_len })) + let disc_size = inner.seek(SeekFrom::End(0)).context("Determining stream length")?; + Ok(Box::new(Self { inner, disc_size })) } } -impl BlockIO for DiscIOISO { - fn read_block_internal( - &mut self, - out: &mut [u8], - block: u32, - partition: Option<&PartitionInfo>, - ) -> 
io::Result { - let offset = block as u64 * SECTOR_SIZE as u64; - if offset >= self.stream_len { +impl BlockReader for BlockReaderISO { + fn read_block(&mut self, out: &mut [u8], sector: u32) -> io::Result { + let pos = sector as u64 * SECTOR_SIZE as u64; + if pos >= self.disc_size { // End of file - return Ok(Block::Zero); + return Ok(Block::sector(sector, BlockKind::None)); } - self.inner.seek(SeekFrom::Start(offset))?; - if offset + SECTOR_SIZE as u64 > self.stream_len { + self.inner.seek(SeekFrom::Start(pos))?; + if pos + SECTOR_SIZE as u64 > self.disc_size { // If the last block is not a full sector, fill the rest with zeroes - let read = (self.stream_len - offset) as usize; + let read = (self.disc_size - pos) as usize; self.inner.read_exact(&mut out[..read])?; out[read..].fill(0); } else { self.inner.read_exact(out)?; } - match partition { - Some(partition) if partition.has_encryption => Ok(Block::PartEncrypted), - _ => Ok(Block::Raw), - } + Ok(Block::sector(sector, BlockKind::Raw)) } - fn block_size_internal(&self) -> u32 { SECTOR_SIZE as u32 } + fn block_size(&self) -> u32 { SECTOR_SIZE as u32 } fn meta(&self) -> DiscMeta { DiscMeta { format: Format::Iso, lossless: true, - disc_size: Some(self.stream_len), + disc_size: Some(self.disc_size), ..Default::default() } } } + +impl DiscWriter for DiscReader { + fn process( + &self, + data_callback: &mut DataCallback, + options: &ProcessOptions, + ) -> Result { + let mut reader = self.clone(); + let digest = DigestManager::new(options); + loop { + let pos = reader.position(); + let data = reader + .fill_buf_internal() + .with_context(|| format!("Reading disc data at offset {pos}"))?; + let len = data.len(); + if len == 0 { + break; + } + // Update hashers + digest.send(data.clone()); + data_callback(data, pos + len as u64, reader.disc_size()) + .context("Failed to write disc data")?; + reader.consume(len); + } + let mut finalization = DiscFinalization::default(); + finalization.apply_digests(&digest.finish()); + Ok(finalization) + } + + fn progress_bound(&self) -> u64 { self.disc_size() } + + fn weight(&self) -> DiscWriterWeight { DiscWriterWeight::Light } +} diff --git a/nod/src/io/mapped.rs b/nod/src/io/mapped.rs new file mode 100644 index 0000000..e881cc7 --- /dev/null +++ b/nod/src/io/mapped.rs @@ -0,0 +1,51 @@ +use std::{io, path::Path, sync::Arc}; + +use memmap2::Mmap; + +use crate::{util::impl_read_for_bufread, Result, ResultContext}; + +pub struct MappedFileReader { + inner: Arc, + pos: usize, +} + +impl Clone for MappedFileReader { + fn clone(&self) -> Self { Self { inner: self.inner.clone(), pos: 0 } } +} + +impl MappedFileReader { + #[expect(unused)] + pub fn new(path: &Path) -> Result { + let file = std::fs::File::open(path) + .with_context(|| format!("Failed to open file {}", path.display()))?; + let inner = unsafe { Mmap::map(&file) } + .with_context(|| format!("Failed to map file {}", path.display()))?; + Ok(Self { inner: Arc::new(inner), pos: 0 }) + } +} + +impl io::BufRead for MappedFileReader { + fn fill_buf(&mut self) -> io::Result<&[u8]> { + if self.pos < self.inner.len() { + Ok(&self.inner[self.pos..]) + } else { + Ok(&[]) + } + } + + fn consume(&mut self, amt: usize) { self.pos = self.pos.saturating_add(amt); } +} + +impl_read_for_bufread!(MappedFileReader); + +impl io::Seek for MappedFileReader { + fn seek(&mut self, pos: io::SeekFrom) -> io::Result { + let pos = match pos { + io::SeekFrom::Start(pos) => pos, + io::SeekFrom::End(pos) => (self.inner.len() as u64).saturating_add_signed(pos), + io::SeekFrom::Current(off) 
=> (self.pos as u64).saturating_add_signed(off), + }; + self.pos = pos.try_into().map_err(|_| io::ErrorKind::UnexpectedEof)?; + Ok(pos) + } +} diff --git a/nod/src/io/mod.rs b/nod/src/io/mod.rs index a8e1e8b..c5bae02 100644 --- a/nod/src/io/mod.rs +++ b/nod/src/io/mod.rs @@ -1,142 +1,14 @@ //! Disc file format related logic (CISO, NFS, WBFS, WIA, etc.) -use std::fmt; - pub(crate) mod block; pub(crate) mod ciso; #[cfg(feature = "compress-zlib")] pub(crate) mod gcz; pub(crate) mod iso; +pub(crate) mod mapped; pub(crate) mod nfs; pub(crate) mod nkit; pub(crate) mod split; pub(crate) mod tgc; pub(crate) mod wbfs; pub(crate) mod wia; - -/// SHA-1 hash bytes -pub type HashBytes = [u8; 20]; - -/// AES key bytes -pub type KeyBytes = [u8; 16]; - -/// Magic bytes -pub type MagicBytes = [u8; 4]; - -/// The disc file format. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] -pub enum Format { - /// ISO / GCM (GameCube master disc) - #[default] - Iso, - /// CISO (Compact ISO) - Ciso, - /// GCZ - Gcz, - /// NFS (Wii U VC) - Nfs, - /// RVZ - Rvz, - /// WBFS - Wbfs, - /// WIA - Wia, - /// TGC - Tgc, -} - -impl fmt::Display for Format { - #[inline] - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Format::Iso => write!(f, "ISO"), - Format::Ciso => write!(f, "CISO"), - Format::Gcz => write!(f, "GCZ"), - Format::Nfs => write!(f, "NFS"), - Format::Rvz => write!(f, "RVZ"), - Format::Wbfs => write!(f, "WBFS"), - Format::Wia => write!(f, "WIA"), - Format::Tgc => write!(f, "TGC"), - } - } -} - -/// The disc file format's compression algorithm. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] -pub enum Compression { - /// No compression - #[default] - None, - /// BZIP2 - Bzip2, - /// Deflate (GCZ only) - Deflate, - /// LZMA - Lzma, - /// LZMA2 - Lzma2, - /// Purge (WIA only) - Purge, - /// Zstandard - Zstandard, -} - -impl fmt::Display for Compression { - #[inline] - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Compression::None => write!(f, "None"), - Compression::Bzip2 => write!(f, "BZIP2"), - Compression::Deflate => write!(f, "Deflate"), - Compression::Lzma => write!(f, "LZMA"), - Compression::Lzma2 => write!(f, "LZMA2"), - Compression::Purge => write!(f, "Purge"), - Compression::Zstandard => write!(f, "Zstandard"), - } - } -} - -/// Extra metadata about the underlying disc file format. -#[derive(Debug, Clone, Default)] -pub struct DiscMeta { - /// The disc file format. - pub format: Format, - /// The format's compression algorithm. - pub compression: Compression, - /// If the format uses blocks, the block size in bytes. - pub block_size: Option, - /// Whether Wii partitions are stored decrypted in the format. - pub decrypted: bool, - /// Whether the format omits Wii partition data hashes. - pub needs_hash_recovery: bool, - /// Whether the format supports recovering the original disc data losslessly. - pub lossless: bool, - /// The original disc's size in bytes, if stored by the format. - pub disc_size: Option, - /// The original disc's CRC32 hash, if stored by the format. - pub crc32: Option, - /// The original disc's MD5 hash, if stored by the format. - pub md5: Option<[u8; 16]>, - /// The original disc's SHA-1 hash, if stored by the format. - pub sha1: Option<[u8; 20]>, - /// The original disc's XXH64 hash, if stored by the format. - pub xxhash64: Option, -} - -/// Encrypts data in-place using AES-128-CBC with the given key and IV. -/// Requires the data length to be a multiple of the AES block size (16 bytes). 
-pub fn aes_cbc_encrypt(key: &KeyBytes, iv: &KeyBytes, data: &mut [u8]) { - use aes::cipher::{block_padding::NoPadding, BlockEncryptMut, KeyIvInit}; - >::new(key.into(), iv.into()) - .encrypt_padded_mut::(data, data.len()) - .unwrap(); -} - -/// Decrypts data in-place using AES-128-CBC with the given key and IV. -/// Requires the data length to be a multiple of the AES block size (16 bytes). -pub fn aes_cbc_decrypt(key: &KeyBytes, iv: &KeyBytes, data: &mut [u8]) { - use aes::cipher::{block_padding::NoPadding, BlockDecryptMut, KeyIvInit}; - >::new(key.into(), iv.into()) - .decrypt_padded_mut::(data) - .unwrap(); -} diff --git a/nod/src/io/nfs.rs b/nod/src/io/nfs.rs index 799ee47..1c08447 100644 --- a/nod/src/io/nfs.rs +++ b/nod/src/io/nfs.rs @@ -4,22 +4,21 @@ use std::{ io::{BufReader, Read, Seek, SeekFrom}, mem::size_of, path::{Component, Path, PathBuf}, + sync::Arc, }; use zerocopy::{big_endian::U32, FromBytes, FromZeros, Immutable, IntoBytes, KnownLayout}; use crate::{ - array_ref_mut, + common::{Format, KeyBytes, MagicBytes}, disc::SECTOR_SIZE, io::{ - aes_cbc_decrypt, - block::{Block, BlockIO, PartitionInfo, NFS_MAGIC}, + block::{Block, BlockKind, BlockReader, NFS_MAGIC}, split::SplitFileReader, - Format, KeyBytes, MagicBytes, }, - static_assert, - util::read::read_from, - DiscMeta, Error, Result, ResultContext, + read::DiscMeta, + util::{aes::aes_cbc_decrypt, array_ref_mut, read::read_arc, static_assert}, + Error, Result, ResultContext, }; pub const NFS_END_MAGIC: MagicBytes = *b"SGGE"; @@ -84,19 +83,19 @@ impl NFSHeader { } #[derive(Clone)] -pub struct DiscIONFS { +pub struct BlockReaderNFS { inner: SplitFileReader, - header: NFSHeader, + header: Arc, raw_size: u64, disc_size: u64, key: KeyBytes, } -impl DiscIONFS { +impl BlockReaderNFS { pub fn new(directory: &Path) -> Result> { let mut disc_io = Box::new(Self { inner: SplitFileReader::empty(), - header: NFSHeader::new_zeroed(), + header: Arc::new(NFSHeader::new_zeroed()), raw_size: 0, disc_size: 0, key: [0; 16], @@ -106,18 +105,13 @@ impl DiscIONFS { } } -impl BlockIO for DiscIONFS { - fn read_block_internal( - &mut self, - out: &mut [u8], - sector: u32, - partition: Option<&PartitionInfo>, - ) -> io::Result { +impl BlockReader for BlockReaderNFS { + fn read_block(&mut self, out: &mut [u8], sector: u32) -> io::Result { // Calculate physical sector let phys_sector = self.header.phys_sector(sector); if phys_sector == u32::MAX { // Logical zero sector - return Ok(Block::Zero); + return Ok(Block::sector(sector, BlockKind::Raw)); } // Read sector @@ -130,15 +124,10 @@ impl BlockIO for DiscIONFS { *array_ref_mut!(iv, 12, 4) = sector.to_be_bytes(); aes_cbc_decrypt(&self.key, &iv, out); - match partition { - Some(partition) if partition.has_encryption => { - Ok(Block::PartDecrypted { has_hashes: true }) - } - _ => Ok(Block::Raw), - } + Ok(Block::sector(sector, BlockKind::PartDecrypted { hash_block: true })) } - fn block_size_internal(&self) -> u32 { SECTOR_SIZE as u32 } + fn block_size(&self) -> u32 { SECTOR_SIZE as u32 } fn meta(&self) -> DiscMeta { DiscMeta { format: Format::Nfs, decrypted: true, ..Default::default() } @@ -168,7 +157,7 @@ fn get_nfs(directory: &Path, num: u32) -> Result { } } -impl DiscIONFS { +impl BlockReaderNFS { pub fn load_files(&mut self, directory: &Path) -> Result<()> { { // Load key file @@ -201,7 +190,7 @@ impl DiscIONFS { let mut file = BufReader::new( File::open(&path).with_context(|| format!("Opening file {}", path.display()))?, ); - let header: NFSHeader = read_from(&mut file) + let header: Arc = 
read_arc(&mut file)
            .with_context(|| format!("Reading NFS header from file {}", path.display()))?;
        header.validate()?;
        // log::debug!("{:?}", header);
diff --git a/nod/src/io/nkit.rs b/nod/src/io/nkit.rs
index 557d90a..8b14325 100644
--- a/nod/src/io/nkit.rs
+++ b/nod/src/io/nkit.rs
@@ -1,13 +1,15 @@
 use std::{
     io,
-    io::{Read, Seek, SeekFrom},
+    io::{Read, Seek, SeekFrom, Write},
 };
 
+use tracing::warn;
+
 use crate::{
+    common::MagicBytes,
     disc::DL_DVD_SIZE,
-    io::MagicBytes,
+    read::DiscMeta,
     util::read::{read_from, read_u16_be, read_u32_be, read_u64_be, read_vec},
-    DiscMeta,
 };
 
 #[allow(unused)]
@@ -56,19 +58,32 @@ const fn calc_header_size(version: u8, flags: u16, key_len: u32) -> usize {
     size
 }
 
-#[allow(unused)]
 #[derive(Debug, Clone)]
 pub struct NKitHeader {
     pub version: u8,
-    pub flags: u16,
     pub size: Option<u64>,
     pub crc32: Option<u32>,
     pub md5: Option<[u8; 16]>,
     pub sha1: Option<[u8; 20]>,
-    pub xxhash64: Option<u64>,
+    pub xxh64: Option<u64>,
     /// Bitstream of blocks that are junk data
-    pub junk_bits: Option<Vec<u8>>,
-    pub block_size: u32,
+    pub junk_bits: Option<JunkBits>,
+    pub encrypted: bool,
+}
+
+impl Default for NKitHeader {
+    fn default() -> Self {
+        Self {
+            version: 2,
+            size: None,
+            crc32: None,
+            md5: None,
+            sha1: None,
+            xxh64: None,
+            junk_bits: None,
+            encrypted: false,
+        }
+    }
+}
 
 const VERSION_PREFIX: [u8; 7] = *b"NKIT v";
@@ -82,7 +97,7 @@ impl NKitHeader {
         match NKitHeader::read_from(reader, block_size, has_junk_bits) {
             Ok(header) => Some(header),
             Err(e) => {
-                log::warn!("Failed to read NKit header: {}", e);
+                warn!("Failed to read NKit header: {}", e);
                 None
             }
         }
@@ -136,25 +151,20 @@ impl NKitHeader {
         let sha1 = (flags & NKitHeaderFlags::Sha1 as u16 != 0)
             .then(|| read_from::<[u8; 20], _>(&mut inner))
             .transpose()?;
-        let xxhash64 = (flags & NKitHeaderFlags::Xxhash64 as u16 != 0)
+        let xxh64 = (flags & NKitHeaderFlags::Xxhash64 as u16 != 0)
             .then(|| read_u64_be(&mut inner))
             .transpose()?;
-        let junk_bits = if has_junk_bits {
-            let n = DL_DVD_SIZE.div_ceil(block_size as u64).div_ceil(8);
-            Some(read_vec(reader, n as usize)?)
-        } else {
-            None
-        };
+        let junk_bits =
+            if has_junk_bits { Some(JunkBits::read_from(reader, block_size)?) } else { None };
 
-        Ok(Self { version, flags, size, crc32, md5, sha1, xxhash64, junk_bits, block_size })
+        let encrypted = flags & NKitHeaderFlags::Encrypted as u16 != 0;
+
+        Ok(Self { version, size, crc32, md5, sha1, xxh64, junk_bits, encrypted })
     }
 
     pub fn is_junk_block(&self, block: u32) -> Option<bool> {
-        self.junk_bits
-            .as_ref()
-            .and_then(|v| v.get((block / 8) as usize))
-            .map(|&b| b & (1 << (7 - (block & 7))) != 0)
+        self.junk_bits.as_ref().map(|v| v.get(block))
     }
 
     pub fn apply(&self, meta: &mut DiscMeta) {
@@ -164,6 +174,128 @@ impl NKitHeader {
         meta.crc32 = self.crc32;
         meta.md5 = self.md5;
         meta.sha1 = self.sha1;
-        meta.xxhash64 = self.xxhash64;
+        meta.xxh64 = self.xxh64;
+    }
+
+    fn calc_flags(&self) -> u16 {
+        let mut flags = 0;
+        if self.size.is_some() {
+            flags |= NKitHeaderFlags::Size as u16;
+        }
+        if self.crc32.is_some() {
+            flags |= NKitHeaderFlags::Crc32 as u16;
+        }
+        if self.md5.is_some() {
+            flags |= NKitHeaderFlags::Md5 as u16;
+        }
+        if self.sha1.is_some() {
+            flags |= NKitHeaderFlags::Sha1 as u16;
+        }
+        if self.xxh64.is_some() {
+            flags |= NKitHeaderFlags::Xxhash64 as u16;
+        }
+        if self.encrypted {
+            flags |= NKitHeaderFlags::Encrypted as u16;
+        }
+        flags
+    }
+
+    pub fn write_to<W>(&self, w: &mut W) -> io::Result<()>
+    where W: Write + ?Sized {
+        w.write_all(&VERSION_PREFIX)?;
+        w.write_all(&[b'0' + self.version])?;
+        let flags = self.calc_flags();
+        match self.version {
+            1 => {}
+            2 => {
+                let header_size = calc_header_size(self.version, flags, 0) as u16;
+                w.write_all(&header_size.to_be_bytes())?;
+                w.write_all(&flags.to_be_bytes())?;
+            }
+            version => {
+                return Err(io::Error::new(
+                    io::ErrorKind::InvalidData,
+                    format!("Unsupported NKit header version: {}", version),
+                ));
+            }
+        };
+        if let Some(size) = self.size {
+            w.write_all(&size.to_be_bytes())?;
+        }
+        if let Some(crc32) = self.crc32 {
+            w.write_all(&crc32.to_be_bytes())?;
+        } else if self.version == 1 {
+            return Err(io::Error::new(
+                io::ErrorKind::InvalidData,
+                "Missing CRC32 in NKit v1 header",
+            ));
+        }
+        if let Some(md5) = self.md5 {
+            w.write_all(&md5)?;
+        } else if self.version == 1 {
+            return Err(io::Error::new(
+                io::ErrorKind::InvalidData,
+                "Missing MD5 in NKit v1 header",
+            ));
+        }
+        if let Some(sha1) = self.sha1 {
+            w.write_all(&sha1)?;
+        } else if self.version == 1 {
+            return Err(io::Error::new(
+                io::ErrorKind::InvalidData,
+                "Missing SHA1 in NKit v1 header",
+            ));
+        }
+        if let Some(xxh64) = self.xxh64 {
+            w.write_all(&xxh64.to_be_bytes())?;
+        } else if self.version == 1 {
+            return Err(io::Error::new(
+                io::ErrorKind::InvalidData,
+                "Missing XXHash64 in NKit v1 header",
+            ));
+        }
+        if let Some(junk_bits) = &self.junk_bits {
+            junk_bits.write_to(w)?;
+        }
+        Ok(())
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct JunkBits(Vec<u8>);
+
+impl JunkBits {
+    pub fn new(block_size: u32) -> Self { Self(vec![0; Self::len(block_size)]) }
+
+    pub fn read_from<R>(reader: &mut R, block_size: u32) -> io::Result<Self>
+    where R: Read + ?Sized {
+        Ok(Self(read_vec(reader, Self::len(block_size))?))
+    }
+
+    pub fn write_to<W>(&self, w: &mut W) -> io::Result<()>
+    where W: Write + ?Sized {
+        w.write_all(&self.0)
+    }
+
+    pub fn set(&mut self, block: u32, is_junk: bool) {
+        let Some(byte) = self.0.get_mut((block / 8) as usize) else {
+            return;
+        };
+        if is_junk {
+            *byte |= 1 << (7 - (block & 7));
+        } else {
+            *byte &= !(1 << (7 - (block & 7)));
+        }
+    }
+
+    pub fn get(&self, block: u32) -> bool {
+        let Some(&byte) = self.0.get((block / 8) as usize) else {
+            return false;
+        };
+        byte & (1 << (7 - (block & 7))) != 0
+    }
+
+    fn len(block_size: u32) -> usize {
+        DL_DVD_SIZE.div_ceil(block_size as u64).div_ceil(8) as usize
+    }
+}
diff --git a/nod/src/io/split.rs b/nod/src/io/split.rs
index c92f3da..7e5b138 100644
--- a/nod/src/io/split.rs
+++ b/nod/src/io/split.rs
@@ -1,11 +1,12 @@
 use std::{
-    cmp::min,
     fs::File,
     io,
     io::{BufReader, Read, Seek, SeekFrom},
     path::{Path, PathBuf},
 };
 
+use tracing::instrument;
+
 use crate::{ErrorContext, Result, ResultContext};
 
 #[derive(Debug)]
@@ -101,10 +102,9 @@ impl SplitFileReader {
     }
 
     pub fn len(&self) -> u64 { self.files.last().map_or(0, |f| f.begin + f.size) }
-}
 
-impl Read for SplitFileReader {
-    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+    #[instrument(name = "SplitFileReader::check_open_file", skip_all)]
+    fn check_open_file(&mut self) -> io::Result<Option<&mut Split<BufReader<File>>>> {
         if self.open_file.is_none() || !self.open_file.as_ref().unwrap().contains(self.pos) {
             self.open_file = if let Some(split) = self.files.iter().find(|f| f.contains(self.pos)) {
                 let mut file = BufReader::new(File::open(&split.inner)?);
@@ -115,10 +115,18 @@ impl Read for SplitFileReader {
                 None
             };
         }
-        let Some(split) = self.open_file.as_mut() else {
+        Ok(self.open_file.as_mut())
+    }
+}
+
+impl Read for SplitFileReader {
+    #[instrument(name = "SplitFileReader::read", skip_all)]
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        let pos = self.pos;
+        let Some(split) = self.check_open_file()? else {
             return Ok(0);
         };
-        let to_read = min(buf.len(), (split.begin + split.size - self.pos) as usize);
+        let to_read = buf.len().min((split.begin + split.size - pos) as usize);
         let read = split.inner.read(&mut buf[..to_read])?;
         self.pos += read as u64;
         Ok(read)
@@ -126,6 +134,7 @@ impl Read for SplitFileReader {
 }
 
 impl Seek for SplitFileReader {
+    #[instrument(name = "SplitFileReader::seek", skip_all)]
     fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
         self.pos = match pos {
             SeekFrom::Start(pos) => pos,
diff --git a/nod/src/io/tgc.rs b/nod/src/io/tgc.rs
index 0508c9e..822237c 100644
--- a/nod/src/io/tgc.rs
+++ b/nod/src/io/tgc.rs
@@ -1,19 +1,31 @@
 use std::{
     io,
-    io::{Read, Seek, SeekFrom},
-    mem::size_of,
+    io::{BufRead, Read, Seek, SeekFrom},
+    sync::Arc,
 };
 
-use zerocopy::{big_endian::U32, FromBytes, Immutable, IntoBytes, KnownLayout};
+use bytes::{BufMut, Bytes, BytesMut};
+use zerocopy::{big_endian::U32, FromBytes, FromZeros, Immutable, IntoBytes, KnownLayout};
 
 use crate::{
-    disc::SECTOR_SIZE,
-    io::{
-        block::{Block, BlockIO, DiscStream, PartitionInfo, TGC_MAGIC},
-        Format, MagicBytes,
+    build::gc::{insert_junk_data, FileCallback, GCPartitionStream, WriteInfo, WriteKind},
+    common::{Compression, Format, MagicBytes, PartitionKind},
+    disc::{
+        fst::Fst,
+        gcn::{read_dol, read_fst},
+        reader::DiscReader,
+        writer::{DataCallback, DiscWriter},
+        DiscHeader, PartitionHeader, SECTOR_SIZE,
     },
-    util::read::{read_box_slice, read_from},
-    DiscHeader, DiscMeta, Error, Node, PartitionHeader, Result, ResultContext,
+    io::block::{Block, BlockKind, BlockReader, TGC_MAGIC},
+    read::{DiscMeta, DiscStream, PartitionOptions, PartitionReader},
+    util::{
+        align_up_32, array_ref,
+        read::{read_arc, read_arc_slice, read_from, read_with_zero_fill},
+        static_assert,
+    },
+    write::{DiscFinalization, DiscWriterWeight, FormatOptions, ProcessOptions},
+    Error, Result, ResultContext,
 };
 
 /// TGC header (big endian)
@@ -46,21 +58,21 @@ struct TGCHeader {
     banner_offset: U32,
     /// Size of the banner
     banner_size: U32,
-    /// Original user data offset in the GCM
-    gcm_user_offset: U32,
+    /// Start of user files in the original GCM
+    gcm_files_start: U32,
 }
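// A minimal sketch of the JunkBits bit layout used in nkit.rs above: one bit
// per disc block, MSB-first within each byte, so block `b` lives in byte
// `b / 8` at bit `7 - (b % 8)`.
fn junk_bit(bits: &[u8], block: u32) -> bool {
    bits.get((block / 8) as usize)
        .is_some_and(|&b| b & (1 << (7 - (block & 7))) != 0)
}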
+static_assert!(size_of::() == 0x38); + +const GCM_HEADER_SIZE: usize = 0x100000; + #[derive(Clone)] -pub struct DiscIOTGC { - inner: Box, - stream_len: u64, - header: TGCHeader, - fst: Box<[u8]>, +pub struct BlockReaderTGC { + inner: GCPartitionStream, } -impl DiscIOTGC { - pub fn new(mut inner: Box) -> Result> { - let stream_len = inner.seek(SeekFrom::End(0)).context("Determining stream length")?; +impl BlockReaderTGC { + pub fn new(mut inner: Box) -> Result> { inner.seek(SeekFrom::Start(0)).context("Seeking to start")?; // Read header @@ -68,89 +80,253 @@ impl DiscIOTGC { if header.magic != TGC_MAGIC { return Err(Error::DiscFormat("Invalid TGC magic".to_string())); } + let disc_size = (header.gcm_files_start.get() + header.user_size.get()) as u64; - // Read FST and adjust offsets + // Read disc header and partition header inner - .seek(SeekFrom::Start(header.fst_offset.get() as u64)) - .context("Seeking to TGC FST")?; - let mut fst = read_box_slice(inner.as_mut(), header.fst_size.get() as usize) - .context("Reading TGC FST")?; - let (root_node, _) = Node::ref_from_prefix(&fst) - .map_err(|_| Error::DiscFormat("Invalid TGC FST".to_string()))?; - let node_count = root_node.length() as usize; - let (nodes, _) = <[Node]>::mut_from_prefix_with_elems(&mut fst, node_count) - .map_err(|_| Error::DiscFormat("Invalid TGC FST".to_string()))?; - for node in nodes { + .seek(SeekFrom::Start(header.header_offset.get() as u64)) + .context("Seeking to GCM header")?; + let raw_header = + read_arc::<[u8; GCM_HEADER_SIZE], _>(inner.as_mut()).context("Reading GCM header")?; + + let (disc_header, remain) = DiscHeader::ref_from_prefix(raw_header.as_ref()) + .expect("Invalid disc header alignment"); + let disc_header = disc_header.clone(); + let (partition_header, _) = + PartitionHeader::ref_from_prefix(remain).expect("Invalid partition header alignment"); + let partition_header = partition_header.clone(); + + // Read DOL + inner.seek(SeekFrom::Start(header.dol_offset.get() as u64)).context("Seeking to DOL")?; + let raw_dol = read_arc_slice::(inner.as_mut(), header.dol_size.get() as usize) + .context("Reading DOL")?; + + // Read FST + inner.seek(SeekFrom::Start(header.fst_offset.get() as u64)).context("Seeking to FST")?; + let raw_fst = read_arc_slice::(inner.as_mut(), header.fst_size.get() as usize) + .context("Reading FST")?; + let fst = Fst::new(&raw_fst)?; + + let mut write_info = Vec::with_capacity(5 + fst.num_files()); + write_info.push(WriteInfo { + kind: WriteKind::Static(raw_header, "sys/header.bin"), + size: GCM_HEADER_SIZE as u64, + offset: 0, + }); + write_info.push(WriteInfo { + kind: WriteKind::Static(raw_dol, "sys/main.dol"), + size: header.dol_size.get() as u64, + offset: partition_header.dol_offset(false), + }); + write_info.push(WriteInfo { + kind: WriteKind::Static(raw_fst.clone(), "sys/fst.bin"), + size: header.fst_size.get() as u64, + offset: partition_header.fst_offset(false), + }); + + // Collect files + for (_, node, path) in fst.iter() { + if node.is_dir() { + continue; + } + write_info.push(WriteInfo { + kind: WriteKind::File(path), + size: node.length() as u64, + offset: node.offset(false), + }); + } + write_info.sort_unstable_by(|a, b| a.offset.cmp(&b.offset).then(a.size.cmp(&b.size))); + let write_info = insert_junk_data(write_info, &partition_header); + + let file_callback = FileCallbackTGC::new(inner, raw_fst, header); + let disc_id = *array_ref![disc_header.game_id, 0, 4]; + let disc_num = disc_header.disc_num; + Ok(Box::new(Self { + inner: GCPartitionStream::new( + 
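// A sketch of the layout pass above: write_info entries are sorted by
// (offset, size) so gap detection is a single linear scan; insert_junk_data is
// assumed to fill the span between one entry's end and the next entry's start
// with junk data. `find_gaps` is a hypothetical helper over (offset, size) pairs.
fn find_gaps(write_info: &[(u64, u64)]) -> Vec<(u64, u64)> {
    let mut gaps = Vec::new();
    let mut end = 0u64;
    for &(offset, size) in write_info {
        if offset > end {
            gaps.push((end, offset - end)); // this span becomes junk data
        }
        end = end.max(offset + size);
    }
    gaps
}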
file_callback, + Arc::from(write_info), + disc_size, + disc_id, + disc_num, + ), + })) + } +} + +impl BlockReader for BlockReaderTGC { + fn read_block(&mut self, out: &mut [u8], sector: u32) -> io::Result { + let count = (out.len() / SECTOR_SIZE) as u32; + self.inner.set_position(sector as u64 * SECTOR_SIZE as u64); + let read = read_with_zero_fill(&mut self.inner, out)?; + Ok(Block::sectors(sector, count, if read == 0 { BlockKind::None } else { BlockKind::Raw })) + } + + fn block_size(&self) -> u32 { SECTOR_SIZE as u32 } + + fn meta(&self) -> DiscMeta { + DiscMeta { format: Format::Tgc, disc_size: Some(self.inner.len()), ..Default::default() } + } +} + +#[derive(Clone)] +struct FileCallbackTGC { + inner: Box, + fst: Arc<[u8]>, + header: TGCHeader, +} + +impl FileCallbackTGC { + fn new(inner: Box, fst: Arc<[u8]>, header: TGCHeader) -> Self { + Self { inner, fst, header } + } +} + +impl FileCallback for FileCallbackTGC { + fn read_file(&mut self, out: &mut [u8], name: &str, offset: u64) -> io::Result<()> { + let fst = Fst::new(&self.fst).map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; + let (_, node) = fst.find(name).ok_or_else(|| { + io::Error::new(io::ErrorKind::NotFound, format!("File not found in FST: {}", name)) + })?; + if !node.is_file() { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!("Path is a directory: {}", name), + )); + } + // Calculate file offset in TGC + let file_start = (node.offset(false) as u32 - self.header.gcm_files_start.get()) + + self.header.user_offset.get(); + self.inner.seek(SeekFrom::Start(file_start as u64 + offset))?; + self.inner.read_exact(out)?; + Ok(()) + } +} + +#[derive(Clone)] +pub struct DiscWriterTGC { + inner: Box, + header: TGCHeader, + header_data: Bytes, + output_size: u64, +} + +impl DiscWriterTGC { + pub fn new(reader: DiscReader, options: &FormatOptions) -> Result> { + if options.format != Format::Tgc { + return Err(Error::DiscFormat("Invalid format for TGC writer".to_string())); + } + if options.compression != Compression::None { + return Err(Error::DiscFormat("TGC does not support compression".to_string())); + } + + let mut inner = + reader.open_partition_kind(PartitionKind::Data, &PartitionOptions::default())?; + + // Read GCM header + let mut raw_header = <[u8; GCM_HEADER_SIZE]>::new_box_zeroed()?; + inner.read_exact(raw_header.as_mut()).context("Reading GCM header")?; + let (_, remain) = DiscHeader::ref_from_prefix(raw_header.as_ref()) + .expect("Invalid disc header alignment"); + let (partition_header, _) = + PartitionHeader::ref_from_prefix(remain).expect("Invalid partition header alignment"); + + // Read DOL + let raw_dol = read_dol(inner.as_mut(), partition_header, false)?; + let raw_fst = read_fst(inner.as_mut(), partition_header, false)?; + + // Parse FST + let fst = Fst::new(&raw_fst)?; + let mut gcm_files_start = u32::MAX; + for (_, node, _) in fst.iter() { if node.is_file() { - node.offset = node.offset - header.gcm_user_offset - + (header.user_offset - header.header_offset); + let start = node.offset(false) as u32; + if start < gcm_files_start { + gcm_files_start = start; + } } } - Ok(Box::new(Self { inner, stream_len, header, fst })) + // Layout system files + let gcm_header_offset = SECTOR_SIZE as u32; + let fst_offset = gcm_header_offset + GCM_HEADER_SIZE as u32; + let dol_offset = align_up_32(fst_offset + partition_header.fst_size.get(), 32); + let user_size = + partition_header.user_offset.get() + partition_header.user_size.get() - gcm_files_start; + let user_end = + align_up_32(dol_offset + 
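// A sketch of the alignment helper assumed above: align_up_32 appears to round
// up to a multiple of a power-of-two alignment (32 bytes for the DOL, a full
// sector for the end of the user data).
fn align_up(value: u32, align: u32) -> u32 {
    debug_assert!(align.is_power_of_two());
    (value + align - 1) & !(align - 1)
}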
raw_dol.len() as u32 + user_size, SECTOR_SIZE as u32); + let user_offset = user_end - user_size; + + let header = TGCHeader { + magic: TGC_MAGIC, + version: 0.into(), + header_offset: gcm_header_offset.into(), + header_size: (GCM_HEADER_SIZE as u32).into(), + fst_offset: fst_offset.into(), + fst_size: partition_header.fst_size, + fst_max_size: partition_header.fst_max_size, + dol_offset: dol_offset.into(), + dol_size: (raw_dol.len() as u32).into(), + user_offset: user_offset.into(), + user_size: user_size.into(), + banner_offset: 0.into(), + banner_size: 0.into(), + gcm_files_start: gcm_files_start.into(), + }; + let mut buffer = BytesMut::with_capacity(user_offset as usize); + buffer.put_slice(header.as_bytes()); + buffer.put_bytes(0, gcm_header_offset as usize - buffer.len()); + + // Write GCM header + buffer.put_slice(raw_header.as_ref()); + buffer.put_bytes(0, fst_offset as usize - buffer.len()); + + // Write FST + buffer.put_slice(raw_fst.as_ref()); + buffer.put_bytes(0, dol_offset as usize - buffer.len()); + + // Write DOL + buffer.put_slice(raw_dol.as_ref()); + buffer.put_bytes(0, user_offset as usize - buffer.len()); + + let header_data = buffer.freeze(); + Ok(Box::new(Self { inner, header, header_data, output_size: user_end as u64 })) } } -impl BlockIO for DiscIOTGC { - fn read_block_internal( - &mut self, - out: &mut [u8], - block: u32, - partition: Option<&PartitionInfo>, - ) -> io::Result { - let offset = self.header.header_offset.get() as u64 + block as u64 * SECTOR_SIZE as u64; - if offset >= self.stream_len { - // End of file - return Ok(Block::Zero); +impl DiscWriter for DiscWriterTGC { + fn process( + &self, + data_callback: &mut DataCallback, + _options: &ProcessOptions, + ) -> Result { + let mut data_position = self.header.user_offset.get() as u64; + data_callback(self.header_data.clone(), data_position, self.output_size) + .context("Failed to write TGC header")?; + + // Write user data serially + let mut inner = self.inner.clone(); + inner + .seek(SeekFrom::Start(self.header.gcm_files_start.get() as u64)) + .context("Seeking to GCM files start")?; + loop { + // TODO use DiscReader::fill_buf_internal + let buf = inner + .fill_buf() + .with_context(|| format!("Reading disc data at offset {data_position}"))?; + let len = buf.len(); + if len == 0 { + break; + } + data_position += len as u64; + data_callback(Bytes::copy_from_slice(buf), data_position, self.output_size) + .context("Failed to write disc data")?; + inner.consume(len); } - self.inner.seek(SeekFrom::Start(offset))?; - if offset + SECTOR_SIZE as u64 > self.stream_len { - // If the last block is not a full sector, fill the rest with zeroes - let read = (self.stream_len - offset) as usize; - self.inner.read_exact(&mut out[..read])?; - out[read..].fill(0); - } else { - self.inner.read_exact(out)?; - } - - // Adjust internal GCM header - if block == 0 { - let partition_header = PartitionHeader::mut_from_bytes( - &mut out[size_of::() - ..size_of::() + size_of::()], - ) - .unwrap(); - partition_header.dol_offset = self.header.dol_offset - self.header.header_offset; - partition_header.fst_offset = self.header.fst_offset - self.header.header_offset; - } - - // Copy modified FST to output - if offset + out.len() as u64 > self.header.fst_offset.get() as u64 - && offset < self.header.fst_offset.get() as u64 + self.header.fst_size.get() as u64 - { - let out_offset = (self.header.fst_offset.get() as u64).saturating_sub(offset) as usize; - let fst_offset = offset.saturating_sub(self.header.fst_offset.get() as u64) as usize; - 
let copy_len = - (out.len() - out_offset).min(self.header.fst_size.get() as usize - fst_offset); - out[out_offset..out_offset + copy_len] - .copy_from_slice(&self.fst[fst_offset..fst_offset + copy_len]); - } - - match partition { - Some(partition) if partition.has_encryption => Ok(Block::PartEncrypted), - _ => Ok(Block::Raw), - } + Ok(DiscFinalization::default()) } - fn block_size_internal(&self) -> u32 { SECTOR_SIZE as u32 } + fn progress_bound(&self) -> u64 { self.output_size } - fn meta(&self) -> DiscMeta { - DiscMeta { - format: Format::Tgc, - lossless: true, - disc_size: Some(self.stream_len - self.header.header_offset.get() as u64), - ..Default::default() - } - } + fn weight(&self) -> DiscWriterWeight { DiscWriterWeight::Light } } diff --git a/nod/src/io/wbfs.rs b/nod/src/io/wbfs.rs index a9c31ac..53b28d1 100644 --- a/nod/src/io/wbfs.rs +++ b/nod/src/io/wbfs.rs @@ -2,17 +2,34 @@ use std::{ io, io::{Read, Seek, SeekFrom}, mem::size_of, + sync::Arc, }; -use zerocopy::{big_endian::*, FromBytes, Immutable, IntoBytes, KnownLayout}; +use bytes::{BufMut, Bytes, BytesMut}; +use zerocopy::{big_endian::*, FromBytes, FromZeros, Immutable, IntoBytes, KnownLayout}; use crate::{ - io::{ - block::{Block, BlockIO, DiscStream, PartitionInfo, WBFS_MAGIC}, - nkit::NKitHeader, - DiscMeta, Format, MagicBytes, + common::{Compression, Format, MagicBytes}, + disc::{ + reader::DiscReader, + writer::{ + check_block, par_process, read_block, BlockProcessor, BlockResult, CheckBlockResult, + DataCallback, DiscWriter, + }, + SECTOR_SIZE, }, - util::read::{read_box_slice, read_from}, + io::{ + block::{Block, BlockKind, BlockReader, WBFS_MAGIC}, + nkit::{JunkBits, NKitHeader}, + }, + read::{DiscMeta, DiscStream}, + util::{ + array_ref, + digest::DigestManager, + lfg::LaggedFibonacci, + read::{read_arc_slice, read_box_slice, read_from}, + }, + write::{DiscFinalization, DiscWriterWeight, FormatOptions, ProcessOptions}, Error, Result, ResultContext, }; @@ -23,7 +40,8 @@ struct WBFSHeader { num_sectors: U32, sector_size_shift: u8, block_size_shift: u8, - _pad: [u8; 2], + version: u8, + _pad: u8, } impl WBFSHeader { @@ -36,19 +54,20 @@ impl WBFSHeader { const DISC_HEADER_SIZE: usize = 0x100; const NUM_WII_SECTORS: u32 = 143432 * 2; // Double layer discs +const NKIT_HEADER_OFFSET: u64 = 0x10000; #[derive(Clone)] -pub struct DiscIOWBFS { +pub struct BlockReaderWBFS { inner: Box, /// WBFS header header: WBFSHeader, /// Map of Wii LBAs to WBFS LBAs - block_map: Box<[U16]>, + block_map: Arc<[U16]>, /// Optional NKit header nkit_header: Option, } -impl DiscIOWBFS { +impl BlockReaderWBFS { pub fn new(mut inner: Box) -> Result> { inner.seek(SeekFrom::Start(0)).context("Seeking to start")?; let header: WBFSHeader = read_from(inner.as_mut()).context("Reading WBFS header")?; @@ -81,39 +100,36 @@ impl DiscIOWBFS { inner .seek(SeekFrom::Start(header.sector_size() as u64 + DISC_HEADER_SIZE as u64)) .context("Seeking to WBFS LBA table")?; // Skip header - let block_map: Box<[U16]> = read_box_slice(inner.as_mut(), header.max_blocks() as usize) + let block_map: Arc<[U16]> = read_arc_slice(inner.as_mut(), header.max_blocks() as usize) .context("Reading WBFS LBA table")?; // Read NKit header if present (always at 0x10000) - inner.seek(SeekFrom::Start(0x10000)).context("Seeking to NKit header")?; + inner.seek(SeekFrom::Start(NKIT_HEADER_OFFSET)).context("Seeking to NKit header")?; let nkit_header = NKitHeader::try_read_from(inner.as_mut(), header.block_size(), true); Ok(Box::new(Self { inner, header, block_map, nkit_header })) } } -impl 
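// A sketch of the WBFS indirection used by read_block below: the LBA table
// maps each disc block to a physical block in the file, and an entry of 0
// means "not stored" (the block is all zeroes or reproducible junk).
fn wbfs_block_offset(block_map: &[u16], block_idx: usize, block_size: u32) -> Option<u64> {
    match block_map.get(block_idx).copied() {
        None | Some(0) => None,
        Some(phys) => Some(phys as u64 * block_size as u64),
    }
}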
BlockIO for DiscIOWBFS { - fn read_block_internal( - &mut self, - out: &mut [u8], - block: u32, - partition: Option<&PartitionInfo>, - ) -> io::Result { +impl BlockReader for BlockReaderWBFS { + fn read_block(&mut self, out: &mut [u8], sector: u32) -> io::Result { let block_size = self.header.block_size(); - if block >= self.header.max_blocks() { - return Ok(Block::Zero); + let block_idx = ((sector as u64 * SECTOR_SIZE as u64) / block_size as u64) as u32; + if block_idx >= self.header.max_blocks() { + // Out of bounds + return Ok(Block::new(block_idx, block_size, BlockKind::None)); } // Find the block in the map - let phys_block = self.block_map[block as usize].get(); + let phys_block = self.block_map[block_idx as usize].get(); if phys_block == 0 { // Check if block is junk data - if self.nkit_header.as_ref().and_then(|h| h.is_junk_block(block)).unwrap_or(false) { - return Ok(Block::Junk); + if self.nkit_header.as_ref().and_then(|h| h.is_junk_block(block_idx)).unwrap_or(false) { + return Ok(Block::new(block_idx, block_size, BlockKind::Junk)); } // Otherwise, read zeroes - return Ok(Block::Zero); + return Ok(Block::new(block_idx, block_size, BlockKind::Zero)); } // Read block @@ -121,13 +137,10 @@ impl BlockIO for DiscIOWBFS { self.inner.seek(SeekFrom::Start(block_start))?; self.inner.read_exact(out)?; - match partition { - Some(partition) if partition.has_encryption => Ok(Block::PartEncrypted), - _ => Ok(Block::Raw), - } + Ok(Block::new(block_idx, block_size, BlockKind::Raw)) } - fn block_size_internal(&self) -> u32 { self.header.block_size() } + fn block_size(&self) -> u32 { self.header.block_size() } fn meta(&self) -> DiscMeta { let mut result = DiscMeta { @@ -141,3 +154,228 @@ impl BlockIO for DiscIOWBFS { result } } + +struct BlockProcessorWBFS { + inner: DiscReader, + header: WBFSHeader, + decrypted_block: Box<[u8]>, + lfg: LaggedFibonacci, + disc_id: [u8; 4], + disc_num: u8, +} + +impl Clone for BlockProcessorWBFS { + fn clone(&self) -> Self { + let block_size = self.header.block_size() as usize; + Self { + inner: self.inner.clone(), + header: self.header.clone(), + decrypted_block: <[u8]>::new_box_zeroed_with_elems(block_size).unwrap(), + lfg: LaggedFibonacci::default(), + disc_id: self.disc_id, + disc_num: self.disc_num, + } + } +} + +impl BlockProcessor for BlockProcessorWBFS { + type BlockMeta = CheckBlockResult; + + fn process_block(&mut self, block_idx: u32) -> io::Result> { + let block_size = self.header.block_size() as usize; + let input_position = block_idx as u64 * block_size as u64; + self.inner.seek(SeekFrom::Start(input_position))?; + let (block_data, disc_data) = read_block(&mut self.inner, block_size)?; + + // Check if block is zeroed or junk + let result = match check_block( + disc_data.as_ref(), + &mut self.decrypted_block, + input_position, + self.inner.partitions(), + &mut self.lfg, + self.disc_id, + self.disc_num, + )? 
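// A sketch of the sector-to-block conversion in read_block above: WBFS blocks
// span many 0x8000-byte disc sectors, so a sector index converts to a block
// index by byte position.
fn wbfs_block_for_sector(sector: u32, block_size: u32) -> u32 {
    ((sector as u64 * 0x8000) / block_size as u64) as u32
}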
{ + CheckBlockResult::Normal => { + BlockResult { block_idx, disc_data, block_data, meta: CheckBlockResult::Normal } + } + CheckBlockResult::Zeroed => BlockResult { + block_idx, + disc_data, + block_data: Bytes::new(), + meta: CheckBlockResult::Zeroed, + }, + CheckBlockResult::Junk => BlockResult { + block_idx, + disc_data, + block_data: Bytes::new(), + meta: CheckBlockResult::Junk, + }, + }; + Ok(result) + } +} + +#[derive(Clone)] +pub struct DiscWriterWBFS { + inner: DiscReader, + header: WBFSHeader, + disc_table: Box<[u8]>, + block_count: u16, +} + +pub const DEFAULT_BLOCK_SIZE: u32 = 0x200000; // 2 MiB + +impl DiscWriterWBFS { + pub fn new(mut inner: DiscReader, options: &FormatOptions) -> Result> { + if options.format != Format::Wbfs { + return Err(Error::DiscFormat("Invalid format for WBFS writer".to_string())); + } + if options.compression != Compression::None { + return Err(Error::DiscFormat("WBFS does not support compression".to_string())); + } + let block_size = options.block_size; + if block_size < SECTOR_SIZE as u32 || block_size % SECTOR_SIZE as u32 != 0 { + return Err(Error::DiscFormat("Invalid block size for WBFS".to_string())); + } + let sector_size = 512u32; + + let disc_size = inner.disc_size(); + let block_count = disc_size.div_ceil(block_size as u64); + if block_count > u16::MAX as u64 { + return Err(Error::DiscFormat("Block size too small".to_string())); + } + let block_count = block_count as u16; + + // Create header + let header = WBFSHeader { + magic: WBFS_MAGIC, + num_sectors: 0.into(), // Written during finalization + sector_size_shift: sector_size.trailing_zeros() as u8, + block_size_shift: block_size.trailing_zeros() as u8, + version: 1, + _pad: 0, + }; + + // Create disc table + let mut disc_table = + <[u8]>::new_box_zeroed_with_elems(sector_size as usize - size_of::())?; + disc_table[0] = 1; + + let mut header_size = size_of::(); + header_size += size_of_val(disc_table.as_ref()); + header_size += DISC_HEADER_SIZE; + header_size += header.max_blocks() as usize * size_of::(); + if header_size > block_size as usize { + return Err(Error::Other("WBFS info too large for block".to_string())); + } + + inner.seek(SeekFrom::Start(0)).context("Seeking to start")?; + Ok(Box::new(Self { inner, header, disc_table, block_count })) + } +} + +impl DiscWriter for DiscWriterWBFS { + fn process( + &self, + data_callback: &mut DataCallback, + options: &ProcessOptions, + ) -> Result { + let block_size = self.header.block_size(); + let max_blocks = self.header.max_blocks(); + let mut block_map = <[U16]>::new_box_zeroed_with_elems(max_blocks as usize)?; + + let disc_size = self.inner.disc_size(); + let mut header_data = BytesMut::with_capacity(block_size as usize); + header_data.put_slice(self.header.as_bytes()); + header_data.put_slice(self.disc_table.as_ref()); + header_data.put_slice(&self.inner.header().as_bytes()[..DISC_HEADER_SIZE]); + header_data.put_slice(block_map.as_bytes()); + header_data.resize(block_size as usize, 0); + data_callback(header_data.freeze(), 0, disc_size).context("Failed to write header")?; + + // Determine junk data values + let disc_header = self.inner.header(); + let disc_id = *array_ref![disc_header.game_id, 0, 4]; + let disc_num = disc_header.disc_num; + + // Create hashers + let digest = DigestManager::new(options); + let mut junk_bits = JunkBits::new(block_size); + let mut input_position = 0; + + let mut phys_block = 1; + par_process( + || BlockProcessorWBFS { + inner: self.inner.clone(), + header: self.header.clone(), + decrypted_block: 
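// A worked example of the u16 block-count bound checked in DiscWriterWBFS::new
// above: at the 2 MiB default block size, a dual-layer Wii image
// (143432 * 2 sectors of 0x8000 bytes, about 8.75 GiB) needs 4483 blocks,
// comfortably below u16::MAX.
fn example_block_count() -> u64 {
    (143_432u64 * 2 * 0x8000).div_ceil(0x200000) // = 4483
}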
<[u8]>::new_box_zeroed_with_elems(block_size as usize).unwrap(), + lfg: LaggedFibonacci::default(), + disc_id, + disc_num, + }, + self.block_count as u32, + options.processor_threads, + |block| -> Result<()> { + // Update hashers + let disc_data_len = block.disc_data.len() as u64; + digest.send(block.disc_data); + + // Check if block is zeroed or junk + match block.meta { + CheckBlockResult::Normal => { + block_map[block.block_idx as usize] = phys_block.into(); + phys_block += 1; + } + CheckBlockResult::Zeroed => {} + CheckBlockResult::Junk => { + junk_bits.set(block.block_idx, true); + } + } + + input_position += disc_data_len; + data_callback(block.block_data.clone(), input_position, disc_size) + .with_context(|| format!("Failed to write block {}", block.block_idx))?; + Ok(()) + }, + )?; + + // Collect hash results + let digest_results = digest.finish(); + let mut nkit_header = NKitHeader { + version: 2, + size: Some(disc_size), + crc32: None, + md5: None, + sha1: None, + xxh64: None, + junk_bits: Some(junk_bits), + encrypted: true, + }; + nkit_header.apply_digests(&digest_results); + + // Update header + let mut header = self.header.clone(); + header.num_sectors = (((phys_block as u64 * header.block_size() as u64) + / header.sector_size() as u64) as u32) + .into(); + let mut header_data = BytesMut::with_capacity(block_size as usize); + header_data.put_slice(header.as_bytes()); + header_data.put_slice(&self.disc_table); + header_data.put_slice(&self.inner.header().as_bytes()[..DISC_HEADER_SIZE]); + header_data.put_slice(block_map.as_bytes()); + header_data.resize(NKIT_HEADER_OFFSET as usize, 0); + let mut w = header_data.writer(); + nkit_header.write_to(&mut w).context("Writing NKit header")?; + let header_data = w.into_inner().freeze(); + + let mut finalization = DiscFinalization { header: header_data, ..Default::default() }; + finalization.apply_digests(&digest_results); + Ok(finalization) + } + + fn progress_bound(&self) -> u64 { self.inner.disc_size() } + + fn weight(&self) -> DiscWriterWeight { DiscWriterWeight::Medium } +} diff --git a/nod/src/io/wia.rs b/nod/src/io/wia.rs index 0020afb..bcd95c9 100644 --- a/nod/src/io/wia.rs +++ b/nod/src/io/wia.rs @@ -1,31 +1,51 @@ use std::{ + borrow::Cow, io, io::{Read, Seek, SeekFrom}, mem::size_of, + sync::Arc, + time::Instant, }; -use zerocopy::{big_endian::*, FromBytes, Immutable, IntoBytes, KnownLayout}; +use bytes::{Buf, BufMut, Bytes, BytesMut}; +use tracing::{debug, instrument}; +use zerocopy::{big_endian::*, FromBytes, FromZeros, Immutable, IntoBytes, KnownLayout}; use crate::{ + common::{Compression, Format, HashBytes, KeyBytes, MagicBytes}, disc::{ hashes::sha1_hash, - wii::{HASHES_SIZE, SECTOR_DATA_SIZE}, + reader::DiscReader, + wii::SECTOR_DATA_SIZE, + writer::{par_process, read_block, BlockProcessor, BlockResult, DataCallback, DiscWriter}, SECTOR_SIZE, }, io::{ - block::{Block, BlockIO, DiscStream, PartitionInfo, RVZ_MAGIC, WIA_MAGIC}, + block::{Block, BlockKind, BlockReader, RVZ_MAGIC, WIA_MAGIC}, nkit::NKitHeader, - Compression, Format, HashBytes, KeyBytes, MagicBytes, }, - static_assert, + read::{DiscMeta, DiscStream}, util::{ + aes::decrypt_sector_data_b2b, + align_up_32, align_up_64, array_ref, array_ref_mut, + compress::{Compressor, DecompressionKind, Decompressor}, + digest::DigestManager, lfg::LaggedFibonacci, - read::{read_box_slice, read_from, read_u16_be, read_vec}, - take_seek::TakeSeekExt, + read::{read_arc_slice, read_from, read_vec}, + static_assert, }, - DiscMeta, Error, Result, ResultContext, + 
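// A sketch of the num_sectors finalization math used in the WBFS writer above:
// the stored file size, expressed in 512-byte WBFS sectors, follows from the
// number of allocated physical blocks.
fn wbfs_num_sectors(phys_blocks: u64, block_size: u32, sector_size: u32) -> u32 {
    ((phys_blocks * block_size as u64) / sector_size as u64) as u32
}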
write::{DiscFinalization, DiscWriterWeight, FormatOptions, ProcessOptions},
+    Error, Result, ResultContext,
 };
 
+const WIA_VERSION: u32 = 0x01000000;
+const WIA_VERSION_WRITE_COMPATIBLE: u32 = 0x01000000;
+const WIA_VERSION_READ_COMPATIBLE: u32 = 0x00080000;
+
+const RVZ_VERSION: u32 = 0x01000000;
+const RVZ_VERSION_WRITE_COMPATIBLE: u32 = 0x00030000;
+const RVZ_VERSION_READ_COMPATIBLE: u32 = 0x00030000;
+
 /// This struct is stored at offset 0x0 and is 0x48 bytes long. The wit source code says its format
 /// will never be changed.
 #[derive(Clone, Debug, PartialEq, FromBytes, IntoBytes, Immutable, KnownLayout)]
@@ -69,6 +89,17 @@ impl WIAFileHeader {
         if self.magic != WIA_MAGIC && self.magic != RVZ_MAGIC {
             return Err(Error::DiscFormat(format!("Invalid WIA/RVZ magic: {:#X?}", self.magic)));
         }
+        // Check version
+        let is_rvz = self.magic == RVZ_MAGIC;
+        let version = if is_rvz { RVZ_VERSION } else { WIA_VERSION };
+        let version_read_compat =
+            if is_rvz { RVZ_VERSION_READ_COMPATIBLE } else { WIA_VERSION_READ_COMPATIBLE };
+        if version < self.version_compatible.get() || version_read_compat > self.version.get() {
+            return Err(Error::DiscFormat(format!(
+                "Unsupported WIA/RVZ version: {:#X}",
+                self.version.get()
+            )));
+        }
         // Check file head hash
         let bytes = self.as_bytes();
         verify_hash(&bytes[..bytes.len() - size_of::<HashBytes>()], &self.file_head_hash)?;
@@ -94,6 +125,19 @@ pub enum DiscKind {
     Wii,
 }
 
+impl From<DiscKind> for u32 {
+    fn from(value: DiscKind) -> Self {
+        match value {
+            DiscKind::GameCube => 1,
+            DiscKind::Wii => 2,
+        }
+    }
+}
+
+impl From<DiscKind> for U32 {
+    fn from(value: DiscKind) -> Self { u32::from(value).into() }
+}
+
 impl TryFrom<u32> for DiscKind {
     type Error = Error;
 
@@ -123,6 +167,23 @@ pub enum WIACompression {
     Zstandard,
 }
 
+impl From<WIACompression> for u32 {
+    fn from(value: WIACompression) -> Self {
+        match value {
+            WIACompression::None => 0,
+            WIACompression::Purge => 1,
+            WIACompression::Bzip2 => 2,
+            WIACompression::Lzma => 3,
+            WIACompression::Lzma2 => 4,
+            WIACompression::Zstandard => 5,
+        }
+    }
+}
+
+impl From<WIACompression> for U32 {
+    fn from(value: WIACompression) -> Self { u32::from(value).into() }
+}
+
 impl TryFrom<u32> for WIACompression {
     type Error = Error;
 
@@ -218,9 +279,20 @@ pub struct WIADisc {
 static_assert!(size_of::<WIADisc>() == 0xDC);
 
 impl WIADisc {
-    pub fn validate(&self) -> Result<()> {
+    pub fn validate(&self, is_rvz: bool) -> Result<()> {
         DiscKind::try_from(self.disc_type.get())?;
         WIACompression::try_from(self.compression.get())?;
+        let chunk_size = self.chunk_size.get();
+        if is_rvz {
+            if chunk_size < SECTOR_SIZE as u32 || !chunk_size.is_power_of_two() {
+                return Err(Error::DiscFormat(format!(
+                    "Invalid RVZ chunk size: {:#X}",
+                    chunk_size
+                )));
+            }
+        } else if chunk_size < 0x200000 || chunk_size % 0x200000 != 0 {
+            return Err(Error::DiscFormat(format!("Invalid WIA chunk size: {:#X}", chunk_size)));
+        }
         if self.partition_type_size.get() != size_of::<WIAPartition>() as u32 {
             return Err(Error::DiscFormat(format!(
                 "WIA/RVZ partition type size is {}, expected {}",
@@ -255,10 +327,21 @@ pub struct WIAPartitionData {
 static_assert!(size_of::<WIAPartitionData>() == 0x10);
 
 impl WIAPartitionData {
-    pub fn contains(&self, sector: u32) -> bool {
+    pub fn start_offset(&self) -> u64 { self.first_sector.get() as u64 * SECTOR_SIZE as u64 }
+
+    pub fn end_offset(&self) -> u64 {
+        self.start_offset() + self.num_sectors.get() as u64 * SECTOR_SIZE as u64
+    }
+
+    pub fn contains_sector(&self, sector: u32) -> bool {
         let start = self.first_sector.get();
         sector >= start && sector < start + self.num_sectors.get()
     }
+
+    pub fn contains_group(&self, group: u32) ->
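// A sketch of the version constants above, assuming the wit-style packing
// 0xMMmmpp00 (so 0x01000000 reads as v1.0.0 and 0x00030000 as v0.3.0); the
// check accepts a file when our version is >= its version_compatible and our
// read-compatibility floor is <= its version.
fn decode_version(v: u32) -> (u8, u8, u8) {
    ((v >> 24) as u8, (v >> 16) as u8, (v >> 8) as u8)
}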
bool { + let start = self.group_index.get(); + group >= start && group < start + self.num_groups.get() + } } /// This struct is used for keeping track of Wii partition data that on the actual disc is encrypted @@ -319,11 +402,19 @@ impl WIARawData { pub fn end_offset(&self) -> u64 { self.raw_data_offset.get() + self.raw_data_size.get() } - pub fn end_sector(&self) -> u32 { (self.end_offset() / SECTOR_SIZE as u64) as u32 } + pub fn end_sector(&self) -> u32 { + // Round up for unaligned raw data end offsets + self.end_offset().div_ceil(SECTOR_SIZE as u64) as u32 + } - pub fn contains(&self, sector: u32) -> bool { + pub fn contains_sector(&self, sector: u32) -> bool { sector >= self.start_sector() && sector < self.end_sector() } + + pub fn contains_group(&self, group: u32) -> bool { + let start = self.group_index.get(); + group >= start && group < start + self.num_groups.get() + } } /// This struct points directly to the actual disc data, stored compressed. @@ -368,8 +459,10 @@ pub struct RVZGroup { } impl RVZGroup { + #[inline] pub fn data_size(&self) -> u32 { self.data_size_and_flag.get() & 0x7FFFFFFF } + #[inline] pub fn is_compressed(&self) -> bool { self.data_size_and_flag.get() & 0x80000000 != 0 } } @@ -383,6 +476,12 @@ impl From<&WIAGroup> for RVZGroup { } } +impl From<&RVZGroup> for WIAGroup { + fn from(value: &RVZGroup) -> Self { + Self { data_offset: value.data_offset, data_size: value.data_size().into() } + } +} + /// This struct represents a 20-byte difference between the recalculated hash data and the original /// hash data. (See also [WIAExceptionList]) /// @@ -441,77 +540,20 @@ pub struct WIAException { /// end offset of the last [WIAExceptionList] is not evenly divisible by 4, padding is inserted /// after it so that the data afterwards will start at a 4 byte boundary. This padding is not /// inserted for the other compression methods. -type WIAExceptionList = Box<[WIAException]>; +pub type WIAExceptionList = Box<[WIAException]>; -#[derive(Clone)] -pub enum Decompressor { - None, - #[cfg(feature = "compress-bzip2")] - Bzip2, - #[cfg(feature = "compress-lzma")] - Lzma(Box<[u8]>), - #[cfg(feature = "compress-lzma")] - Lzma2(Box<[u8]>), - #[cfg(feature = "compress-zstd")] - Zstandard, -} - -impl Decompressor { - pub fn new(disc: &WIADisc) -> Result { - let _data = &disc.compr_data[..disc.compr_data_len as usize]; - match disc.compression() { - WIACompression::None => Ok(Self::None), - #[cfg(feature = "compress-bzip2")] - WIACompression::Bzip2 => Ok(Self::Bzip2), - #[cfg(feature = "compress-lzma")] - WIACompression::Lzma => Ok(Self::Lzma(Box::from(_data))), - #[cfg(feature = "compress-lzma")] - WIACompression::Lzma2 => Ok(Self::Lzma2(Box::from(_data))), - #[cfg(feature = "compress-zstd")] - WIACompression::Zstandard => Ok(Self::Zstandard), - comp => Err(Error::DiscFormat(format!("Unsupported WIA/RVZ compression: {:?}", comp))), - } - } - - pub fn wrap<'a, R>(&mut self, reader: R) -> io::Result> - where R: Read + 'a { - Ok(match self { - Decompressor::None => Box::new(reader), - #[cfg(feature = "compress-bzip2")] - Decompressor::Bzip2 => Box::new(bzip2::read::BzDecoder::new(reader)), - #[cfg(feature = "compress-lzma")] - Decompressor::Lzma(data) => { - use crate::util::compress::{lzma_props_decode, new_lzma_decoder}; - let options = lzma_props_decode(data)?; - Box::new(new_lzma_decoder(reader, &options)?) 
- } - #[cfg(feature = "compress-lzma")] - Decompressor::Lzma2(data) => { - use crate::util::compress::{lzma2_props_decode, new_lzma2_decoder}; - let options = lzma2_props_decode(data)?; - Box::new(new_lzma2_decoder(reader, &options)?) - } - #[cfg(feature = "compress-zstd")] - Decompressor::Zstandard => Box::new(zstd::stream::Decoder::new(reader)?), - }) - } -} - -pub struct DiscIOWIA { +pub struct BlockReaderWIA { inner: Box, header: WIAFileHeader, disc: WIADisc, - partitions: Box<[WIAPartition]>, - raw_data: Box<[WIARawData]>, - groups: Box<[RVZGroup]>, + partitions: Arc<[WIAPartition]>, + raw_data: Arc<[WIARawData]>, + groups: Arc<[RVZGroup]>, nkit_header: Option, decompressor: Decompressor, - group: u32, - group_data: Vec, - exception_lists: Vec, } -impl Clone for DiscIOWIA { +impl Clone for BlockReaderWIA { fn clone(&self) -> Self { Self { inner: self.inner.clone(), @@ -522,9 +564,6 @@ impl Clone for DiscIOWIA { groups: self.groups.clone(), nkit_header: self.nkit_header.clone(), decompressor: self.decompressor.clone(), - group: u32::MAX, - group_data: Vec::new(), - exception_lists: Vec::new(), } } } @@ -544,7 +583,7 @@ fn verify_hash(buf: &[u8], expected: &HashBytes) -> Result<()> { Ok(()) } -impl DiscIOWIA { +impl BlockReaderWIA { pub fn new(mut inner: Box) -> Result> { // Load & verify file header inner.seek(SeekFrom::Start(0)).context("Seeking to start")?; @@ -552,7 +591,7 @@ impl DiscIOWIA { read_from(inner.as_mut()).context("Reading WIA/RVZ file header")?; header.validate()?; let is_rvz = header.is_rvz(); - // log::debug!("Header: {:?}", header); + debug!("Header: {:?}", header); // Load & verify disc header let mut disc_buf: Vec = read_vec(inner.as_mut(), header.disc_size.get() as usize) @@ -560,16 +599,8 @@ impl DiscIOWIA { verify_hash(&disc_buf, &header.disc_hash)?; disc_buf.resize(size_of::(), 0); let disc = WIADisc::read_from_bytes(disc_buf.as_slice()).unwrap(); - disc.validate()?; - // if !options.rebuild_hashes { - // // If we're not rebuilding hashes, disable partition hashes in disc header - // disc.disc_head[0x60] = 1; - // } - // if !options.rebuild_encryption { - // // If we're not re-encrypting, disable partition encryption in disc header - // disc.disc_head[0x61] = 1; - // } - // log::debug!("Disc: {:?}", disc); + disc.validate(is_rvz)?; + debug!("Disc: {:?}", disc); // Read NKit header if present (after disc header) let nkit_header = NKitHeader::try_read_from(inner.as_mut(), disc.chunk_size.get(), false); @@ -578,37 +609,43 @@ impl DiscIOWIA { inner .seek(SeekFrom::Start(disc.partition_offset.get())) .context("Seeking to WIA/RVZ partition headers")?; - let partitions: Box<[WIAPartition]> = - read_box_slice(inner.as_mut(), disc.num_partitions.get() as usize) + let partitions: Arc<[WIAPartition]> = + read_arc_slice(inner.as_mut(), disc.num_partitions.get() as usize) .context("Reading WIA/RVZ partition headers")?; verify_hash(partitions.as_ref().as_bytes(), &disc.partition_hash)?; - // log::debug!("Partitions: {:?}", partitions); + debug!("Partitions: {:?}", partitions); // Create decompressor - let mut decompressor = Decompressor::new(&disc)?; + let mut decompressor = Decompressor::new(DecompressionKind::from_wia(&disc)?); // Load raw data headers - let raw_data: Box<[WIARawData]> = { + let raw_data: Arc<[WIARawData]> = { inner .seek(SeekFrom::Start(disc.raw_data_offset.get())) .context("Seeking to WIA/RVZ raw data headers")?; let mut reader = decompressor + .kind .wrap(inner.as_mut().take(disc.raw_data_size.get() as u64)) .context("Creating WIA/RVZ decompressor")?; - 
read_box_slice(&mut reader, disc.num_raw_data.get() as usize) + read_arc_slice(&mut reader, disc.num_raw_data.get() as usize) .context("Reading WIA/RVZ raw data headers")? }; // Validate raw data alignment for (idx, rd) in raw_data.iter().enumerate() { let start_offset = rd.start_offset(); let end_offset = rd.end_offset(); - if (start_offset % SECTOR_SIZE as u64) != 0 || (end_offset % SECTOR_SIZE as u64) != 0 { + let is_last = idx == raw_data.len() - 1; + if (start_offset % SECTOR_SIZE as u64) != 0 + // Allow raw data end to be unaligned if it's the last + || (!is_last && (end_offset % SECTOR_SIZE as u64) != 0) + { return Err(Error::DiscFormat(format!( "WIA/RVZ raw data {} not aligned to sector: {:#X}..{:#X}", idx, start_offset, end_offset ))); } } + debug!("Num raw data: {}", raw_data.len()); // log::debug!("Raw data: {:?}", raw_data); // Load group headers @@ -617,19 +654,21 @@ impl DiscIOWIA { .seek(SeekFrom::Start(disc.group_offset.get())) .context("Seeking to WIA/RVZ group headers")?; let mut reader = decompressor + .kind .wrap(inner.as_mut().take(disc.group_size.get() as u64)) .context("Creating WIA/RVZ decompressor")?; if is_rvz { - read_box_slice(&mut reader, disc.num_groups.get() as usize) + read_arc_slice(&mut reader, disc.num_groups.get() as usize) .context("Reading WIA/RVZ group headers")? } else { - let wia_groups: Box<[WIAGroup]> = - read_box_slice(&mut reader, disc.num_groups.get() as usize) + let wia_groups: Arc<[WIAGroup]> = + read_arc_slice(&mut reader, disc.num_groups.get() as usize) .context("Reading WIA/RVZ group headers")?; wia_groups.iter().map(RVZGroup::from).collect() } - // log::debug!("Groups: {:?}", groups); }; + debug!("Num groups: {}", groups.len()); + // std::fs::write("groups.txt", format!("Groups: {:#?}", groups)).unwrap(); Ok(Box::new(Self { header, @@ -640,260 +679,220 @@ impl DiscIOWIA { inner, nkit_header, decompressor, - group: u32::MAX, - group_data: vec![], - exception_lists: vec![], })) } } -fn read_exception_lists( - reader: &mut R, - in_partition: bool, +fn read_exception_lists( + bytes: &mut Bytes, chunk_size: u32, -) -> io::Result> -where - R: Read + ?Sized, -{ - if !in_partition { - return Ok(vec![]); - } - + align: bool, +) -> io::Result> { + let initial_remaining = bytes.remaining(); // One exception list for each 2 MiB of data let num_exception_list = (chunk_size as usize).div_ceil(0x200000); - // log::debug!("Num exception list: {:?}", num_exception_list); - let mut exception_lists = Vec::with_capacity(num_exception_list); - for i in 0..num_exception_list { - let num_exceptions = read_u16_be(reader)?; - let exceptions: Box<[WIAException]> = read_box_slice(reader, num_exceptions as usize)?; - if !exceptions.is_empty() { - log::debug!("Exception list {}: {:?}", i, exceptions); + let mut exception_lists = vec![WIAExceptionList::default(); num_exception_list]; + for exception_list in exception_lists.iter_mut() { + if bytes.remaining() < 2 { + return Err(io::Error::new( + io::ErrorKind::UnexpectedEof, + "Reading WIA/RVZ exception list count", + )); + } + let num_exceptions = bytes.get_u16(); + if bytes.remaining() < num_exceptions as usize * size_of::() { + return Err(io::Error::new( + io::ErrorKind::UnexpectedEof, + "Reading WIA/RVZ exception list", + )); + } + let mut exceptions = + <[WIAException]>::new_box_zeroed_with_elems(num_exceptions as usize).unwrap(); + bytes.copy_to_slice(exceptions.as_mut_bytes()); + if !exceptions.is_empty() { + debug!("Exception list: {:?}", exceptions); + } + *exception_list = exceptions; + } + if align { + let 
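// A sketch of the exception-list sizing above: one list per 2 MiB of chunk
// data, so a 128 KiB RVZ chunk carries one list and a 4 MiB WIA chunk two.
fn num_exception_lists(chunk_size: u32) -> usize {
    (chunk_size as usize).div_ceil(0x200000)
}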
rem = (initial_remaining - bytes.remaining()) % 4; + if rem != 0 { + bytes.advance(4 - rem); } - exception_lists.push(exceptions); } Ok(exception_lists) } -impl BlockIO for DiscIOWIA { - fn read_block_internal( - &mut self, - out: &mut [u8], - sector: u32, - partition: Option<&PartitionInfo>, - ) -> io::Result { +impl BlockReader for BlockReaderWIA { + #[instrument(name = "BlockReaderWIA::read_block", skip_all)] + fn read_block(&mut self, out: &mut [u8], sector: u32) -> io::Result { let chunk_size = self.disc.chunk_size.get(); let sectors_per_chunk = chunk_size / SECTOR_SIZE as u32; - let in_partition = partition.is_some_and(|info| info.has_encryption); - let (group_index, group_sector, partition_offset) = if in_partition { - let partition = partition.unwrap(); - - // Find the partition - let Some(wia_part) = self.partitions.get(partition.index) else { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - format!("Couldn't find WIA/RVZ partition index {}", partition.index), - )); + let (group_index, group_sector, end_offset, partition_offset, in_partition) = + if let Some((p, pd)) = self.partitions.iter().find_map(|p| { + p.partition_data.iter().find_map(|pd| pd.contains_sector(sector).then_some((p, pd))) + }) { + let pd_group_idx = (sector - pd.first_sector.get()) / sectors_per_chunk; + ( + pd.group_index.get() + pd_group_idx, + pd.first_sector.get() + pd_group_idx * sectors_per_chunk, + pd.end_offset(), + // Data offset within partition data (from start of partition) + (sector - p.partition_data[0].first_sector.get()) as u64 + * SECTOR_DATA_SIZE as u64, + true, + ) + } else if let Some(rd) = self.raw_data.iter().find(|rd| rd.contains_sector(sector)) { + let rd_group_idx = (sector - rd.start_sector()) / sectors_per_chunk; + ( + rd.group_index.get() + rd_group_idx, + rd.start_sector() + rd_group_idx * sectors_per_chunk, + rd.end_offset(), + 0, // Always on a sector boundary + false, + ) + } else { + return Ok(Block::sector(sector, BlockKind::None)); }; - // Sanity check partition sector ranges - let wia_part_start = wia_part.partition_data[0].first_sector.get(); - let wia_part_end = wia_part.partition_data[1].first_sector.get() - + wia_part.partition_data[1].num_sectors.get(); - if partition.data_start_sector != wia_part_start - || partition.data_end_sector != wia_part_end - { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - format!( - "WIA/RVZ partition sector mismatch: {}..{} != {}..{}", - wia_part_start, - wia_part_end, - partition.data_start_sector, - partition.data_end_sector - ), - )); - } - - // Find the partition data for the sector - let Some(pd) = wia_part.partition_data.iter().find(|pd| pd.contains(sector)) else { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - format!("Couldn't find WIA/RVZ partition data for sector {}", sector), - )); - }; - - // Find the group index for the sector - let part_data_sector = sector - pd.first_sector.get(); - let part_group_index = part_data_sector / sectors_per_chunk; - let part_group_sector = part_data_sector % sectors_per_chunk; - if part_group_index >= pd.num_groups.get() { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - format!( - "WIA/RVZ partition group index out of range: {} >= {}", - part_group_index, - pd.num_groups.get() - ), - )); - } - - // Calculate the group offset within the partition - let part_group_offset = - (((part_group_index * sectors_per_chunk) + pd.first_sector.get()) - - wia_part.partition_data[0].first_sector.get()) as u64 - * SECTOR_DATA_SIZE as u64; - 
(pd.group_index.get() + part_group_index, part_group_sector, part_group_offset) + // Round up to handle unaligned raw data end offset + let end_sector = end_offset.div_ceil(SECTOR_SIZE as u64) as u32; + let group_sectors = (end_sector - group_sector).min(sectors_per_chunk); + let group_size = if in_partition { + // Partition data does not include hashes + group_sectors * SECTOR_DATA_SIZE as u32 } else { - let Some(rd) = self.raw_data.iter().find(|d| d.contains(sector)) else { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - format!("Couldn't find WIA/RVZ raw data for sector {}", sector), - )); - }; - - // Find the group index for the sector - let data_sector = sector - (rd.raw_data_offset.get() / SECTOR_SIZE as u64) as u32; - let group_index = data_sector / sectors_per_chunk; - let group_sector = data_sector % sectors_per_chunk; - if group_index >= rd.num_groups.get() { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - format!( - "WIA/RVZ raw data group index out of range: {} >= {}", - group_index, - rd.num_groups.get() - ), - )); - } - - (rd.group_index.get() + group_index, group_sector, 0) + (group_sectors as u64 * SECTOR_SIZE as u64) + // Last group might be smaller than a sector + .min(end_offset - (group_sector as u64 * SECTOR_SIZE as u64)) as u32 }; + if group_size as usize > out.len() { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!( + "Output buffer too small for WIA/RVZ group data: {} < {}", + out.len(), + group_size + ), + )); + } // Fetch the group let Some(group) = self.groups.get(group_index as usize) else { return Err(io::Error::new( - io::ErrorKind::InvalidInput, + io::ErrorKind::InvalidData, format!("Couldn't find WIA/RVZ group index {}", group_index), )); }; // Special case for all-zero data if group.data_size() == 0 { - self.exception_lists.clear(); - return Ok(Block::Zero); + return Ok(Block::sectors(group_sector, group_sectors, BlockKind::Zero)); } - // Read group data if necessary - if group_index != self.group { - let group_data_size = if in_partition { - // Within a partition, hashes are excluded from the data size - (sectors_per_chunk * SECTOR_DATA_SIZE as u32) as usize - } else { - chunk_size as usize - }; - self.group_data = Vec::with_capacity(group_data_size); - let group_data_start = group.data_offset.get() as u64 * 4; - self.inner.seek(SeekFrom::Start(group_data_start))?; + let group_data_start = group.data_offset.get() as u64 * 4; + let mut group_data = BytesMut::zeroed(group.data_size() as usize); + let io_start = Instant::now(); + self.inner.seek(SeekFrom::Start(group_data_start))?; + self.inner.read_exact(group_data.as_mut())?; + let io_duration = io_start.elapsed(); + let mut group_data = group_data.freeze(); - let mut reader = (&mut self.inner).take_seek(group.data_size() as u64); - let uncompressed_exception_lists = - matches!(self.disc.compression(), WIACompression::None | WIACompression::Purge) - || !group.is_compressed(); - if uncompressed_exception_lists { - self.exception_lists = - read_exception_lists(&mut reader, in_partition, self.disc.chunk_size.get())?; - // Align to 4 - let rem = reader.stream_position()? % 4; - if rem != 0 { - reader.seek(SeekFrom::Current((4 - rem) as i64))?; - } - } - let mut reader: Box = if group.is_compressed() { - self.decompressor.wrap(reader)? 
- } else { - Box::new(reader) - }; - if !uncompressed_exception_lists { - self.exception_lists = read_exception_lists( - reader.as_mut(), - in_partition, - self.disc.chunk_size.get(), - )?; - } - - if group.rvz_packed_size.get() > 0 { - // Decode RVZ packed data - let mut lfg = LaggedFibonacci::default(); - loop { - let mut size_bytes = [0u8; 4]; - match reader.read_exact(&mut size_bytes) { - Ok(_) => {} - Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => break, - Err(e) => { - return Err(io::Error::new(e.kind(), "Failed to read RVZ packed size")); - } - } - let size = u32::from_be_bytes(size_bytes); - let cur_data_len = self.group_data.len(); - if size & 0x80000000 != 0 { - // Junk data - let size = size & 0x7FFFFFFF; - lfg.init_with_reader(reader.as_mut())?; - lfg.skip( - ((partition_offset + cur_data_len as u64) % SECTOR_SIZE as u64) - as usize, - ); - self.group_data.resize(cur_data_len + size as usize, 0); - lfg.fill(&mut self.group_data[cur_data_len..]); - } else { - // Real data - self.group_data.resize(cur_data_len + size as usize, 0); - reader.read_exact(&mut self.group_data[cur_data_len..])?; - } - } - } else { - // Read and decompress data - reader.read_to_end(&mut self.group_data)?; - } - - self.group = group_index; + let uncompressed_exception_lists = + matches!(self.disc.compression(), WIACompression::None | WIACompression::Purge) + || !group.is_compressed(); + let mut exception_lists = vec![]; + if in_partition && uncompressed_exception_lists { + exception_lists = read_exception_lists(&mut group_data, chunk_size, true)?; } - - // Read sector from cached group data - if in_partition { - let sector_data_start = group_sector as usize * SECTOR_DATA_SIZE; - out[..HASHES_SIZE].fill(0); - out[HASHES_SIZE..SECTOR_SIZE].copy_from_slice( - &self.group_data[sector_data_start..sector_data_start + SECTOR_DATA_SIZE], - ); - Ok(Block::PartDecrypted { has_hashes: false }) + let mut decompressed = if group.is_compressed() { + let mut decompressed = BytesMut::zeroed(chunk_size as usize); + let len = self.decompressor.decompress(group_data.as_ref(), decompressed.as_mut())?; + decompressed.truncate(len); + decompressed.freeze() } else { - let sector_data_start = group_sector as usize * SECTOR_SIZE; - out.copy_from_slice( - &self.group_data[sector_data_start..sector_data_start + SECTOR_SIZE], - ); - Ok(Block::Raw) + group_data + }; + if in_partition && !uncompressed_exception_lists { + exception_lists = read_exception_lists(&mut decompressed, chunk_size, false)?; } + + if group.rvz_packed_size.get() > 0 { + // Decode RVZ packed data + let mut read = 0; + let mut lfg = LaggedFibonacci::default(); + while decompressed.remaining() >= 4 { + let size = decompressed.get_u32(); + if size & 0x80000000 != 0 { + // Junk data + let size = size & 0x7FFFFFFF; + lfg.init_with_buf(&mut decompressed)?; + lfg.skip(((partition_offset + read as u64) % SECTOR_SIZE as u64) as usize); + lfg.fill(&mut out[read..read + size as usize]); + read += size as usize; + } else { + // Real data + decompressed.copy_to_slice(&mut out[read..read + size as usize]); + read += size as usize; + } + } + if read != group_size as usize { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!("RVZ packed data size mismatch: {} != {}", read, group_size), + )); + } + } else { + // Read and decompress data + if decompressed.remaining() != group_size as usize { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!( + "WIA/RVZ group {} data size mismatch: {} != {}", + group_index, + decompressed.remaining(), + 
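// A sketch of the RVZ "packed" stream decoded above: a sequence of big-endian
// u32 size words where the high bit marks a seeded junk run (regenerated via
// the lagged-Fibonacci PRNG); otherwise `size` literal bytes follow.
enum RvzRun {
    Junk { len: u32 },    // an LFG seed follows in the stream
    Literal { len: u32 }, // raw bytes follow
}
fn decode_run(word: u32) -> RvzRun {
    if word & 0x8000_0000 != 0 {
        RvzRun::Junk { len: word & 0x7FFF_FFFF }
    } else {
        RvzRun::Literal { len: word }
    }
}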
group_size + ), + )); + } + decompressed.copy_to_slice(&mut out[..group_size as usize]); + } + if !decompressed.is_empty() { + return Err(io::Error::new(io::ErrorKind::Other, "Failed to consume all group data")); + } + + // Read first 0x80 bytes from disc header + if group_sector == 0 { + *array_ref_mut![out, 0, DISC_HEAD_SIZE] = self.disc.disc_head; + } + + let mut block = if in_partition { + let mut block = Block::sectors(group_sector, group_sectors, BlockKind::PartDecrypted { + hash_block: false, + }); + block.hash_exceptions = exception_lists.into_boxed_slice(); + block + } else { + Block::sectors(group_sector, group_sectors, BlockKind::Raw) + }; + block.io_duration = Some(io_duration); + Ok(block) } - fn block_size_internal(&self) -> u32 { - // WIA/RVZ chunks aren't always the full size, so we'll consider the - // block size to be one sector, and handle the complexity ourselves. - SECTOR_SIZE as u32 - } + fn block_size(&self) -> u32 { self.disc.chunk_size.get() } fn meta(&self) -> DiscMeta { + let level = self.disc.compression_level.get(); let mut result = DiscMeta { format: if self.header.is_rvz() { Format::Rvz } else { Format::Wia }, block_size: Some(self.disc.chunk_size.get()), compression: match self.disc.compression() { - WIACompression::None => Compression::None, - WIACompression::Purge => Compression::Purge, - WIACompression::Bzip2 => Compression::Bzip2, - WIACompression::Lzma => Compression::Lzma, - WIACompression::Lzma2 => Compression::Lzma2, - WIACompression::Zstandard => Compression::Zstandard, + WIACompression::None | WIACompression::Purge => Compression::None, + WIACompression::Bzip2 => Compression::Bzip2(level as u8), + WIACompression::Lzma => Compression::Lzma(level as u8), + WIACompression::Lzma2 => Compression::Lzma2(level as u8), + WIACompression::Zstandard => Compression::Zstandard(level as i8), }, decrypted: true, needs_hash_recovery: true, @@ -907,3 +906,637 @@ impl BlockIO for DiscIOWIA { result } } + +struct BlockProcessorWIA { + inner: DiscReader, + header: WIAFileHeader, + disc: WIADisc, + partitions: Arc<[WIAPartition]>, + raw_data: Arc<[WIARawData]>, + compressor: Compressor, + // lfg: LaggedFibonacci, +} + +impl Clone for BlockProcessorWIA { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + header: self.header.clone(), + disc: self.disc.clone(), + partitions: self.partitions.clone(), + raw_data: self.raw_data.clone(), + compressor: self.compressor.clone(), + // lfg: LaggedFibonacci::default(), + } + } +} + +#[allow(unused)] +struct BlockMetaWIA { + is_compressed: bool, + is_rvz_packed: bool, + data_size: u32, // Not aligned +} + +impl BlockProcessor for BlockProcessorWIA { + type BlockMeta = BlockMetaWIA; + + #[instrument(name = "BlockProcessorWIA::process_block", skip_all)] + fn process_block(&mut self, group_idx: u32) -> io::Result> { + let is_rvz = self.header.is_rvz(); + let chunk_size = self.disc.chunk_size.get() as u64; + let (group_start, section_end, key) = if let Some((p, pd)) = + self.partitions.iter().find_map(|p| { + p.partition_data + .iter() + .find_map(|pd| pd.contains_group(group_idx).then_some((p, pd))) + }) { + let part_group_offset = (group_idx - pd.group_index.get()) as u64 * chunk_size; + (pd.start_offset() + part_group_offset, pd.end_offset(), Some(p.partition_key)) + } else if let Some(rd) = self.raw_data.iter().find(|rd| rd.contains_group(group_idx)) { + ( + rd.start_offset() + (group_idx - rd.group_index.get()) as u64 * chunk_size, + rd.end_offset(), + None, + ) + } else { + return Err(io::Error::new( + 
io::ErrorKind::Other, + format!("Couldn't find partition or raw data for group {}", group_idx), + )); + }; + + let group_size = (section_end - group_start).min(chunk_size) as usize; + self.inner.seek(SeekFrom::Start(group_start))?; + let (_, disc_data) = read_block(&mut self.inner, group_size)?; + + // Decrypt group and calculate hash exceptions + let (block_data, data_size, exceptions_end) = if let Some(key) = key { + if disc_data.len() % SECTOR_SIZE != 0 { + return Err(io::Error::new( + io::ErrorKind::Other, + "Partition group size not aligned to sector", + )); + } + let num_exception_list = (chunk_size as usize).div_ceil(0x200000); // 2 MiB + let mut buf = BytesMut::with_capacity(chunk_size as usize); + for _ in 0..num_exception_list { + buf.put_u16(0); // num_exceptions + } + + // Align to 4 after exception lists. + // We'll "undo" this for compression, see below. + let exceptions_end = buf.len(); + let rem = buf.len() % 4; + if rem != 0 { + buf.put_bytes(0, 4 - rem); + } + + for i in 0..disc_data.len() / SECTOR_SIZE { + let offset = buf.len(); + buf.resize(offset + SECTOR_DATA_SIZE, 0); + decrypt_sector_data_b2b( + array_ref![disc_data, i * SECTOR_SIZE, SECTOR_SIZE], + array_ref_mut![buf, offset, SECTOR_DATA_SIZE], + &key, + ); + // TODO hash exceptions + } + + // Use pre-alignment for data size + let data_size = buf.len() as u32; + // Align to 4 + let rem = buf.len() % 4; + if rem != 0 { + buf.put_bytes(0, 4 - rem); + } + (buf.freeze(), data_size, exceptions_end) + } else { + if disc_data.len() % 4 != 0 { + return Err(io::Error::new(io::ErrorKind::Other, "Raw data size not aligned to 4")); + } + (disc_data.clone(), disc_data.len() as u32, 0) + }; + + // Compress group + let buf = &block_data[..data_size as usize]; + if buf.iter().all(|&b| b == 0) { + // Skip empty group + return Ok(BlockResult { + block_idx: group_idx, + disc_data, + block_data: Bytes::new(), + meta: BlockMetaWIA { is_compressed: false, is_rvz_packed: false, data_size: 0 }, + }); + } + if self.compressor.kind != Compression::None { + let rem = exceptions_end % 4; + let compressed = if rem != 0 { + // Annoyingly, hash exceptions are aligned to 4 bytes _only if_ they're uncompressed. + // We need to create an entirely separate buffer _without_ the alignment for + // compression. If we end up writing the uncompressed data, we'll use the original, + // aligned buffer. + let pad = 4 - rem; + let mut buf = <[u8]>::new_box_zeroed_with_elems(data_size as usize - pad).unwrap(); + buf[..exceptions_end].copy_from_slice(&block_data[..exceptions_end]); + buf[exceptions_end..].copy_from_slice(&block_data[exceptions_end + pad..]); + self.compressor.compress(buf.as_ref()) + } else { + self.compressor.compress(buf) + } + .map_err(|e| { + io::Error::new(io::ErrorKind::Other, format!("Failed to compress group: {}", e)) + })?; + if compressed { + let compressed_size = self.compressor.buffer.len(); + // For WIA, we must always store compressed data. + // For RVZ, only store compressed data if it's smaller than uncompressed. 
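// A sketch of the rule described above: WIA must store the compressed form,
// while RVZ keeps whichever of compressed/uncompressed is smaller once the
// compressed form is padded to 4-byte alignment (mirroring align_up_32).
fn store_compressed(is_rvz: bool, compressed_len: u32, uncompressed_len: u32) -> bool {
    let align4 = |n: u32| (n + 3) & !3;
    !is_rvz || align4(compressed_len) < uncompressed_len
}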
+ if !is_rvz || align_up_32(compressed_size as u32, 4) < data_size { + let rem = compressed_size % 4; + if rem != 0 { + // Align to 4 + self.compressor.buffer.resize(compressed_size + (4 - rem), 0); + } + let block_data = Bytes::copy_from_slice(self.compressor.buffer.as_slice()); + return Ok(BlockResult { + block_idx: group_idx, + disc_data, + block_data, + meta: BlockMetaWIA { + is_compressed: true, + is_rvz_packed: false, + data_size: compressed_size as u32, + }, + }); + } + } else if !is_rvz { + return Err(io::Error::new( + io::ErrorKind::Other, + format!( + "Failed to compress group {}: len {}, capacity {}", + group_idx, + self.compressor.buffer.len(), + self.compressor.buffer.capacity() + ), + )); + } + } + + // Store uncompressed group + Ok(BlockResult { + block_idx: group_idx, + disc_data, + block_data, + meta: BlockMetaWIA { is_compressed: false, is_rvz_packed: false, data_size }, + }) + } +} + +#[allow(unused)] +fn try_rvz_pack(_data: &[u8]) -> bool { todo!("RVZ packing") } + +#[derive(Clone)] +pub struct DiscWriterWIA { + inner: DiscReader, + header: WIAFileHeader, + disc: WIADisc, + partitions: Arc<[WIAPartition]>, + raw_data: Arc<[WIARawData]>, + group_count: u32, + data_start: u32, + is_rvz: bool, + compression: Compression, + initial_header_data: Bytes, // TODO remove +} + +#[inline] +fn partition_offset_to_raw(partition_offset: u64) -> u64 { + (partition_offset / SECTOR_DATA_SIZE as u64) * SECTOR_SIZE as u64 +} + +pub const RVZ_DEFAULT_CHUNK_SIZE: u32 = 0x20000; // 128 KiB +pub const WIA_DEFAULT_CHUNK_SIZE: u32 = 0x200000; // 2 MiB + +// Level 0 will be converted to the default level in [`Compression::validate_level`] +pub const RVZ_DEFAULT_COMPRESSION: Compression = Compression::Zstandard(0); +pub const WIA_DEFAULT_COMPRESSION: Compression = Compression::Lzma(0); + +impl DiscWriterWIA { + pub fn new(inner: DiscReader, options: &FormatOptions) -> Result> { + let is_rvz = options.format == Format::Rvz; + let chunk_size = options.block_size; + + let disc_header = inner.header(); + let disc_size = inner.disc_size(); + + let mut num_partitions = 0; + let mut num_raw_data = 1; + let partition_info = inner.partitions(); + for partition in partition_info { + if !partition.has_hashes { + continue; + } + num_partitions += 1; + num_raw_data += 1; + } + // println!("Num partitions: {}", num_partitions); + // println!("Num raw data: {}", num_raw_data); + + // Write header + let header = WIAFileHeader { + magic: if is_rvz { RVZ_MAGIC } else { WIA_MAGIC }, + version: if is_rvz { RVZ_VERSION } else { WIA_VERSION }.into(), + version_compatible: if is_rvz { + RVZ_VERSION_WRITE_COMPATIBLE + } else { + WIA_VERSION_WRITE_COMPATIBLE + } + .into(), + disc_size: (size_of::() as u32).into(), + disc_hash: Default::default(), + iso_file_size: disc_size.into(), + wia_file_size: Default::default(), + file_head_hash: Default::default(), + }; + let mut header_data = BytesMut::new(); + header_data.put_slice(header.as_bytes()); + + let (compression, level) = match compression_to_wia(options.compression) { + Some(v) => v, + None => { + return Err(Error::Other(format!( + "Unsupported compression for WIA/RVZ: {}", + options.compression + ))) + } + }; + let compr_data = compr_data(options.compression).context("Building compression data")?; + let mut disc = WIADisc { + disc_type: if disc_header.is_wii() { DiscKind::Wii } else { DiscKind::GameCube }.into(), + compression: compression.into(), + compression_level: level.into(), + chunk_size: chunk_size.into(), + disc_head: *array_ref![disc_header.as_bytes(), 0, 
DISC_HEAD_SIZE], + num_partitions: num_partitions.into(), + partition_type_size: (size_of::() as u32).into(), + partition_offset: Default::default(), + partition_hash: Default::default(), + num_raw_data: num_raw_data.into(), + raw_data_offset: Default::default(), + raw_data_size: Default::default(), + num_groups: Default::default(), + group_offset: Default::default(), + group_size: Default::default(), + compr_data_len: compr_data.len() as u8, + compr_data: Default::default(), + }; + disc.compr_data[..compr_data.len()].copy_from_slice(compr_data.as_ref()); + disc.validate(is_rvz)?; + header_data.put_slice(disc.as_bytes()); + + let nkit_header = NKitHeader { + version: 2, + size: Some(disc_size), + crc32: Some(Default::default()), + md5: Some(Default::default()), + sha1: Some(Default::default()), + xxh64: Some(Default::default()), + junk_bits: None, + encrypted: false, + }; + let mut w = header_data.writer(); + nkit_header.write_to(&mut w).context("Writing NKit header")?; + let mut header_data = w.into_inner(); + + let mut partitions = <[WIAPartition]>::new_box_zeroed_with_elems(num_partitions as usize)?; + let mut raw_data = <[WIARawData]>::new_box_zeroed_with_elems(num_raw_data as usize)?; + + let mut raw_data_idx = 0; + let mut group_idx = 0; + for (partition, wia_partition) in + partition_info.iter().filter(|p| p.has_hashes).zip(partitions.iter_mut()) + { + let partition_start = partition.data_start_sector as u64 * SECTOR_SIZE as u64; + let partition_end = partition.data_end_sector as u64 * SECTOR_SIZE as u64; + + let partition_header = partition.partition_header.as_ref(); + let management_data_end = align_up_64( + partition_header.fst_offset(true) + partition_header.fst_size(true), + 0x200000, // Align to 2 MiB + ); + let management_end_sector = ((partition_start + + partition_offset_to_raw(management_data_end)) + .min(partition_end) + / SECTOR_SIZE as u64) as u32; + + { + let cur_raw_data = &mut raw_data[raw_data_idx]; + let raw_data_size = partition_start - cur_raw_data.raw_data_offset.get(); + let raw_data_groups = raw_data_size.div_ceil(chunk_size as u64) as u32; + cur_raw_data.raw_data_size = raw_data_size.into(); + cur_raw_data.group_index = group_idx.into(); + cur_raw_data.num_groups = raw_data_groups.into(); + group_idx += raw_data_groups; + raw_data_idx += 1; + } + + wia_partition.partition_key = partition.key; + + let management_num_sectors = management_end_sector - partition.data_start_sector; + let management_num_groups = (management_num_sectors as u64 * SECTOR_SIZE as u64) + .div_ceil(chunk_size as u64) as u32; + wia_partition.partition_data[0] = WIAPartitionData { + first_sector: partition.data_start_sector.into(), + num_sectors: management_num_sectors.into(), + group_index: group_idx.into(), + num_groups: management_num_groups.into(), + }; + group_idx += management_num_groups; + + let data_num_sectors = partition.data_end_sector - management_end_sector; + let data_num_groups = + (data_num_sectors as u64 * SECTOR_SIZE as u64).div_ceil(chunk_size as u64) as u32; + wia_partition.partition_data[1] = WIAPartitionData { + first_sector: management_end_sector.into(), + num_sectors: data_num_sectors.into(), + group_index: group_idx.into(), + num_groups: data_num_groups.into(), + }; + group_idx += data_num_groups; + + let next_raw_data = &mut raw_data[raw_data_idx]; + next_raw_data.raw_data_offset = partition_end.into(); + } + disc.partition_hash = sha1_hash(partitions.as_bytes()); + + { + // Remaining raw data + let cur_raw_data = &mut raw_data[raw_data_idx]; + let raw_data_size = 
disc_size - cur_raw_data.raw_data_offset.get();
+            let raw_data_groups = raw_data_size.div_ceil(chunk_size as u64) as u32;
+            cur_raw_data.raw_data_size = raw_data_size.into();
+            cur_raw_data.group_index = group_idx.into();
+            cur_raw_data.num_groups = raw_data_groups.into();
+            group_idx += raw_data_groups;
+        }
+
+        disc.num_groups = group_idx.into();
+        let raw_data_size = size_of::<WIARawData>() as u32 * num_raw_data;
+        let group_size =
+            if is_rvz { size_of::<RVZGroup>() } else { size_of::<WIAGroup>() } as u32 * group_idx;
+
+        header_data.put_slice(partitions.as_bytes());
+        header_data.put_bytes(0, raw_data_size as usize);
+        header_data.put_bytes(0, group_size as usize);
+        // Group data alignment
+        let rem = header_data.len() % 4;
+        if rem != 0 {
+            header_data.put_bytes(0, 4 - rem);
+        }
+
+        // println!("Header: {:?}", header);
+        // println!("Disc: {:?}", disc);
+        // println!("Partitions: {:?}", partitions);
+        // println!("Raw data: {:?}", raw_data);
+
+        let data_start = header_data.len() as u32;
+
+        Ok(Box::new(Self {
+            inner,
+            header,
+            disc,
+            partitions: Arc::from(partitions),
+            raw_data: Arc::from(raw_data),
+            group_count: group_idx,
+            data_start,
+            is_rvz,
+            compression: options.compression,
+            initial_header_data: header_data.freeze(),
+        }))
+    }
+}
+
+impl DiscWriter for DiscWriterWIA {
+    fn process(
+        &self,
+        data_callback: &mut DataCallback,
+        options: &ProcessOptions,
+    ) -> Result<DiscFinalization> {
+        let disc_size = self.inner.disc_size();
+        data_callback(self.initial_header_data.clone(), 0, disc_size)
+            .context("Failed to write WIA/RVZ header")?;
+
+        let chunk_size = self.disc.chunk_size.get();
+        let compressor_buf_size = if self.is_rvz {
+            // For RVZ, if a group's compressed size is larger than uncompressed, we discard it.
+            // This means we can just allocate a buffer for the chunk size.
+            chunk_size as usize
+        } else {
+            // For WIA, we can't mark groups as uncompressed, so we need to compress them all.
+            // This means our compression buffer needs to account for worst-case compression.
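+            // Editorial note, using the compress_bound() heuristics below: for a
+            // 2 MiB chunk (2,097,152 bytes), the bound is 2,621,440 bytes for bzip2
+            // (size + size/4), 2,370,868 for LZMA (size + size/10 + 64,000), and
+            // 2,100,250 for LZMA2 (size + size/1000 + 1,000).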
+ compress_bound(self.compression, chunk_size as usize) + }; + let mut compressor = Compressor::new(self.compression, compressor_buf_size); + + let digest = DigestManager::new(options); + let mut input_position = 0; + let mut file_position = self.data_start as u64; + let mut groups = <[RVZGroup]>::new_box_zeroed_with_elems(self.group_count as usize)?; + par_process( + || BlockProcessorWIA { + inner: self.inner.clone(), + header: self.header.clone(), + disc: self.disc.clone(), + partitions: self.partitions.clone(), + raw_data: self.raw_data.clone(), + compressor: compressor.clone(), + // lfg: LaggedFibonacci::default(), + }, + self.group_count, + options.processor_threads, + |group| -> Result<()> { + // Update hashers + let disc_data_len = group.disc_data.len() as u64; + digest.send(group.disc_data); + + let group_idx = group.block_idx; + if file_position % 4 != 0 { + return Err(Error::Other("File position not aligned to 4".to_string())); + } + groups[group_idx as usize] = RVZGroup { + data_offset: ((file_position / 4) as u32).into(), + data_size_and_flag: (group.meta.data_size + | if group.meta.is_compressed { 0x80000000 } else { 0 }) + .into(), + rvz_packed_size: 0.into(), // TODO + }; + + // Write group data + input_position += disc_data_len; + if group.block_data.len() % 4 != 0 { + return Err(Error::Other("Group data size not aligned to 4".to_string())); + } + file_position += group.block_data.len() as u64; + data_callback(group.block_data, input_position, disc_size) + .with_context(|| format!("Failed to write group {group_idx}"))?; + Ok(()) + }, + )?; + + // Collect hash results + let digest_results = digest.finish(); + let mut nkit_header = NKitHeader { + version: 2, + size: Some(disc_size), + crc32: None, + md5: None, + sha1: None, + xxh64: None, + junk_bits: None, + encrypted: false, + }; + nkit_header.apply_digests(&digest_results); + let mut nkit_header_data = Vec::new(); + nkit_header.write_to(&mut nkit_header_data).context("Writing NKit header")?; + + let mut header = self.header.clone(); + let mut disc = self.disc.clone(); + + // Compress raw data and groups + compressor.buffer = Vec::with_capacity(self.data_start as usize); + if !compressor.compress(self.raw_data.as_bytes()).context("Compressing raw data")? { + return Err(Error::Other("Failed to compress raw data".to_string())); + } + let compressed_raw_data = compressor.buffer.clone(); + // println!( + // "Compressed raw data: {} -> {} (max size {})", + // self.raw_data.as_bytes().len(), + // compressed_raw_data.len(), + // self.data_start + // ); + disc.raw_data_size = (compressed_raw_data.len() as u32).into(); + + let groups_data = if self.is_rvz { + Cow::Borrowed(groups.as_bytes()) + } else { + let mut groups_buf = Vec::with_capacity(groups.len() * size_of::()); + for group in &groups { + if compressor.kind != Compression::None + && !group.is_compressed() + && group.data_size() > 0 + { + return Err(Error::Other("Uncompressed group in compressed WIA".to_string())); + } + if group.rvz_packed_size.get() > 0 { + return Err(Error::Other("RVZ packed group in WIA".to_string())); + } + groups_buf.extend_from_slice(WIAGroup::from(group).as_bytes()); + } + Cow::Owned(groups_buf) + }; + if !compressor.compress(groups_data.as_ref()).context("Compressing groups")? 
{
+            return Err(Error::Other("Failed to compress groups".to_string()));
+        }
+        let compressed_groups = compressor.buffer;
+        // println!(
+        //     "Compressed groups: {} -> {} (max size {})",
+        //     groups_data.len(),
+        //     compressed_groups.len(),
+        //     self.data_start
+        // );
+        disc.group_size = (compressed_groups.len() as u32).into();
+
+        // Update header and calculate hashes
+        let mut header_offset = size_of::<WIAFileHeader>() as u32
+            + size_of::<WIADisc>() as u32
+            + nkit_header_data.len() as u32;
+        disc.partition_offset = (header_offset as u64).into();
+        header_offset += size_of_val(self.partitions.as_ref()) as u32;
+        disc.raw_data_offset = (header_offset as u64).into();
+        header_offset += compressed_raw_data.len() as u32;
+        disc.group_offset = (header_offset as u64).into();
+        header_offset += compressed_groups.len() as u32;
+        if header_offset > self.data_start {
+            return Err(Error::Other("Header offset exceeds max".to_string()));
+        }
+        header.disc_hash = sha1_hash(disc.as_bytes());
+        header.wia_file_size = file_position.into();
+        let header_bytes = header.as_bytes();
+        header.file_head_hash =
+            sha1_hash(&header_bytes[..size_of::<WIAFileHeader>() - size_of::<HashBytes>()]);
+
+        let mut header_data = BytesMut::with_capacity(header_offset as usize);
+        header_data.put_slice(header.as_bytes());
+        header_data.put_slice(disc.as_bytes());
+        header_data.put_slice(&nkit_header_data);
+        header_data.put_slice(self.partitions.as_bytes());
+        header_data.put_slice(&compressed_raw_data);
+        header_data.put_slice(&compressed_groups);
+        if header_data.len() as u32 != header_offset {
+            return Err(Error::Other("Header offset mismatch".to_string()));
+        }
+
+        let mut finalization =
+            DiscFinalization { header: header_data.freeze(), ..Default::default() };
+        finalization.apply_digests(&digest_results);
+        Ok(finalization)
+    }
+
+    fn progress_bound(&self) -> u64 { self.inner.disc_size() }
+
+    fn weight(&self) -> DiscWriterWeight {
+        if self.disc.compression() == WIACompression::None {
+            DiscWriterWeight::Medium
+        } else {
+            DiscWriterWeight::Heavy
+        }
+    }
+}
+
+fn compression_to_wia(compression: Compression) -> Option<(WIACompression, i32)> {
+    match compression {
+        Compression::None => Some((WIACompression::None, 0)),
+        Compression::Bzip2(level) => Some((WIACompression::Bzip2, level as i32)),
+        Compression::Lzma(level) => Some((WIACompression::Lzma, level as i32)),
+        Compression::Lzma2(level) => Some((WIACompression::Lzma2, level as i32)),
+        Compression::Zstandard(level) => Some((WIACompression::Zstandard, level as i32)),
+        _ => None,
+    }
+}
+
+fn compr_data(compression: Compression) -> io::Result<Box<[u8]>> {
+    match compression {
+        #[cfg(feature = "compress-lzma")]
+        Compression::Lzma(level) => {
+            let options = liblzma::stream::LzmaOptions::new_preset(level as u32)?;
+            Ok(Box::new(crate::util::compress::lzma_util::lzma_props_encode(&options)?))
+        }
+        #[cfg(feature = "compress-lzma")]
+        Compression::Lzma2(level) => {
+            let options = liblzma::stream::LzmaOptions::new_preset(level as u32)?;
+            Ok(Box::new(crate::util::compress::lzma_util::lzma2_props_encode(&options)?))
+        }
+        _ => Ok(Box::default()),
+    }
+}
+
+fn compress_bound(compression: Compression, size: usize) -> usize {
+    match compression {
+        Compression::None => size,
+        Compression::Bzip2(_) => {
+            // 1.25 * size
+            size.div_ceil(4) + size
+        }
+        Compression::Lzma(_) => {
+            // 1.1 * size + 64 KiB
+            size.div_ceil(10) + size + 64000
+        }
+        Compression::Lzma2(_) => {
+            // 1.001 * size + 1 KiB
+            size.div_ceil(1000) + size + 1000
+        }
+        #[cfg(feature = "compress-zstd")]
+        Compression::Zstandard(_) =>
zstd_safe::compress_bound(size), + _ => unimplemented!("CompressionKind::compress_bound {:?}", compression), + } +} diff --git a/nod/src/lib.rs b/nod/src/lib.rs index 0a8f67f..05deea0 100644 --- a/nod/src/lib.rs +++ b/nod/src/lib.rs @@ -1,4 +1,5 @@ -#![warn(missing_docs, clippy::missing_inline_in_public_items)] +#![allow(clippy::new_ret_no_self)] +#![warn(missing_docs)] //! Library for traversing & reading Nintendo Optical Disc (GameCube and Wii) images. //! //! Originally based on the C++ library [nod](https://github.com/AxioDL/nod), @@ -11,6 +12,7 @@ //! - CISO (+ NKit 2 lossless) //! - NFS (Wii U VC) //! - GCZ +//! - TGC //! //! # Examples //! @@ -19,17 +21,21 @@ //! ```no_run //! use std::io::Read; //! +//! use nod::{ +//! common::PartitionKind, +//! read::{DiscOptions, DiscReader, PartitionOptions}, +//! }; +//! //! // Open a disc image and the first data partition. -//! let disc = nod::Disc::new("path/to/file.iso") -//! .expect("Failed to open disc"); -//! let mut partition = disc.open_partition_kind(nod::PartitionKind::Data) +//! let disc = +//! DiscReader::new("path/to/file.iso", &DiscOptions::default()).expect("Failed to open disc"); +//! let mut partition = disc +//! .open_partition_kind(PartitionKind::Data, &PartitionOptions::default()) //! .expect("Failed to open data partition"); //! //! // Read partition metadata and the file system table. -//! let meta = partition.meta() -//! .expect("Failed to read partition metadata"); -//! let fst = meta.fst() -//! .expect("File system table is invalid"); +//! let meta = partition.meta().expect("Failed to read partition metadata"); +//! let fst = meta.fst().expect("File system table is invalid"); //! //! // Find a file by path and read it into a string. //! if let Some((_, node)) = fst.find("/MP3/Worlds.txt") { @@ -46,38 +52,106 @@ //! Converting a disc image to raw ISO: //! //! ```no_run -//! // Enable `PartitionEncryptionMode::Original` to ensure the output is a valid ISO. -//! let options = nod::OpenOptions { partition_encryption: nod::PartitionEncryptionMode::Original }; -//! let mut disc = nod::Disc::new_with_options("path/to/file.rvz", &options) -//! .expect("Failed to open disc"); +//! use nod::read::{DiscOptions, DiscReader, PartitionEncryption}; //! -//! // Read directly from the open disc and write to the output file. -//! let mut out = std::fs::File::create("output.iso") -//! .expect("Failed to create output file"); -//! std::io::copy(&mut disc, &mut out) -//! .expect("Failed to write data"); +//! let options = DiscOptions { +//! partition_encryption: PartitionEncryption::Original, +//! // Use 4 threads to preload data as the disc is read. This can speed up sequential reads, +//! // especially when the disc image format uses compression. +//! preloader_threads: 4, +//! }; +//! // Open a disc image. +//! let mut disc = DiscReader::new("path/to/file.rvz", &options).expect("Failed to open disc"); +//! +//! // Create a new output file. +//! let mut out = std::fs::File::create("output.iso").expect("Failed to create output file"); +//! // Read directly from the DiscReader and write to the output file. +//! // NOTE: Any copy method that accepts `Read` and `Write` can be used here, +//! // such as `std::io::copy`. This example utilizes `BufRead` for efficiency, +//! // since `DiscReader` has its own internal buffer. +//! nod::util::buf_copy(&mut disc, &mut out).expect("Failed to write data"); //! ``` +//! +//! Converting a disc image to RVZ: +//! +//! ```no_run +//! use std::fs::File; +//! use std::io::{Seek, Write}; +//! 
use nod::common::{Compression, Format}; +//! use nod::read::{DiscOptions, DiscReader, PartitionEncryption}; +//! use nod::write::{DiscWriter, DiscWriterWeight, FormatOptions, ProcessOptions}; +//! +//! let open_options = DiscOptions { +//! partition_encryption: PartitionEncryption::Original, +//! // Use 4 threads to preload data as the disc is read. This can speed up sequential reads, +//! // especially when the disc image format uses compression. +//! preloader_threads: 4, +//! }; +//! // Open a disc image. +//! let disc = DiscReader::new("path/to/file.iso", &open_options) +//! .expect("Failed to open disc"); +//! // Create a new output file. +//! let mut output_file = File::create("output.rvz") +//! .expect("Failed to create output file"); +//! +//! let options = FormatOptions { +//! format: Format::Rvz, +//! compression: Compression::Zstandard(19), +//! block_size: Format::Rvz.default_block_size(), +//! }; +//! // Create a disc writer with the desired output format. +//! let mut writer = DiscWriter::new(disc, &options) +//! .expect("Failed to create writer"); +//! +//! // Ideally we'd base this on the actual number of CPUs available. +//! // This is just an example. +//! let num_threads = match writer.weight() { +//! DiscWriterWeight::Light => 0, +//! DiscWriterWeight::Medium => 4, +//! DiscWriterWeight::Heavy => 12, +//! }; +//! let process_options = ProcessOptions { +//! processor_threads: num_threads, +//! // Enable checksum calculation for the _original_ disc data. +//! // Digests will be stored in the output file for verification, if supported. +//! // They will also be returned in the finalization result. +//! digest_crc32: true, +//! digest_md5: false, // MD5 is slow, skip it +//! digest_sha1: true, +//! digest_xxh64: true, +//! }; +//! // Start processing the disc image. +//! let finalization = writer.process( +//! |data, _progress, _total| { +//! output_file.write_all(data.as_ref())?; +//! // One could display progress here, if desired. +//! Ok(()) +//! }, +//! &process_options +//! ) +//! .expect("Failed to process disc image"); +//! +//! // Some disc writers calculate data during processing. +//! // If the finalization returns header data, seek to the beginning of the file and write it. +//! if !finalization.header.is_empty() { +//! output_file.seek(std::io::SeekFrom::Start(0)) +//! .expect("Failed to seek"); +//! output_file.write_all(finalization.header.as_ref()) +//! .expect("Failed to write header"); +//! } +//! output_file.flush().expect("Failed to flush output file"); +//! +//! // Display the calculated digests. +//! println!("CRC32: {:08X}", finalization.crc32.unwrap()); +//! // ... -use std::{ - io::{BufRead, Read, Seek}, - path::Path, -}; - -pub use disc::{ - ApploaderHeader, ContentMetadata, DiscHeader, DolHeader, FileStream, Fst, Node, NodeKind, - OwnedFileStream, PartitionBase, PartitionHeader, PartitionKind, PartitionMeta, SignedHeader, - Ticket, TicketLimit, TmdHeader, WindowedStream, BI2_SIZE, BOOT_SIZE, DL_DVD_SIZE, GCN_MAGIC, - MINI_DVD_SIZE, REGION_SIZE, SECTOR_SIZE, SL_DVD_SIZE, WII_MAGIC, -}; -pub use io::{ - block::{DiscStream, PartitionInfo}, - Compression, DiscMeta, Format, KeyBytes, MagicBytes, -}; -pub use util::lfg::LaggedFibonacci; - -mod disc; -mod io; -mod util; +pub mod build; +pub mod common; +pub mod disc; +pub(crate) mod io; +pub mod read; +pub mod util; +pub mod write; /// Error types for nod. #[derive(thiserror::Error, Debug)] @@ -91,9 +165,6 @@ pub enum Error { /// An unknown error. 
#[error("error: {0}")] Other(String), - /// An error occurred while allocating memory. - #[error("allocation failed")] - Alloc(zerocopy::AllocError), } impl From<&str> for Error { @@ -108,7 +179,12 @@ impl From for Error { impl From for Error { #[inline] - fn from(e: zerocopy::AllocError) -> Error { Error::Alloc(e) } + fn from(_: zerocopy::AllocError) -> Error { + Error::Io( + "allocation failed".to_string(), + std::io::Error::from(std::io::ErrorKind::OutOfMemory), + ) + } } /// Helper result type for [`Error`]. @@ -149,178 +225,3 @@ where E: ErrorContext self.map_err(|e| e.context(f())) } } - -/// Wii partition encryption mode. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] -pub enum PartitionEncryptionMode { - /// Partition data is read as it's stored in the underlying disc format. - /// For example, WIA/RVZ partitions are stored decrypted, so this avoids - /// rebuilding the partition encryption and hash data if it will only be - /// read via [`PartitionBase`]. If it's desired to read a full disc image - /// via [`Disc`], use [`PartitionEncryptionMode::Original`] instead. - #[default] - AsIs, - /// Partition encryption and hashes are rebuilt to match its original state, - /// if necessary. This is used for converting or verifying a disc image. - Original, - /// Partition data will be encrypted if reading a decrypted disc image. - /// Modifies the disc header to mark partition data as encrypted. - ForceEncrypted, - /// Partition data will be decrypted if reading an encrypted disc image. - /// Modifies the disc header to mark partition data as decrypted. - ForceDecrypted, -} - -/// Options for opening a disc image. -#[derive(Default, Debug, Clone)] -pub struct OpenOptions { - /// Wii: Partition encryption mode. By default, partitions are read as they - /// are stored in the underlying disc format, avoiding extra work when the - /// underlying format stores them decrypted (e.g. WIA/RVZ). - /// - /// This can be changed to [`PartitionEncryptionMode::Original`] to rebuild - /// partition encryption and hashes to match its original state for conversion - /// or verification. - pub partition_encryption: PartitionEncryptionMode, -} - -/// Options for opening a partition. -#[derive(Default, Debug, Clone)] -pub struct PartitionOptions { - /// Wii: Validate data hashes while reading the partition, if available. - /// To ensure hashes are present, regardless of the underlying disc format, - /// set [`OpenOptions::partition_encryption`] to [`PartitionEncryptionMode::Original`]. - pub validate_hashes: bool, -} - -/// An open disc image and read stream. -/// -/// This is the primary entry point for reading disc images. -pub struct Disc { - reader: disc::reader::DiscReader, -} - -impl Disc { - /// Opens a disc image from a file path. - #[inline] - pub fn new>(path: P) -> Result { - Disc::new_with_options(path, &OpenOptions::default()) - } - - /// Opens a disc image from a file path with custom options. - #[inline] - pub fn new_with_options>(path: P, options: &OpenOptions) -> Result { - let io = io::block::open(path.as_ref())?; - let reader = disc::reader::DiscReader::new(io, options)?; - Ok(Disc { reader }) - } - - /// Opens a disc image from a read stream. - #[inline] - pub fn new_stream(stream: Box) -> Result { - Disc::new_stream_with_options(stream, &OpenOptions::default()) - } - - /// Opens a disc image from a read stream with custom options. 
- #[inline] - pub fn new_stream_with_options( - stream: Box, - options: &OpenOptions, - ) -> Result { - let io = io::block::new(stream)?; - let reader = disc::reader::DiscReader::new(io, options)?; - Ok(Disc { reader }) - } - - /// Detects the format of a disc image from a read stream. - #[inline] - pub fn detect(stream: &mut R) -> std::io::Result> - where R: Read + ?Sized { - io::block::detect(stream) - } - - /// The disc's primary header. - #[inline] - pub fn header(&self) -> &DiscHeader { self.reader.header() } - - /// The Wii disc's region information. - /// - /// **GameCube**: This will return `None`. - #[inline] - pub fn region(&self) -> Option<&[u8; REGION_SIZE]> { self.reader.region() } - - /// Returns extra metadata included in the disc file format, if any. - #[inline] - pub fn meta(&self) -> DiscMeta { self.reader.meta() } - - /// The disc's size in bytes, or an estimate if not stored by the format. - #[inline] - pub fn disc_size(&self) -> u64 { self.reader.disc_size() } - - /// A list of Wii partitions on the disc. - /// - /// **GameCube**: This will return an empty slice. - #[inline] - pub fn partitions(&self) -> &[PartitionInfo] { self.reader.partitions() } - - /// Opens a decrypted partition read stream for the specified partition index. - /// - /// **GameCube**: `index` must always be 0. - #[inline] - pub fn open_partition(&self, index: usize) -> Result> { - self.open_partition_with_options(index, &PartitionOptions::default()) - } - - /// Opens a decrypted partition read stream for the specified partition index - /// with custom options. - /// - /// **GameCube**: `index` must always be 0. - #[inline] - pub fn open_partition_with_options( - &self, - index: usize, - options: &PartitionOptions, - ) -> Result> { - self.reader.open_partition(index, options) - } - - /// Opens a decrypted partition read stream for the first partition matching - /// the specified kind. - /// - /// **GameCube**: `kind` must always be [`PartitionKind::Data`]. - #[inline] - pub fn open_partition_kind(&self, kind: PartitionKind) -> Result> { - self.reader.open_partition_kind(kind, &PartitionOptions::default()) - } - - /// Opens a decrypted partition read stream for the first partition matching - /// the specified kind with custom options. - /// - /// **GameCube**: `kind` must always be [`PartitionKind::Data`]. - #[inline] - pub fn open_partition_kind_with_options( - &self, - kind: PartitionKind, - options: &PartitionOptions, - ) -> Result> { - self.reader.open_partition_kind(kind, options) - } -} - -impl BufRead for Disc { - #[inline] - fn fill_buf(&mut self) -> std::io::Result<&[u8]> { self.reader.fill_buf() } - - #[inline] - fn consume(&mut self, amt: usize) { self.reader.consume(amt) } -} - -impl Read for Disc { - #[inline] - fn read(&mut self, buf: &mut [u8]) -> std::io::Result { self.reader.read(buf) } -} - -impl Seek for Disc { - #[inline] - fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result { self.reader.seek(pos) } -} diff --git a/nod/src/read.rs b/nod/src/read.rs new file mode 100644 index 0000000..ac82c66 --- /dev/null +++ b/nod/src/read.rs @@ -0,0 +1,376 @@ +//! [`DiscReader`] and associated types. 
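+//!
+//! A minimal usage sketch (editorial example; the path is hypothetical):
+//!
+//! ```no_run
+//! use nod::read::DiscReader;
+//!
+//! // Detect the container format without fully opening the image.
+//! let mut file = std::fs::File::open("path/to/file.rvz").expect("Failed to open file");
+//! let format = DiscReader::detect(&mut file).expect("Failed to read stream");
+//! println!("Detected format: {:?}", format);
+//! ```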
+use std::{
+    io::{BufRead, Read, Seek},
+    path::Path,
+    sync::Arc,
+};
+
+use dyn_clone::DynClone;
+use zerocopy::FromBytes;
+
+use crate::{
+    common::{Compression, Format, PartitionInfo, PartitionKind},
+    disc,
+    disc::{
+        fst::{Fst, Node},
+        wii::{ContentMetadata, Ticket, TmdHeader, H3_TABLE_SIZE, REGION_SIZE},
+        ApploaderHeader, DiscHeader, DolHeader, PartitionHeader, BI2_SIZE, BOOT_SIZE,
+    },
+    io::block,
+    util::WindowedReader,
+    Result,
+};
+
+/// Wii partition encryption mode.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Hash)]
+pub enum PartitionEncryption {
+    /// Partition encryption and hashes are rebuilt to match its original state,
+    /// if necessary. This is used for converting or verifying a disc image.
+    #[default]
+    Original,
+    /// Partition data will be encrypted if reading a decrypted disc image.
+    /// Modifies the disc header to mark partition data as encrypted.
+    ForceEncrypted,
+    /// Partition data will be decrypted if reading an encrypted disc image.
+    /// Modifies the disc header to mark partition data as decrypted.
+    ForceDecrypted,
+    /// Partition data will be decrypted if reading an encrypted disc image.
+    /// Modifies the disc header to mark partition data as decrypted.
+    /// Hashes are removed from the partition data.
+    ForceDecryptedNoHashes,
+}
+
+/// Options for opening a disc image.
+#[derive(Default, Debug, Clone)]
+pub struct DiscOptions {
+    /// Wii: Partition encryption mode. This affects how partition data appears when
+    /// reading directly from [`DiscReader`], and can be used to convert between
+    /// encrypted and decrypted disc images.
+    pub partition_encryption: PartitionEncryption,
+    /// Number of threads to use for preloading data as the disc is read. This
+    /// is particularly useful when reading the disc image sequentially, as it
+    /// can perform decompression and rebuilding in parallel with the main
+    /// read thread. The default value of 0 disables preloading.
+    pub preloader_threads: usize,
+}
+
+/// Options for opening a partition.
+#[derive(Default, Debug, Clone)]
+pub struct PartitionOptions {
+    /// Wii: Validate data hashes while reading the partition, if available.
+    pub validate_hashes: bool,
+}
+
+/// Required trait bounds for reading disc images.
+pub trait DiscStream: Read + Seek + DynClone + Send + Sync {}
+
+impl<T> DiscStream for T where T: Read + Seek + DynClone + Send + Sync + ?Sized {}
+
+dyn_clone::clone_trait_object!(DiscStream);
+
+/// An open disc image and read stream.
+///
+/// This is the primary entry point for reading disc images.
+#[derive(Clone)]
+pub struct DiscReader {
+    inner: disc::reader::DiscReader,
+}
+
+impl DiscReader {
+    /// Opens a disc image from a file path.
+    #[inline]
+    pub fn new<P: AsRef<Path>>(path: P, options: &DiscOptions) -> Result<DiscReader> {
+        let io = block::open(path.as_ref())?;
+        let inner = disc::reader::DiscReader::new(io, options)?;
+        Ok(DiscReader { inner })
+    }
+
+    /// Opens a disc image from a read stream.
+    #[inline]
+    pub fn new_stream(stream: Box<dyn DiscStream>, options: &DiscOptions) -> Result<DiscReader> {
+        let io = block::new(stream)?;
+        let reader = disc::reader::DiscReader::new(io, options)?;
+        Ok(DiscReader { inner: reader })
+    }
+
+    /// Detects the format of a disc image from a read stream.
+    #[inline]
+    pub fn detect<R>(stream: &mut R) -> std::io::Result<Option<Format>>
+    where R: Read + ?Sized {
+        block::detect(stream)
+    }
+
+    /// The disc's primary header.
+    #[inline]
+    pub fn header(&self) -> &DiscHeader { self.inner.header() }
+
+    /// The Wii disc's region information.
+    ///
+    /// **GameCube**: This will return `None`.
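+    ///
+    /// Editorial note: the returned buffer is the raw `REGION_SIZE`-byte region
+    /// area as stored on disc; interpreting its fields is left to the caller.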
+    #[inline]
+    pub fn region(&self) -> Option<&[u8; REGION_SIZE]> { self.inner.region() }
+
+    /// Returns extra metadata included in the disc file format, if any.
+    #[inline]
+    pub fn meta(&self) -> DiscMeta { self.inner.meta() }
+
+    /// The disc's size in bytes, or an estimate if not stored by the format.
+    #[inline]
+    pub fn disc_size(&self) -> u64 { self.inner.disc_size() }
+
+    /// A list of Wii partitions on the disc.
+    ///
+    /// **GameCube**: This will return an empty slice.
+    #[inline]
+    pub fn partitions(&self) -> &[PartitionInfo] { self.inner.partitions() }
+
+    /// Opens a decrypted partition read stream for the specified partition index.
+    ///
+    /// **GameCube**: `index` must always be 0.
+    #[inline]
+    pub fn open_partition(
+        &self,
+        index: usize,
+        options: &PartitionOptions,
+    ) -> Result<Box<dyn PartitionReader>> {
+        self.inner.open_partition(index, options)
+    }
+
+    /// Opens a decrypted partition read stream for the first partition matching
+    /// the specified kind.
+    ///
+    /// **GameCube**: `kind` must always be [`PartitionKind::Data`].
+    #[inline]
+    pub fn open_partition_kind(
+        &self,
+        kind: PartitionKind,
+        options: &PartitionOptions,
+    ) -> Result<Box<dyn PartitionReader>> {
+        self.inner.open_partition_kind(kind, options)
+    }
+
+    pub(crate) fn into_inner(self) -> disc::reader::DiscReader { self.inner }
+}
+
+impl BufRead for DiscReader {
+    #[inline]
+    fn fill_buf(&mut self) -> std::io::Result<&[u8]> { self.inner.fill_buf() }
+
+    #[inline]
+    fn consume(&mut self, amt: usize) { self.inner.consume(amt) }
+}
+
+impl Read for DiscReader {
+    #[inline]
+    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { self.inner.read(buf) }
+}
+
+impl Seek for DiscReader {
+    #[inline]
+    fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result<u64> { self.inner.seek(pos) }
+}
+
+/// Extra metadata about the underlying disc file format.
+#[derive(Debug, Clone, Default)]
+pub struct DiscMeta {
+    /// The disc file format.
+    pub format: Format,
+    /// The format's compression algorithm.
+    pub compression: Compression,
+    /// If the format uses blocks, the block size in bytes.
+    pub block_size: Option<u32>,
+    /// Whether Wii partitions are stored decrypted in the format.
+    pub decrypted: bool,
+    /// Whether the format omits Wii partition data hashes.
+    pub needs_hash_recovery: bool,
+    /// Whether the format supports recovering the original disc data losslessly.
+    pub lossless: bool,
+    /// The original disc's size in bytes, if stored by the format.
+    pub disc_size: Option<u64>,
+    /// The original disc's CRC32 hash, if stored by the format.
+    pub crc32: Option<u32>,
+    /// The original disc's MD5 hash, if stored by the format.
+    pub md5: Option<[u8; 16]>,
+    /// The original disc's SHA-1 hash, if stored by the format.
+    pub sha1: Option<[u8; 20]>,
+    /// The original disc's XXH64 hash, if stored by the format.
+    pub xxh64: Option<u64>,
+}
+
+/// An open disc partition.
+pub trait PartitionReader: DynClone + BufRead + Seek + Send + Sync {
+    /// Whether this is a Wii partition. (GameCube otherwise)
+    fn is_wii(&self) -> bool;
+
+    /// Reads the partition header and file system table.
+    fn meta(&mut self) -> Result<PartitionMeta>;
+}
+
+/// A file reader borrowing a [`PartitionReader`].
+pub type FileReader<'a> = WindowedReader<&'a mut dyn PartitionReader>;
+
+/// A file reader owning a [`PartitionReader`].
+pub type OwnedFileReader = WindowedReader<Box<dyn PartitionReader>>;
+
+impl<'a> dyn PartitionReader + 'a {
+    /// Seeks the partition stream to the specified file system node
+    /// and returns a windowed stream.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    /// ```no_run
+    /// use std::io::Read;
+    ///
+    /// use nod::read::{DiscOptions, DiscReader, PartitionKind, PartitionOptions};
+    ///
+    /// fn main() -> nod::Result<()> {
+    ///     let disc = DiscReader::new("path/to/file.iso", &DiscOptions::default())?;
+    ///     let mut partition =
+    ///         disc.open_partition_kind(PartitionKind::Data, &PartitionOptions::default())?;
+    ///     let meta = partition.meta()?;
+    ///     let fst = meta.fst()?;
+    ///     if let Some((_, node)) = fst.find("/MP3/Worlds.txt") {
+    ///         let mut s = String::new();
+    ///         partition
+    ///             .open_file(node)
+    ///             .expect("Failed to open file stream")
+    ///             .read_to_string(&mut s)
+    ///             .expect("Failed to read file");
+    ///         println!("{}", s);
+    ///     }
+    ///     Ok(())
+    /// }
+    /// ```
+    pub fn open_file(&mut self, node: Node) -> std::io::Result<FileReader> {
+        if !node.is_file() {
+            return Err(std::io::Error::new(
+                std::io::ErrorKind::InvalidInput,
+                "Node is not a file".to_string(),
+            ));
+        }
+        let is_wii = self.is_wii();
+        FileReader::new(self, node.offset(is_wii), node.length() as u64)
+    }
+}
+
+impl dyn PartitionReader {
+    /// Consumes the partition instance and returns a windowed stream.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// use std::io::Read;
+    ///
+    /// use nod::read::{DiscOptions, DiscReader, OwnedFileReader, PartitionKind, PartitionOptions};
+    ///
+    /// fn main() -> nod::Result<()> {
+    ///     let disc = DiscReader::new("path/to/file.iso", &DiscOptions::default())?;
+    ///     let mut partition =
+    ///         disc.open_partition_kind(PartitionKind::Data, &PartitionOptions::default())?;
+    ///     let meta = partition.meta()?;
+    ///     let fst = meta.fst()?;
+    ///     if let Some((_, node)) = fst.find("/disc.tgc") {
+    ///         let file: OwnedFileReader = partition
+    ///             .clone() // Clone the Box<dyn PartitionReader>
+    ///             .into_open_file(node) // Get an OwnedFileReader
+    ///             .expect("Failed to open file stream");
+    ///         // Open the inner disc image using the owned stream
+    ///         let inner_disc = DiscReader::new_stream(Box::new(file), &DiscOptions::default())
+    ///             .expect("Failed to open inner disc");
+    ///         // ...
+    ///     }
+    ///     Ok(())
+    /// }
+    /// ```
+    pub fn into_open_file(self: Box<Self>, node: Node) -> std::io::Result<OwnedFileReader> {
+        if !node.is_file() {
+            return Err(std::io::Error::new(
+                std::io::ErrorKind::InvalidInput,
+                "Node is not a file".to_string(),
+            ));
+        }
+        let is_wii = self.is_wii();
+        OwnedFileReader::new(self, node.offset(is_wii), node.length() as u64)
+    }
+}
+
+dyn_clone::clone_trait_object!(PartitionReader);
+
+/// Extra disc partition data. (DOL, FST, etc.)
+#[derive(Clone, Debug)]
+pub struct PartitionMeta {
+    /// Disc and partition header (boot.bin)
+    pub raw_boot: Arc<[u8; BOOT_SIZE]>,
+    /// Debug and region information (bi2.bin)
+    pub raw_bi2: Arc<[u8; BI2_SIZE]>,
+    /// Apploader (apploader.bin)
+    pub raw_apploader: Arc<[u8]>,
+    /// Main binary (main.dol)
+    pub raw_dol: Arc<[u8]>,
+    /// File system table (fst.bin)
+    pub raw_fst: Arc<[u8]>,
+    /// Ticket (ticket.bin, Wii only)
+    pub raw_ticket: Option<Arc<[u8]>>,
+    /// TMD (tmd.bin, Wii only)
+    pub raw_tmd: Option<Arc<[u8]>>,
+    /// Certificate chain (cert.bin, Wii only)
+    pub raw_cert_chain: Option<Arc<[u8]>>,
+    /// H3 hash table (h3.bin, Wii only)
+    pub raw_h3_table: Option<Arc<[u8]>>,
+}
+
+impl PartitionMeta {
+    /// A view into the disc header.
+    #[inline]
+    pub fn header(&self) -> &DiscHeader {
+        DiscHeader::ref_from_bytes(&self.raw_boot[..size_of::<DiscHeader>()])
+            .expect("Invalid header alignment")
+    }
+
+    /// A view into the partition header.
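+    ///
+    /// The partition header is parsed from `boot.bin` immediately after the
+    /// disc header, hence the `size_of::<DiscHeader>()..` slice below.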
+    #[inline]
+    pub fn partition_header(&self) -> &PartitionHeader {
+        PartitionHeader::ref_from_bytes(&self.raw_boot[size_of::<DiscHeader>()..])
+            .expect("Invalid partition header alignment")
+    }
+
+    /// A view into the apploader header.
+    #[inline]
+    pub fn apploader_header(&self) -> &ApploaderHeader {
+        ApploaderHeader::ref_from_prefix(&self.raw_apploader)
+            .expect("Invalid apploader alignment")
+            .0
+    }
+
+    /// A view into the file system table (FST).
+    #[inline]
+    pub fn fst(&self) -> Result<Fst> { Fst::new(&self.raw_fst) }
+
+    /// A view into the DOL header.
+    #[inline]
+    pub fn dol_header(&self) -> &DolHeader {
+        DolHeader::ref_from_prefix(&self.raw_dol).expect("Invalid DOL alignment").0
+    }
+
+    /// A view into the ticket. (Wii only)
+    #[inline]
+    pub fn ticket(&self) -> Option<&Ticket> {
+        let raw_ticket = self.raw_ticket.as_deref()?;
+        Some(Ticket::ref_from_bytes(raw_ticket).expect("Invalid ticket alignment"))
+    }
+
+    /// A view into the TMD. (Wii only)
+    #[inline]
+    pub fn tmd_header(&self) -> Option<&TmdHeader> {
+        let raw_tmd = self.raw_tmd.as_deref()?;
+        Some(TmdHeader::ref_from_prefix(raw_tmd).expect("Invalid TMD alignment").0)
+    }
+
+    /// A view into the TMD content metadata. (Wii only)
+    #[inline]
+    pub fn content_metadata(&self) -> Option<&[ContentMetadata]> {
+        let raw_cmd = &self.raw_tmd.as_deref()?[size_of::<TmdHeader>()..];
+        Some(<[ContentMetadata]>::ref_from_bytes(raw_cmd).expect("Invalid CMD alignment"))
+    }
+}
diff --git a/nod/src/util/aes.rs b/nod/src/util/aes.rs
new file mode 100644
index 0000000..1acf827
--- /dev/null
+++ b/nod/src/util/aes.rs
@@ -0,0 +1,136 @@
+use tracing::instrument;
+
+use crate::{
+    common::KeyBytes,
+    disc::{
+        wii::{HASHES_SIZE, SECTOR_DATA_SIZE},
+        SECTOR_SIZE,
+    },
+    util::array_ref,
+};
+
+#[cfg(feature = "openssl")]
+thread_local! {
+    static ENC_CIPHER_CTX: std::cell::RefCell<openssl::cipher_ctx::CipherCtx> = {
+        let cipher = openssl::cipher::Cipher::fetch(None, "AES-128-CBC", None).unwrap();
+        let mut ctx = openssl::cipher_ctx::CipherCtx::new().unwrap();
+        ctx.set_padding(false);
+        ctx.encrypt_init(Some(&cipher), None, None).unwrap();
+        std::cell::RefCell::new(ctx)
+    };
+    static DEC_CIPHER_CTX: std::cell::RefCell<openssl::cipher_ctx::CipherCtx> = {
+        let cipher = openssl::cipher::Cipher::fetch(None, "AES-128-CBC", None).unwrap();
+        let mut ctx = openssl::cipher_ctx::CipherCtx::new().unwrap();
+        ctx.set_padding(false);
+        ctx.decrypt_init(Some(&cipher), None, None).unwrap();
+        std::cell::RefCell::new(ctx)
+    };
+}
+
+/// Encrypts data in-place using AES-128-CBC with the given key and IV.
+pub fn aes_cbc_encrypt(key: &KeyBytes, iv: &KeyBytes, data: &mut [u8]) {
+    assert_eq!(data.len() % 16, 0);
+    #[cfg(not(feature = "openssl"))]
+    {
+        use aes::cipher::{block_padding::NoPadding, BlockModeEncrypt, KeyIvInit};
+        <cbc::Encryptor<aes::Aes128>>::new(key.into(), iv.into())
+            .encrypt_padded::<NoPadding>(data, data.len())
+            .unwrap();
+    }
+    #[cfg(feature = "openssl")]
+    ENC_CIPHER_CTX.with_borrow_mut(|ctx| {
+        ctx.encrypt_init(None, Some(key), Some(iv)).unwrap();
+        let len = unsafe {
+            // The openssl crate doesn't provide a safe API for using the same inbuf/outbuf.
+            // However, this is valid with AES-CBC and no padding. Create a copy of the input
+            // slice to appease the borrow checker.
+            let input = std::slice::from_raw_parts(data.as_ptr(), data.len());
+            ctx.cipher_update_unchecked(input, Some(data))
+        }
+        .unwrap();
+        assert_eq!(len, data.len());
+    });
+}
+
+/// Decrypts data in-place using AES-128-CBC with the given key and IV.
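+/// As with [`aes_cbc_encrypt`], the data length must be a multiple of the
+/// 16-byte AES block size, and no padding is applied.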
+pub fn aes_cbc_decrypt(key: &KeyBytes, iv: &KeyBytes, data: &mut [u8]) {
+    assert_eq!(data.len() % 16, 0);
+    #[cfg(not(feature = "openssl"))]
+    {
+        use aes::cipher::{block_padding::NoPadding, BlockModeDecrypt, KeyIvInit};
+        <cbc::Decryptor<aes::Aes128>>::new(key.into(), iv.into())
+            .decrypt_padded::<NoPadding>(data)
+            .unwrap();
+    }
+    #[cfg(feature = "openssl")]
+    DEC_CIPHER_CTX.with_borrow_mut(|ctx| {
+        ctx.decrypt_init(None, Some(key), Some(iv)).unwrap();
+        let len = unsafe {
+            // The openssl crate doesn't provide a safe API for using the same inbuf/outbuf.
+            // However, this is valid with AES-CBC and no padding. Create a copy of the input
+            // slice to appease the borrow checker.
+            let input = std::slice::from_raw_parts(data.as_ptr(), data.len());
+            ctx.cipher_update_unchecked(input, Some(data))
+        }
+        .unwrap();
+        assert_eq!(len, data.len());
+    });
+}
+
+/// Decrypts data buffer-to-buffer using AES-128-CBC with the given key and IV.
+pub fn aes_cbc_decrypt_b2b(key: &KeyBytes, iv: &KeyBytes, data: &[u8], out: &mut [u8]) {
+    assert_eq!(data.len() % 16, 0);
+    assert_eq!(data.len(), out.len());
+    #[cfg(not(feature = "openssl"))]
+    {
+        use aes::cipher::{block_padding::NoPadding, BlockModeDecrypt, KeyIvInit};
+        <cbc::Decryptor<aes::Aes128>>::new(key.into(), iv.into())
+            .decrypt_padded_b2b::<NoPadding>(data, out)
+            .unwrap();
+    }
+    #[cfg(feature = "openssl")]
+    DEC_CIPHER_CTX.with_borrow_mut(|ctx| {
+        ctx.decrypt_init(None, Some(key), Some(iv)).unwrap();
+        let len = unsafe { ctx.cipher_update_unchecked(data, Some(out)) }.unwrap();
+        assert_eq!(len, out.len());
+    });
+}
+
+/// Encrypts a Wii partition sector in-place.
+#[instrument(skip_all)]
+pub fn encrypt_sector(out: &mut [u8; SECTOR_SIZE], key: &KeyBytes) {
+    aes_cbc_encrypt(key, &[0u8; 16], &mut out[..HASHES_SIZE]);
+    // Data IV from encrypted hash block
+    let iv = *array_ref![out, 0x3D0, 16];
+    aes_cbc_encrypt(key, &iv, &mut out[HASHES_SIZE..]);
+}
+
+/// Decrypts a Wii partition sector in-place.
+#[instrument(skip_all)]
+pub fn decrypt_sector(out: &mut [u8; SECTOR_SIZE], key: &KeyBytes) {
+    // Data IV from encrypted hash block
+    let iv = *array_ref![out, 0x3D0, 16];
+    aes_cbc_decrypt(key, &[0u8; 16], &mut out[..HASHES_SIZE]);
+    aes_cbc_decrypt(key, &iv, &mut out[HASHES_SIZE..]);
+}
+
+/// Decrypts a Wii partition sector buffer-to-buffer.
+#[instrument(skip_all)]
+pub fn decrypt_sector_b2b(data: &[u8; SECTOR_SIZE], out: &mut [u8; SECTOR_SIZE], key: &KeyBytes) {
+    // Data IV from encrypted hash block
+    let iv = *array_ref![data, 0x3D0, 16];
+    aes_cbc_decrypt_b2b(key, &[0u8; 16], &data[..HASHES_SIZE], &mut out[..HASHES_SIZE]);
+    aes_cbc_decrypt_b2b(key, &iv, &data[HASHES_SIZE..], &mut out[HASHES_SIZE..]);
+}
+
+/// Decrypts a Wii partition sector data (excluding hashes) buffer-to-buffer.
+#[instrument(skip_all)]
+pub fn decrypt_sector_data_b2b(
+    data: &[u8; SECTOR_SIZE],
+    out: &mut [u8; SECTOR_DATA_SIZE],
+    key: &KeyBytes,
+) {
+    // Data IV from encrypted hash block
+    let iv = *array_ref![data, 0x3D0, 16];
+    aes_cbc_decrypt_b2b(key, &iv, &data[HASHES_SIZE..], out);
+}
diff --git a/nod/src/util/compress.rs b/nod/src/util/compress.rs
index e081f42..7763112 100644
--- a/nod/src/util/compress.rs
+++ b/nod/src/util/compress.rs
@@ -1,95 +1,483 @@
-/// Decodes the LZMA Properties byte (lc/lp/pb).
-/// See `lzma_lzma_lclppb_decode` in `liblzma/lzma/lzma_decoder.c`.
-#[cfg(feature = "compress-lzma")] -pub fn lzma_lclppb_decode( - options: &mut liblzma::stream::LzmaOptions, - byte: u8, -) -> std::io::Result<()> { - let mut d = byte as u32; - if d >= (9 * 5 * 5) { - return Err(std::io::Error::new( - std::io::ErrorKind::InvalidData, - format!("Invalid LZMA props byte: {}", d), - )); - } - options.literal_context_bits(d % 9); - d /= 9; - options.position_bits(d / 5); - options.literal_position_bits(d % 5); - Ok(()) +use std::{io, io::Read}; + +use tracing::instrument; + +use crate::{ + common::Compression, + io::wia::{WIACompression, WIADisc}, + Error, Result, +}; + +pub struct Decompressor { + pub kind: DecompressionKind, + pub cache: DecompressorCache, } -/// Decodes LZMA properties. -/// See `lzma_lzma_props_decode` in `liblzma/lzma/lzma_decoder.c`. -#[cfg(feature = "compress-lzma")] -pub fn lzma_props_decode(props: &[u8]) -> std::io::Result { - use crate::array_ref; - if props.len() != 5 { - return Err(std::io::Error::new( - std::io::ErrorKind::InvalidData, - format!("Invalid LZMA props length: {}", props.len()), - )); +impl Clone for Decompressor { + fn clone(&self) -> Self { + Self { kind: self.kind.clone(), cache: DecompressorCache::default() } } - let mut options = liblzma::stream::LzmaOptions::new(); - lzma_lclppb_decode(&mut options, props[0])?; - options.dict_size(u32::from_le_bytes(*array_ref!(props, 1, 4))); - Ok(options) } -/// Decodes LZMA2 properties. -/// See `lzma_lzma2_props_decode` in `liblzma/lzma/lzma2_decoder.c`. -#[cfg(feature = "compress-lzma")] -pub fn lzma2_props_decode(props: &[u8]) -> std::io::Result { - use std::cmp::Ordering; - if props.len() != 1 { - return Err(std::io::Error::new( - std::io::ErrorKind::InvalidData, - format!("Invalid LZMA2 props length: {}", props.len()), - )); +#[derive(Default)] +pub enum DecompressorCache { + #[default] + None, + #[cfg(feature = "compress-zlib")] + Deflate(Box), + #[cfg(feature = "compress-zstd")] + Zstandard(zstd_safe::DCtx<'static>), +} + +impl Decompressor { + pub fn new(kind: DecompressionKind) -> Self { + Self { kind, cache: DecompressorCache::default() } } - let d = props[0] as u32; - let mut options = liblzma::stream::LzmaOptions::new(); - options.dict_size(match d.cmp(&40) { - Ordering::Greater => { - return Err(std::io::Error::new( - std::io::ErrorKind::InvalidData, - format!("Invalid LZMA2 props byte: {}", d), + + #[instrument(name = "Decompressor::decompress", skip_all)] + pub fn decompress(&mut self, buf: &[u8], out: &mut [u8]) -> io::Result { + match &self.kind { + DecompressionKind::None => { + out.copy_from_slice(buf); + Ok(buf.len()) + } + #[cfg(feature = "compress-zlib")] + DecompressionKind::Deflate => { + let decompressor = match &mut self.cache { + DecompressorCache::Deflate(decompressor) => decompressor, + _ => { + self.cache = DecompressorCache::Deflate(Box::new( + miniz_oxide::inflate::core::DecompressorOxide::new(), + )); + match &mut self.cache { + DecompressorCache::Deflate(decompressor) => decompressor, + _ => unreachable!(), + } + } + }; + decompressor.init(); + let (status, in_size, out_size) = miniz_oxide::inflate::core::decompress( + decompressor.as_mut(), + buf, + out, + 0, + miniz_oxide::inflate::core::inflate_flags::TINFL_FLAG_PARSE_ZLIB_HEADER + | miniz_oxide::inflate::core::inflate_flags::TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF, + ); + match status { + miniz_oxide::inflate::TINFLStatus::Done => Ok(out_size), + _ => Err(io::Error::new( + io::ErrorKind::InvalidData, + format!( + "Deflate decompression status {:?} (in: {}, out: {})", + status, in_size, 
out_size + ), + )), + } + } + #[cfg(feature = "compress-bzip2")] + DecompressionKind::Bzip2 => { + let mut decoder = bzip2::Decompress::new(false); + let status = decoder.decompress(buf, out)?; + match status { + bzip2::Status::StreamEnd => Ok(decoder.total_out() as usize), + _ => Err(io::Error::new( + io::ErrorKind::InvalidData, + format!("Bzip2 decompression status {:?}", status), + )), + } + } + #[cfg(feature = "compress-lzma")] + DecompressionKind::Lzma(data) => { + use lzma_util::{lzma_props_decode, new_lzma_decoder}; + let mut decoder = new_lzma_decoder(&lzma_props_decode(data)?)?; + let status = decoder.process(buf, out, liblzma::stream::Action::Finish)?; + match status { + liblzma::stream::Status::StreamEnd => Ok(decoder.total_out() as usize), + _ => Err(io::Error::new( + io::ErrorKind::InvalidData, + format!("LZMA decompression status {:?}", status), + )), + } + } + #[cfg(feature = "compress-lzma")] + DecompressionKind::Lzma2(data) => { + use lzma_util::{lzma2_props_decode, new_lzma2_decoder}; + let mut decoder = new_lzma2_decoder(&lzma2_props_decode(data)?)?; + let status = decoder.process(buf, out, liblzma::stream::Action::Finish)?; + match status { + liblzma::stream::Status::StreamEnd => Ok(decoder.total_out() as usize), + _ => Err(io::Error::new( + io::ErrorKind::InvalidData, + format!("LZMA2 decompression status {:?}", status), + )), + } + } + #[cfg(feature = "compress-zstd")] + DecompressionKind::Zstandard => { + let ctx = match &mut self.cache { + DecompressorCache::Zstandard(ctx) => ctx, + _ => { + let ctx = zstd_safe::DCtx::create(); + self.cache = DecompressorCache::Zstandard(ctx); + match &mut self.cache { + DecompressorCache::Zstandard(ctx) => ctx, + _ => unreachable!(), + } + } + }; + ctx.decompress(out, buf).map_err(zstd_util::map_error_code) + } + } + } +} + +#[derive(Debug, Clone)] +pub enum DecompressionKind { + None, + #[cfg(feature = "compress-zlib")] + Deflate, + #[cfg(feature = "compress-bzip2")] + Bzip2, + #[cfg(feature = "compress-lzma")] + Lzma(Box<[u8]>), + #[cfg(feature = "compress-lzma")] + Lzma2(Box<[u8]>), + #[cfg(feature = "compress-zstd")] + Zstandard, +} + +impl DecompressionKind { + pub fn from_wia(disc: &WIADisc) -> Result { + let _data = &disc.compr_data[..disc.compr_data_len as usize]; + match disc.compression() { + WIACompression::None => Ok(Self::None), + #[cfg(feature = "compress-bzip2")] + WIACompression::Bzip2 => Ok(Self::Bzip2), + #[cfg(feature = "compress-lzma")] + WIACompression::Lzma => Ok(Self::Lzma(Box::from(_data))), + #[cfg(feature = "compress-lzma")] + WIACompression::Lzma2 => Ok(Self::Lzma2(Box::from(_data))), + #[cfg(feature = "compress-zstd")] + WIACompression::Zstandard => Ok(Self::Zstandard), + comp => Err(Error::DiscFormat(format!("Unsupported WIA/RVZ compression: {:?}", comp))), + } + } + + pub fn wrap<'a, R>(&mut self, reader: R) -> io::Result> + where R: Read + 'a { + Ok(match self { + DecompressionKind::None => Box::new(reader), + #[cfg(feature = "compress-zlib")] + DecompressionKind::Deflate => unimplemented!("DecompressionKind::Deflate.wrap"), + #[cfg(feature = "compress-bzip2")] + DecompressionKind::Bzip2 => Box::new(bzip2::read::BzDecoder::new(reader)), + #[cfg(feature = "compress-lzma")] + DecompressionKind::Lzma(data) => { + use lzma_util::{lzma_props_decode, new_lzma_decoder}; + let stream = new_lzma_decoder(&lzma_props_decode(data)?)?; + Box::new(liblzma::read::XzDecoder::new_stream(reader, stream)) + } + #[cfg(feature = "compress-lzma")] + DecompressionKind::Lzma2(data) => { + use lzma_util::{lzma2_props_decode, 
new_lzma2_decoder}; + let stream = new_lzma2_decoder(&lzma2_props_decode(data)?)?; + Box::new(liblzma::read::XzDecoder::new_stream(reader, stream)) + } + #[cfg(feature = "compress-zstd")] + DecompressionKind::Zstandard => Box::new(zstd::stream::Decoder::new(reader)?), + }) + } +} + +pub struct Compressor { + pub kind: Compression, + pub cache: CompressorCache, + pub buffer: Vec, +} + +impl Clone for Compressor { + fn clone(&self) -> Self { + Self { + kind: self.kind, + cache: CompressorCache::default(), + buffer: Vec::with_capacity(self.buffer.capacity()), + } + } +} + +#[derive(Default)] +pub enum CompressorCache { + #[default] + None, + #[cfg(feature = "compress-zlib")] + Deflate(Box), + #[cfg(feature = "compress-zstd")] + Zstandard(zstd_safe::CCtx<'static>), +} + +impl Compressor { + pub fn new(kind: Compression, buffer_size: usize) -> Self { + Self { kind, cache: CompressorCache::default(), buffer: Vec::with_capacity(buffer_size) } + } + + /// Compresses the given buffer into `out`. `out`'s capacity will not be extended. Instead, if + /// the compressed data is larger than `out`, this function will bail and return `false`. + #[instrument(name = "Compressor::compress", skip_all)] + pub fn compress(&mut self, buf: &[u8]) -> io::Result { + self.buffer.clear(); + match self.kind { + #[cfg(feature = "compress-zlib")] + Compression::Deflate(level) => { + let compressor = match &mut self.cache { + CompressorCache::Deflate(compressor) => compressor, + _ => { + self.cache = CompressorCache::Deflate(Box::new( + miniz_oxide::deflate::core::CompressorOxide::new( + miniz_oxide::deflate::core::create_comp_flags_from_zip_params( + level as i32, + 15, + 0, + ), + ), + )); + match &mut self.cache { + CompressorCache::Deflate(compressor) => compressor, + _ => unreachable!(), + } + } + }; + self.buffer.resize(self.buffer.capacity(), 0); + compressor.reset(); + let (status, _, out_size) = miniz_oxide::deflate::core::compress( + compressor.as_mut(), + buf, + self.buffer.as_mut_slice(), + miniz_oxide::deflate::core::TDEFLFlush::Finish, + ); + self.buffer.truncate(out_size); + Ok(status == miniz_oxide::deflate::core::TDEFLStatus::Done) + } + #[cfg(feature = "compress-bzip2")] + Compression::Bzip2(level) => { + let compression = bzip2::Compression::new(level as u32); + let mut compress = bzip2::Compress::new(compression, 30); + let status = compress.compress_vec(buf, &mut self.buffer, bzip2::Action::Finish)?; + Ok(status == bzip2::Status::StreamEnd) + } + #[cfg(feature = "compress-lzma")] + Compression::Lzma(level) => { + let options = liblzma::stream::LzmaOptions::new_preset(level as u32)?; + let mut encoder = lzma_util::new_lzma_encoder(&options)?; + let status = + encoder.process_vec(buf, &mut self.buffer, liblzma::stream::Action::Finish)?; + Ok(status == liblzma::stream::Status::StreamEnd) + } + #[cfg(feature = "compress-lzma")] + Compression::Lzma2(level) => { + let options = liblzma::stream::LzmaOptions::new_preset(level as u32)?; + let mut encoder = lzma_util::new_lzma2_encoder(&options)?; + let status = + encoder.process_vec(buf, &mut self.buffer, liblzma::stream::Action::Finish)?; + Ok(status == liblzma::stream::Status::StreamEnd) + } + #[cfg(feature = "compress-zstd")] + Compression::Zstandard(level) => { + let ctx = match &mut self.cache { + CompressorCache::Zstandard(compressor) => compressor, + _ => { + let mut ctx = zstd_safe::CCtx::create(); + ctx.init(level as i32).map_err(zstd_util::map_error_code)?; + self.cache = CompressorCache::Zstandard(ctx); + match &mut self.cache { + 
CompressorCache::Zstandard(compressor) => compressor, + _ => unreachable!(), + } + } + }; + match ctx.compress2(&mut self.buffer, buf) { + Ok(_) => Ok(true), + // dstSize_tooSmall + Err(e) if e == -70isize as usize => Ok(false), + Err(e) => Err(zstd_util::map_error_code(e)), + } + } + _ => Err(io::Error::new( + io::ErrorKind::Other, + format!("Unsupported compression: {:?}", self.kind), + )), + } + } +} + +#[cfg(feature = "compress-lzma")] +pub mod lzma_util { + use std::{ + cmp::Ordering, + io::{Error, ErrorKind, Result}, + }; + + use liblzma::stream::{Filters, LzmaOptions, Stream}; + + use crate::util::{array_ref, array_ref_mut, static_assert}; + + /// Decodes the LZMA Properties byte (lc/lp/pb). + /// See `lzma_lzma_lclppb_decode` in `liblzma/lzma/lzma_decoder.c`. + pub fn lzma_lclppb_decode(options: &mut LzmaOptions, byte: u8) -> Result<()> { + let mut d = byte as u32; + if d >= (9 * 5 * 5) { + return Err(Error::new( + ErrorKind::InvalidData, + format!("Invalid LZMA props byte: {}", d), )); } - Ordering::Equal => u32::MAX, - Ordering::Less => (2 | (d & 1)) << (d / 2 + 11), - }); - Ok(options) + options.literal_context_bits(d % 9); + d /= 9; + options.position_bits(d / 5); + options.literal_position_bits(d % 5); + Ok(()) + } + + /// Encodes the LZMA Properties byte (lc/lp/pb). + /// See `lzma_lzma_lclppb_encode` in `liblzma/lzma/lzma_encoder.c`. + pub fn lzma_lclppb_encode(options: &LzmaOptions) -> Result<u8> { + let options = get_options_sys(options); + let byte = (options.pb * 5 + options.lp) * 9 + options.lc; + if byte >= (9 * 5 * 5) { + return Err(Error::new( + ErrorKind::InvalidData, + format!("Invalid LZMA props byte: {}", byte), + )); + } + Ok(byte as u8) + } + + /// Decodes LZMA properties. + /// See `lzma_lzma_props_decode` in `liblzma/lzma/lzma_decoder.c`. + pub fn lzma_props_decode(props: &[u8]) -> Result<LzmaOptions> { + if props.len() != 5 { + return Err(Error::new( + ErrorKind::InvalidData, + format!("Invalid LZMA props length: {}", props.len()), + )); + } + let mut options = LzmaOptions::new(); + lzma_lclppb_decode(&mut options, props[0])?; + options.dict_size(u32::from_le_bytes(*array_ref![props, 1, 4])); + Ok(options) + } + + /// Encodes LZMA properties. + /// See `lzma_lzma_props_encode` in `liblzma/lzma/lzma_encoder.c`. + pub fn lzma_props_encode(options: &LzmaOptions) -> Result<[u8; 5]> { + let mut props = [0u8; 5]; + props[0] = lzma_lclppb_encode(options)?; + *array_ref_mut![props, 1, 4] = get_options_sys(options).dict_size.to_le_bytes(); + Ok(props) + } + + /// Decodes LZMA2 properties. + /// See `lzma_lzma2_props_decode` in `liblzma/lzma/lzma2_decoder.c`. + pub fn lzma2_props_decode(props: &[u8]) -> Result<LzmaOptions> { + if props.len() != 1 { + return Err(Error::new( + ErrorKind::InvalidData, + format!("Invalid LZMA2 props length: {}", props.len()), + )); + } + let d = props[0] as u32; + let mut options = LzmaOptions::new(); + options.dict_size(match d.cmp(&40) { + Ordering::Greater => { + return Err(Error::new( + ErrorKind::InvalidData, + format!("Invalid LZMA2 props byte: {}", d), + )); + } + Ordering::Equal => u32::MAX, + Ordering::Less => (2 | (d & 1)) << (d / 2 + 11), + }); + Ok(options) + }
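For reference, the LZMA props byte packs the three literal/position parameters as `(pb * 5 + lp) * 9 + lc`, and the LZMA2 props byte is a compressed dictionary-size exponent. A standalone sketch of the arithmetic above (pure Rust, no liblzma required; the function names are illustrative):

// Mirror of lzma_lclppb_decode and lzma2_props_decode's dict_size math.
fn decode_lclppb(byte: u8) -> Option<(u32, u32, u32)> {
    let mut d = byte as u32;
    if d >= 9 * 5 * 5 {
        return None;
    }
    let lc = d % 9; // literal context bits
    d /= 9;
    let pb = d / 5; // position bits
    let lp = d % 5; // literal position bits
    Some((lc, lp, pb))
}

fn lzma2_dict_size(d: u32) -> Option<u32> {
    match d {
        41.. => None,                             // invalid
        40 => Some(u32::MAX),                     // maximum dictionary
        _ => Some((2 | (d & 1)) << (d / 2 + 11)), // 2^n or 3 * 2^(n-1)
    }
}

fn main() {
    // 0x5D is the classic LZMA default: lc=3, lp=0, pb=2.
    assert_eq!(decode_lclppb(0x5D), Some((3, 0, 2)));
    // LZMA2 props byte 20 decodes to a 4 MiB dictionary.
    assert_eq!(lzma2_dict_size(20), Some(4 << 20));
}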
+ + /// Encodes LZMA2 properties. + /// See `lzma_lzma2_props_encode` in `liblzma/lzma/lzma2_encoder.c`. + pub fn lzma2_props_encode(options: &LzmaOptions) -> Result<[u8; 1]> { + let options = get_options_sys(options); + let mut d = options.dict_size.max(liblzma_sys::LZMA_DICT_SIZE_MIN); + + // Round up to the next 2^n - 1 or 2^n + 2^(n - 1) - 1 depending + // on which one is the next: + d -= 1; + d |= d >> 2; + d |= d >> 3; + d |= d >> 4; + d |= d >> 8; + d |= d >> 16; + + // Get the highest two bits using the proper encoding: + if d == u32::MAX { + d = 40; + } else { + d = get_dist_slot(d + 1) - 24; + } + + Ok([d as u8]) + } + + /// Creates a new raw LZMA decoder with the given options. + pub fn new_lzma_decoder(options: &LzmaOptions) -> Result<Stream> { + let mut filters = Filters::new(); + filters.lzma1(options); + Stream::new_raw_decoder(&filters).map_err(Error::from) + } + + /// Creates a new raw LZMA encoder with the given options. + pub fn new_lzma_encoder(options: &LzmaOptions) -> Result<Stream> { + let mut filters = Filters::new(); + filters.lzma1(options); + Stream::new_raw_encoder(&filters).map_err(Error::from) + } + + /// Creates a new raw LZMA2 decoder with the given options. + pub fn new_lzma2_decoder(options: &LzmaOptions) -> Result<Stream> { + let mut filters = Filters::new(); + filters.lzma2(options); + Stream::new_raw_decoder(&filters).map_err(Error::from) + } + + /// Creates a new raw LZMA2 encoder with the given options. + pub fn new_lzma2_encoder(options: &LzmaOptions) -> Result<Stream> { + let mut filters = Filters::new(); + filters.lzma2(options); + Stream::new_raw_encoder(&filters).map_err(Error::from) + } + + /// liblzma does not expose any accessors for `LzmaOptions`, so we have to + /// cast it into the internal `lzma_options_lzma` struct. + #[inline] + fn get_options_sys(options: &LzmaOptions) -> &liblzma_sys::lzma_options_lzma { + static_assert!(size_of::<LzmaOptions>() == size_of::<liblzma_sys::lzma_options_lzma>()); + unsafe { &*(options as *const LzmaOptions as *const liblzma_sys::lzma_options_lzma) } + } + + /// See `get_dist_slot` in `liblzma/lzma/fastpos.h`. + fn get_dist_slot(dist: u32) -> u32 { + if dist <= 4 { + dist + } else { + let i = dist.leading_zeros() ^ 31; + (i + i) + ((dist >> (i - 1)) & 1) + } + } } -/// Creates a new raw LZMA decoder with the given options. -#[cfg(feature = "compress-lzma")] -pub fn new_lzma_decoder<R>( - reader: R, - options: &liblzma::stream::LzmaOptions, -) -> std::io::Result<liblzma::read::XzDecoder<R>> -where - R: std::io::Read, -{ - let mut filters = liblzma::stream::Filters::new(); - filters.lzma1(options); - let stream = - liblzma::stream::Stream::new_raw_decoder(&filters).map_err(std::io::Error::from)?; - Ok(liblzma::read::XzDecoder::new_stream(reader, stream)) -} +#[cfg(feature = "compress-zstd")] +mod zstd_util { + use std::io; -/// Creates a new raw LZMA2 decoder with the given options. -#[cfg(feature = "compress-lzma")] -pub fn new_lzma2_decoder<R>( - reader: R, - options: &liblzma::stream::LzmaOptions, -) -> std::io::Result<liblzma::read::XzDecoder<R>> -where - R: std::io::Read, -{ - let mut filters = liblzma::stream::Filters::new(); - filters.lzma2(options); - let stream = - liblzma::stream::Stream::new_raw_decoder(&filters).map_err(std::io::Error::from)?; - Ok(liblzma::read::XzDecoder::new_stream(reader, stream)) + pub fn map_error_code(code: usize) -> io::Error { + let msg = zstd_safe::get_error_name(code); + io::Error::new(io::ErrorKind::Other, msg.to_string()) + } }
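Note how the compression paths signal "output full" with `Ok(false)` rather than an error (the zstd arm maps `dstSize_tooSmall` explicitly). A usage sketch under the assumption that callers fall back to storing the block uncompressed, as block-based disc writers typically do (crate-internal API):

// Compress a block; fall back to the raw bytes if it doesn't fit the
// fixed-capacity internal buffer (Compressor::compress returns Ok(false)).
fn compress_or_store(c: &mut Compressor, block: &[u8]) -> std::io::Result<Vec<u8>> {
    if c.compress(block)? {
        Ok(c.buffer.clone()) // compressed form fit within capacity
    } else {
        Ok(block.to_vec()) // bail: store uncompressed
    }
}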
-#[cfg(feature = "compress-lzma")] -pub fn new_lzma2_decoder( - reader: R, - options: &liblzma::stream::LzmaOptions, -) -> std::io::Result> -where - R: std::io::Read, -{ - let mut filters = liblzma::stream::Filters::new(); - filters.lzma2(options); - let stream = - liblzma::stream::Stream::new_raw_decoder(&filters).map_err(std::io::Error::from)?; - Ok(liblzma::read::XzDecoder::new_stream(reader, stream)) + pub fn map_error_code(code: usize) -> io::Error { + let msg = zstd_safe::get_error_name(code); + io::Error::new(io::ErrorKind::Other, msg.to_string()) + } } diff --git a/nod/src/util/digest.rs b/nod/src/util/digest.rs new file mode 100644 index 0000000..06a40d3 --- /dev/null +++ b/nod/src/util/digest.rs @@ -0,0 +1,253 @@ +use std::{thread, thread::JoinHandle}; + +use bytes::Bytes; +use crossbeam_channel::Sender; +use digest::Digest; +use tracing::instrument; + +use crate::{ + io::nkit::NKitHeader, + write::{DiscFinalization, ProcessOptions}, +}; + +pub type DigestThread = (Sender, JoinHandle); + +pub fn digest_thread() -> DigestThread +where H: Hasher + Send + 'static { + let (tx, rx) = crossbeam_channel::bounded::(1); + let handle = thread::Builder::new() + .name(format!("Digest {}", H::NAME)) + .spawn(move || { + let mut hasher = H::new(); + while let Ok(data) = rx.recv() { + hasher.update(data.as_ref()); + } + hasher.finalize() + }) + .expect("Failed to spawn digest thread"); + (tx, handle) +} + +pub struct DigestManager { + threads: Vec, +} + +impl DigestManager { + pub fn new(options: &ProcessOptions) -> Self { + let mut threads = Vec::new(); + if options.digest_crc32 { + threads.push(digest_thread::()); + } + if options.digest_md5 { + #[cfg(feature = "openssl")] + threads.push(digest_thread::()); + #[cfg(not(feature = "openssl"))] + threads.push(digest_thread::()); + } + if options.digest_sha1 { + #[cfg(feature = "openssl")] + threads.push(digest_thread::()); + #[cfg(not(feature = "openssl"))] + threads.push(digest_thread::()); + } + if options.digest_xxh64 { + threads.push(digest_thread::()); + } + DigestManager { threads } + } + + #[instrument(name = "DigestManager::send", skip_all)] + pub fn send(&self, data: Bytes) { + let mut sent = 0usize; + // Non-blocking send to all threads + for (idx, (tx, _)) in self.threads.iter().enumerate() { + if tx.try_send(data.clone()).is_ok() { + sent |= 1 << idx; + } + } + // Blocking send to any remaining threads + for (idx, (tx, _)) in self.threads.iter().enumerate() { + if sent & (1 << idx) == 0 { + tx.send(data.clone()).expect("Failed to send data to digest thread"); + } + } + } + + #[instrument(name = "DigestManager::finish", skip_all)] + pub fn finish(self) -> DigestResults { + let mut results = DigestResults { crc32: None, md5: None, sha1: None, xxh64: None }; + for (tx, handle) in self.threads { + drop(tx); // Close channel + match handle.join().unwrap() { + DigestResult::Crc32(v) => results.crc32 = Some(v), + DigestResult::Md5(v) => results.md5 = Some(v), + DigestResult::Sha1(v) => results.sha1 = Some(v), + DigestResult::Xxh64(v) => results.xxh64 = Some(v), + } + } + results + } +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum DigestResult { + Crc32(u32), + Md5([u8; 16]), + Sha1([u8; 20]), + Xxh64(u64), +} + +pub trait Hasher { + const NAME: &'static str; + + fn new() -> Self; + fn finalize(self) -> DigestResult; + fn update(&mut self, data: &[u8]); +} + +impl Hasher for md5::Md5 { + const NAME: &'static str = "MD5"; + + fn new() -> Self { Digest::new() } + + fn finalize(self) -> DigestResult { 
DigestResult::Md5(Digest::finalize(self).into()) } + + #[allow(unused_braces)] // https://github.com/rust-lang/rust/issues/116347 + #[instrument(name = "md5::Md5::update", skip_all)] + fn update(&mut self, data: &[u8]) { Digest::update(self, data) } +} + +impl Hasher for sha1::Sha1 { + const NAME: &'static str = "SHA-1"; + + fn new() -> Self { Digest::new() } + + fn finalize(self) -> DigestResult { DigestResult::Sha1(Digest::finalize(self).into()) } + + #[allow(unused_braces)] // https://github.com/rust-lang/rust/issues/116347 + #[instrument(name = "sha1::Sha1::update", skip_all)] + fn update(&mut self, data: &[u8]) { Digest::update(self, data) } +} + +impl Hasher for crc32fast::Hasher { + const NAME: &'static str = "CRC32"; + + fn new() -> Self { crc32fast::Hasher::new() } + + fn finalize(self) -> DigestResult { DigestResult::Crc32(crc32fast::Hasher::finalize(self)) } + + #[allow(unused_braces)] // https://github.com/rust-lang/rust/issues/116347 + #[instrument(name = "crc32fast::Hasher::update", skip_all)] + fn update(&mut self, data: &[u8]) { crc32fast::Hasher::update(self, data) } +} + +impl Hasher for xxhash_rust::xxh64::Xxh64 { + const NAME: &'static str = "XXH64"; + + fn new() -> Self { xxhash_rust::xxh64::Xxh64::new(0) } + + fn finalize(self) -> DigestResult { + DigestResult::Xxh64(xxhash_rust::xxh64::Xxh64::digest(&self)) + } + + #[allow(unused_braces)] // https://github.com/rust-lang/rust/issues/116347 + #[instrument(name = "xxhash_rust::xxh64::Xxh64::update", skip_all)] + fn update(&mut self, data: &[u8]) { xxhash_rust::xxh64::Xxh64::update(self, data) } +} + +#[cfg(feature = "openssl")] +mod ossl { + use tracing::instrument; + + use super::{DigestResult, Hasher}; + + pub type HasherMD5 = HashWrapper<MessageDigestMD5>; + pub type HasherSHA1 = HashWrapper<MessageDigestSHA1>; + + pub struct HashWrapper<T> + where T: MessageDigest + { + hasher: openssl::hash::Hasher, + _marker: std::marker::PhantomData<T>, + } + + impl<T> HashWrapper<T> + where T: MessageDigest + { + fn new() -> Self { + Self { + hasher: openssl::hash::Hasher::new(T::new()).unwrap(), + _marker: Default::default(), + } + } + } + + pub trait MessageDigest { + fn new() -> openssl::hash::MessageDigest; + } + + pub struct MessageDigestMD5; + + impl MessageDigest for MessageDigestMD5 { + fn new() -> openssl::hash::MessageDigest { openssl::hash::MessageDigest::md5() } + } + + pub struct MessageDigestSHA1; + + impl MessageDigest for MessageDigestSHA1 { + fn new() -> openssl::hash::MessageDigest { openssl::hash::MessageDigest::sha1() } + } + + impl Hasher for HasherMD5 { + const NAME: &'static str = "MD5"; + + fn new() -> Self { Self::new() } + + fn finalize(mut self) -> DigestResult { + DigestResult::Md5((*self.hasher.finish().unwrap()).try_into().unwrap()) + } + + #[allow(unused_braces)] // https://github.com/rust-lang/rust/issues/116347 + #[instrument(name = "ossl::HasherMD5::update", skip_all)] + fn update(&mut self, data: &[u8]) { self.hasher.update(data).unwrap() } + } + + impl Hasher for HasherSHA1 { + const NAME: &'static str = "SHA-1"; + + fn new() -> Self { Self::new() } + + fn finalize(mut self) -> DigestResult { + DigestResult::Sha1((*self.hasher.finish().unwrap()).try_into().unwrap()) + } + + #[allow(unused_braces)] // https://github.com/rust-lang/rust/issues/116347 + #[instrument(name = "ossl::HasherSHA1::update", skip_all)] + fn update(&mut self, data: &[u8]) { self.hasher.update(data).unwrap() } + } +} + +pub struct DigestResults { + pub crc32: Option<u32>, + pub md5: Option<[u8; 16]>, + pub sha1: Option<[u8; 20]>, + pub xxh64: Option<u64>, +} + +impl DiscFinalization { + pub(crate) fn apply_digests(&mut self, results: &DigestResults) { + self.crc32 = results.crc32; + self.md5 = results.md5; + self.sha1 = results.sha1; + self.xxh64 = results.xxh64; + } +} + +impl NKitHeader { + pub(crate) fn apply_digests(&mut self, results: &DigestResults) { + self.crc32 = results.crc32; + self.md5 = results.md5; + self.sha1 = results.sha1; + self.xxh64 = results.xxh64; + } +}
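How the pieces compose: `DigestManager::new` spawns one thread per enabled digest, `send` fans a shared `Bytes` chunk out to every thread (the two-pass try_send/send keeps fast hashers from blocking on slow ones), and `finish` closes the channels, joins, and collects. A short crate-internal sketch:

use bytes::Bytes;

// Hash a sequence of chunks on background digest threads.
fn hash_chunks(chunks: Vec<Bytes>, options: &ProcessOptions) -> DigestResults {
    let manager = DigestManager::new(options);
    for chunk in chunks {
        manager.send(chunk); // Bytes clones share one allocation
    }
    manager.finish() // drop senders, join threads, collect results
}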
diff --git a/nod/src/util/lfg.rs b/nod/src/util/lfg.rs index 1e43abd..ec14cbc 100644 --- a/nod/src/util/lfg.rs +++ b/nod/src/util/lfg.rs @@ -1,22 +1,30 @@ +//! Lagged Fibonacci generator for GC / Wii partition junk data. + use std::{ cmp::min, io, io::{Read, Write}, }; +use bytes::Buf; use zerocopy::{transmute_ref, IntoBytes}; use crate::disc::SECTOR_SIZE; +/// Value of `k` for the LFG. pub const LFG_K: usize = 521; + +/// Value of `j` for the LFG. pub const LFG_J: usize = 32; + +/// Number of 32-bit words in the seed. pub const SEED_SIZE: usize = 17; /// Lagged Fibonacci generator for GC / Wii partition junk data. /// /// References (license CC0-1.0): -/// https://github.com/dolphin-emu/dolphin/blob/a0f555648c27ec0c928f6b1e1fcad5e2d7c4d0c4/docs/WiaAndRvz.md -/// https://github.com/dolphin-emu/dolphin/blob/a0f555648c27ec0c928f6b1e1fcad5e2d7c4d0c4/Source/Core/DiscIO/LaggedFibonacciGenerator.cpp +/// - [WiaAndRvz.md](https://github.com/dolphin-emu/dolphin/blob/a0f555648c27ec0c928f6b1e1fcad5e2d7c4d0c4/docs/WiaAndRvz.md) +/// - [LaggedFibonacciGenerator.cpp](https://github.com/dolphin-emu/dolphin/blob/a0f555648c27ec0c928f6b1e1fcad5e2d7c4d0c4/Source/Core/DiscIO/LaggedFibonacciGenerator.cpp) pub struct LaggedFibonacci { buffer: [u32; LFG_K], position: usize, @@ -46,7 +54,6 @@ impl LaggedFibonacci { /// Initializes the LFG with the standard seed for a given disc ID, disc number, and sector. /// The partition offset is used to determine the sector and how many bytes to skip within the /// sector. - #[allow(clippy::missing_inline_in_public_items)] pub fn init_with_seed(&mut self, disc_id: [u8; 4], disc_num: u8, partition_offset: u64) { let seed = u32::from_be_bytes([ disc_id[2], @@ -73,7 +80,6 @@ impl LaggedFibonacci { /// Initializes the LFG with the seed read from a reader. The seed is assumed to be big-endian. /// This is used for rebuilding junk data in WIA/RVZ files. - #[allow(clippy::missing_inline_in_public_items)] pub fn init_with_reader<R>(&mut self, reader: &mut R) -> io::Result<()> where R: Read + ?Sized { reader.read_exact(self.buffer[..SEED_SIZE].as_mut_bytes())?; @@ -85,6 +91,22 @@ impl LaggedFibonacci { Ok(()) } + /// Initializes the LFG with the seed read from a [`Buf`]. The seed is assumed to be big-endian. + /// This is used for rebuilding junk data in WIA/RVZ files. + pub fn init_with_buf(&mut self, reader: &mut impl Buf) -> io::Result<()> { + let out = self.buffer[..SEED_SIZE].as_mut_bytes(); + if reader.remaining() < out.len() { + return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "Filling LFG seed")); + } + reader.copy_to_slice(out); + for x in self.buffer[..SEED_SIZE].iter_mut() { + *x = u32::from_be(*x); + } + self.position = 0; + self.init(); + Ok(()) + } + /// Advances the LFG by one step. fn forward(&mut self) { for i in 0..LFG_J { @@ -96,7 +118,6 @@ impl LaggedFibonacci { } /// Skips `n` bytes of junk data. 
- #[allow(clippy::missing_inline_in_public_items)] pub fn skip(&mut self, n: usize) { self.position += n; while self.position >= LFG_K * 4 { @@ -105,8 +126,22 @@ impl LaggedFibonacci { } } + // pub fn backward(&mut self) { + // for i in (LFG_J..LFG_K).rev() { + // self.buffer[i] ^= self.buffer[i - LFG_J]; + // } + // for i in (0..LFG_J).rev() { + // self.buffer[i] ^= self.buffer[i + LFG_K - LFG_J]; + // } + // } + + // pub fn get_seed(&mut self, seed: &mut [u8; SEED_SIZE]) { + // for i in 0..SEED_SIZE { + // seed[i] = self.buffer[i].to_be_bytes()[3]; + // } + // } + /// Fills the buffer with junk data. - #[allow(clippy::missing_inline_in_public_items)] pub fn fill(&mut self, mut buf: &mut [u8]) { while !buf.is_empty() { let len = min(buf.len(), LFG_K * 4 - self.position); @@ -122,7 +157,6 @@ impl LaggedFibonacci { } /// Writes junk data to the output stream. - #[allow(clippy::missing_inline_in_public_items)] pub fn write<W>(&mut self, w: &mut W, mut len: u64) -> io::Result<()> where W: Write + ?Sized { while len > 0 { @@ -141,7 +175,6 @@ impl LaggedFibonacci { /// The junk data on GC / Wii discs is reinitialized every 32KB. This function handles the /// wrapping logic and reinitializes the LFG at sector boundaries. - #[allow(clippy::missing_inline_in_public_items)] pub fn fill_sector_chunked( &mut self, mut buf: &mut [u8], @@ -161,7 +194,6 @@ impl LaggedFibonacci { /// The junk data on GC / Wii discs is reinitialized every 32KB. This function handles the /// wrapping logic and reinitializes the LFG at sector boundaries. - #[allow(clippy::missing_inline_in_public_items)] pub fn write_sector_chunked<W>( &mut self, w: &mut W, @@ -182,6 +214,33 @@ impl LaggedFibonacci { } Ok(()) } + + /// Checks if the data matches the junk data generated by the LFG. This function handles the + /// wrapping logic and reinitializes the LFG at sector boundaries. + pub fn check_sector_chunked( + &mut self, + mut buf: &[u8], + disc_id: [u8; 4], + disc_num: u8, + mut partition_offset: u64, + ) -> bool { + if buf.is_empty() { + return false; + } + let mut lfg_buf = [0u8; SECTOR_SIZE]; + while !buf.is_empty() { + self.init_with_seed(disc_id, disc_num, partition_offset); + let len = + (SECTOR_SIZE - (partition_offset % SECTOR_SIZE as u64) as usize).min(buf.len()); + self.fill(&mut lfg_buf[..len]); + if buf[..len] != lfg_buf[..len] { + return false; + } + buf = &buf[len..]; + partition_offset += len as u64; + } + true + } } #[cfg(test)]
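The new `check_sector_chunked` is what lets a writer detect junk-only regions and store just the seed parameters instead of the data. A usage sketch (the diff makes `lfg` a public module; the exact import path depends on how `util` is re-exported):

use nod::util::lfg::LaggedFibonacci; // path assumed from `pub mod lfg` above

// True if `block` is exactly the LFG junk stream for this disc/offset.
fn is_junk(block: &[u8], disc_id: [u8; 4], disc_num: u8, partition_offset: u64) -> bool {
    let mut lfg = LaggedFibonacci::default();
    lfg.check_sector_chunked(block, disc_id, disc_num, partition_offset)
}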
diff --git a/nod/src/util/mod.rs b/nod/src/util/mod.rs index 5d558b0..9610905 100644 --- a/nod/src/util/mod.rs +++ b/nod/src/util/mod.rs @@ -1,20 +1,139 @@ -use std::ops::{Div, Rem}; +//! Utility functions and types. +use std::{ + io, + io::{Read, Seek, SeekFrom}, + ops::{Div, Rem}, +}; + +use io::{BufRead, Write}; + +pub(crate) mod aes; pub(crate) mod compress; -pub(crate) mod lfg; +pub(crate) mod digest; +pub mod lfg; pub(crate) mod read; -pub(crate) mod take_seek; + +/// Copies from a buffered reader to a writer without extra allocations. +pub fn buf_copy<R, W>(reader: &mut R, writer: &mut W) -> io::Result<u64> +where + R: BufRead + ?Sized, + W: Write + ?Sized, +{ + let mut copied = 0; + loop { + let buf = reader.fill_buf()?; + let len = buf.len(); + if len == 0 { + break; + } + writer.write_all(buf)?; + reader.consume(len); + copied += len as u64; + } + Ok(copied) +} + +/// A reader with a fixed window. +#[derive(Clone)] +pub struct WindowedReader<T> +where T: BufRead + Seek +{ + base: T, + pos: u64, + begin: u64, + end: u64, +} + +impl<T> WindowedReader<T> +where T: BufRead + Seek +{ + /// Creates a new windowed stream with offset and size. + /// + /// Seeks underlying stream immediately. + #[inline] + pub fn new(mut base: T, offset: u64, size: u64) -> io::Result<Self> { + base.seek(SeekFrom::Start(offset))?; + Ok(Self { base, pos: offset, begin: offset, end: offset + size }) + } + + /// Returns the length of the window. + #[inline] + #[allow(clippy::len_without_is_empty)] + pub fn len(&self) -> u64 { self.end - self.begin } +} + +impl<T> Read for WindowedReader<T> +where T: BufRead + Seek +{ + #[inline] + fn read(&mut self, out: &mut [u8]) -> io::Result<usize> { + let buf = self.fill_buf()?; + let len = buf.len().min(out.len()); + out[..len].copy_from_slice(&buf[..len]); + self.consume(len); + Ok(len) + } +} + +impl<T> BufRead for WindowedReader<T> +where T: BufRead + Seek +{ + #[inline] + fn fill_buf(&mut self) -> io::Result<&[u8]> { + let limit = self.end.saturating_sub(self.pos); + if limit == 0 { + return Ok(&[]); + } + let buf = self.base.fill_buf()?; + let max = (buf.len() as u64).min(limit) as usize; + Ok(&buf[..max]) + } + + #[inline] + fn consume(&mut self, amt: usize) { + self.base.consume(amt); + self.pos += amt as u64; + } +} + +impl<T> Seek for WindowedReader<T> +where T: BufRead + Seek +{ + #[inline] + fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> { + let mut pos = match pos { + SeekFrom::Start(p) => self.begin + p, + SeekFrom::End(p) => self.end.saturating_add_signed(p), + SeekFrom::Current(p) => self.pos.saturating_add_signed(p), + }; + if pos < self.begin { + pos = self.begin; + } else if pos > self.end { + pos = self.end; + } + let result = self.base.seek(SeekFrom::Start(pos))?; + self.pos = result; + Ok(result - self.begin) + } + + #[inline] + fn stream_position(&mut self) -> io::Result<u64> { Ok(self.pos) } +}
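A quick demonstration of `WindowedReader` semantics with an in-memory stream (`Cursor<&[u8]>` implements `BufRead + Seek`; positions reported to the caller are window-relative):

use std::io::{Cursor, Read, Seek, SeekFrom};

fn window_demo() -> std::io::Result<()> {
    let data = b"0123456789";
    // Expose bytes 4..8 of the underlying stream as a standalone reader.
    let mut win = WindowedReader::new(Cursor::new(&data[..]), 4, 4)?;
    let mut buf = Vec::new();
    win.read_to_end(&mut buf)?;
    assert_eq!(&buf, b"4567"); // reads are clamped to the window
    assert_eq!(win.seek(SeekFrom::Start(0))?, 0); // 0 maps to underlying offset 4
    Ok(())
}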
 #[inline(always)] pub(crate) fn div_rem<T>(x: T, y: T) -> (T, T) where T: Div<Output = T> + Rem<Output = T> + Copy { - let quot = x / y; - let rem = x % y; - (quot, rem) + (x / y, x % y) } +#[inline] +pub(crate) fn align_up_32(n: u32, align: u32) -> u32 { (n + align - 1) & !(align - 1) } + +#[inline] +pub(crate) fn align_up_64(n: u64, align: u64) -> u64 { (n + align - 1) & !(align - 1) } + /// Creates a fixed-size array reference from a slice. -#[macro_export] macro_rules! array_ref { ($slice:expr, $offset:expr, $size:expr) => {{ #[inline(always)] @@ -24,9 +143,9 @@ macro_rules! array_ref { to_array(&$slice[$offset..$offset + $size]) }}; } +pub(crate) use array_ref; /// Creates a mutable fixed-size array reference from a slice. -#[macro_export] macro_rules! array_ref_mut { ($slice:expr, $offset:expr, $size:expr) => {{ #[inline(always)] @@ -36,11 +155,28 @@ macro_rules! array_ref_mut { to_array(&mut $slice[$offset..$offset + $size]) }}; } +pub(crate) use array_ref_mut; /// Compile-time assertion. -#[macro_export] macro_rules! static_assert { ($condition:expr) => { const _: () = core::assert!($condition); }; } +pub(crate) use static_assert; + +macro_rules! impl_read_for_bufread { + ($ty:ident) => { + impl std::io::Read for $ty { + fn read(&mut self, out: &mut [u8]) -> std::io::Result<usize> { + use std::io::BufRead; + let buf = self.fill_buf()?; + let len = buf.len().min(out.len()); + out[..len].copy_from_slice(&buf[..len]); + self.consume(len); + Ok(len) + } + } + }; +} +pub(crate) use impl_read_for_bufread; diff --git a/nod/src/util/read.rs b/nod/src/util/read.rs index b0b30b8..51052e0 100644 --- a/nod/src/util/read.rs +++ b/nod/src/util/read.rs @@ -1,4 +1,4 @@ -use std::{io, io::Read}; +use std::{io, io::Read, sync::Arc}; use zerocopy::{FromBytes, FromZeros, IntoBytes}; @@ -36,6 +36,16 @@ where Ok(ret) } +#[inline(always)] +pub fn read_arc<T, R>(reader: &mut R) -> io::Result<Arc<T>> +where + T: FromBytes + IntoBytes, + R: Read + ?Sized, +{ + // TODO use Arc::new_zeroed once it's stable + read_box(reader).map(Arc::from) +} + #[inline(always)] pub fn read_box_slice<T, R>(reader: &mut R, count: usize) -> io::Result<Box<[T]>> where @@ -48,6 +58,16 @@ where Ok(ret) } +#[inline(always)] +pub fn read_arc_slice<T, R>(reader: &mut R, count: usize) -> io::Result<Arc<[T]>> +where + T: FromBytes + IntoBytes, + R: Read + ?Sized, +{ + // TODO use Arc::new_zeroed once it's stable + read_box_slice(reader, count).map(Arc::from) +} + #[inline(always)] pub fn read_u16_be<R>(reader: &mut R) -> io::Result<u16> where R: Read + ?Sized { @@ -71,3 +91,26 @@ where R: Read + ?Sized { reader.read_exact(&mut buf)?; Ok(u64::from_be_bytes(buf)) } + +pub fn read_with_zero_fill<R>(r: &mut R, mut buf: &mut [u8]) -> io::Result<usize> +where R: Read + ?Sized { + let mut total = 0; + while !buf.is_empty() { + let read = r.read(buf)?; + if read == 0 { + // Fill remaining block with zeroes + buf.fill(0); + break; + } + buf = &mut buf[read..]; + total += read; + } + Ok(total) +} + +pub fn box_to_bytes<T>(b: Box<T>) -> Box<[u8]> +where T: IntoBytes { + let p = Box::into_raw(b); + let sp = unsafe { std::slice::from_raw_parts_mut(p as *mut u8, size_of::<T>()) }; + unsafe { Box::from_raw(sp) } +}
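`read_with_zero_fill` returns how many bytes were actually read and zeroes the unread tail of the block, so short reads at end-of-disc never leave stale data. A standalone demonstration with `Cursor` (the helper itself is crate-internal):

use std::io::Cursor;

fn zero_fill_demo() -> std::io::Result<()> {
    let mut src = Cursor::new(&b"abc"[..]);
    let mut block = [0xFFu8; 8];
    let n = read_with_zero_fill(&mut src, &mut block)?;
    assert_eq!(n, 3); // bytes actually read from the source
    assert_eq!(&block, b"abc\0\0\0\0\0"); // remainder zero-filled
    Ok(())
}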
diff --git a/nod/src/util/take_seek.rs b/nod/src/util/take_seek.rs deleted file mode 100644 index 1f3710c..0000000 --- a/nod/src/util/take_seek.rs +++ /dev/null @@ -1,127 +0,0 @@ -// From https://github.com/jam1garner/binrw/blob/e96a1320287ec83d1f471525ffa380800ec9e124/binrw/src/io/take_seek.rs -// MIT License -// -// Copyright (c) jam1garner and other contributors -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. -#![allow(dead_code)] -//! Types for seekable reader adapters which limit the number of bytes read from -//! the underlying reader. - -use std::io::{Read, Result, Seek, SeekFrom}; - -/// Read adapter which limits the bytes read from an underlying reader, with -/// seek support. -/// -/// This struct is generally created by importing the [`TakeSeekExt`] extension -/// and calling [`take_seek`] on a reader. -/// -/// [`take_seek`]: TakeSeekExt::take_seek -#[derive(Debug)] -pub struct TakeSeek<T> { - inner: T, - pos: u64, - end: u64, -} - -impl<T> TakeSeek<T> { - /// Gets a reference to the underlying reader. - pub fn get_ref(&self) -> &T { &self.inner } - - /// Gets a mutable reference to the underlying reader. - /// - /// Care should be taken to avoid modifying the internal I/O state of the - /// underlying reader as doing so may corrupt the internal limit of this - /// `TakeSeek`. - pub fn get_mut(&mut self) -> &mut T { &mut self.inner } - - /// Consumes this wrapper, returning the wrapped value. - pub fn into_inner(self) -> T { self.inner } - - /// Returns the number of bytes that can be read before this instance will - /// return EOF. - /// - /// # Note - /// - /// This instance may reach EOF after reading fewer bytes than indicated by - /// this method if the underlying [`Read`] instance reaches EOF. - pub fn limit(&self) -> u64 { self.end.saturating_sub(self.pos) } -} - -impl<T: Seek> TakeSeek<T> { - /// Sets the number of bytes that can be read before this instance will - /// return EOF. This is the same as constructing a new `TakeSeek` instance, - /// so the amount of bytes read and the previous limit value don’t matter - /// when calling this method. - /// - /// # Panics - /// - /// Panics if the inner stream returns an error from `stream_position`. - pub fn set_limit(&mut self, limit: u64) { - let pos = self.inner.stream_position().expect("cannot get position for `set_limit`"); - self.pos = pos; - self.end = pos + limit; - } -} - -impl<T: Read> Read for TakeSeek<T> { - fn read(&mut self, buf: &mut [u8]) -> Result<usize> { - let limit = self.limit(); - - // Don't call into inner reader at all at EOF because it may still block - if limit == 0 { - return Ok(0); - } - - // Lint: It is impossible for this cast to truncate because the value - // being cast is the minimum of two values, and one of the value types - // is already `usize`. - #[allow(clippy::cast_possible_truncation)] - let max = (buf.len() as u64).min(limit) as usize; - let n = self.inner.read(&mut buf[0..max])?; - self.pos += n as u64; - Ok(n) - } -} - -impl<T: Seek> Seek for TakeSeek<T> { - fn seek(&mut self, pos: SeekFrom) -> Result<u64> { - self.pos = self.inner.seek(pos)?; - Ok(self.pos) - } - - fn stream_position(&mut self) -> Result<u64> { Ok(self.pos) } -} - -/// An extension trait that implements `take_seek()` for compatible streams. -pub trait TakeSeekExt { - /// Creates an adapter which will read at most `limit` bytes from the - /// wrapped stream. - fn take_seek(self, limit: u64) -> TakeSeek<Self> - where Self: Sized; -} - -impl<T: Read + Seek> TakeSeekExt for T { - fn take_seek(mut self, limit: u64) -> TakeSeek<Self> - where Self: Sized { - let pos = self.stream_position().expect("cannot get position for `take_seek`"); - - TakeSeek { inner: self, pos, end: pos + limit } - } -} diff --git a/nod/src/write.rs b/nod/src/write.rs new file mode 100644 index 0000000..7131e48 --- /dev/null +++ b/nod/src/write.rs @@ -0,0 +1,163 @@ +//! [`DiscWriter`] and associated types. + +use bytes::Bytes; + +use crate::{ + common::{Compression, Format}, + disc, + read::DiscReader, + Error, Result, +}; + +/// Options for writing a disc image. 
+#[derive(Default, Debug, Clone)] +pub struct FormatOptions { + /// The disc format to write. + pub format: Format, + /// The compression algorithm to use for the output format, if supported. + /// + /// If unsure, use [`Format::default_compression`] to get the default compression for the format. + pub compression: Compression, + /// Block size to use. + /// + /// If unsure, use [`Format::default_block_size`] to get the default block size for the format. + pub block_size: u32, +} + +impl FormatOptions { + /// Creates options for the specified format. + /// Uses the default compression and block size for the format. + #[inline] + pub fn new(format: Format) -> FormatOptions { + FormatOptions { + format, + compression: format.default_compression(), + block_size: format.default_block_size(), + } + } +} + +/// Options for processing a disc image writer. +#[derive(Default, Debug, Clone)] +pub struct ProcessOptions { + /// If the output format supports multithreaded processing, this sets the number of threads to + /// use for processing data. This is particularly useful for formats that compress data or + /// perform other transformations. The default value of 0 disables multithreading. + pub processor_threads: usize, + /// Enables CRC32 checksum calculation for the disc data. + /// + /// If the output format supports it, this will be stored in the disc data. (NKit 2 compatible) + /// Each digest calculation will run on a separate thread, unaffected by the processor thread + /// count. + pub digest_crc32: bool, + /// Enables MD5 checksum calculation for the disc data. (Slow!) + /// + /// If the output format supports it, this will be stored in the disc data. (NKit 2 compatible) + /// Each digest calculation will run on a separate thread, unaffected by the processor thread + /// count. + pub digest_md5: bool, + /// Enables SHA-1 checksum calculation for the disc data. + /// + /// If the output format supports it, this will be stored in the disc data. (NKit 2 compatible) + /// Each digest calculation will run on a separate thread, unaffected by the processor thread + /// count. + pub digest_sha1: bool, + /// Enables XXH64 checksum calculation for the disc data. + /// + /// If the output format supports it, this will be stored in the disc data. (NKit 2 compatible) + /// Each digest calculation will run on a separate thread, unaffected by the processor thread + /// count. + pub digest_xxh64: bool, +} + +/// A constructed disc writer. +/// +/// This is the primary entry point for writing disc images. +#[derive(Clone)] +pub struct DiscWriter { + inner: Box<dyn disc::writer::DiscWriter>, +} + +impl DiscWriter { + /// Creates a new disc writer with the specified format options. 
+ #[inline] + pub fn new(disc: DiscReader, options: &FormatOptions) -> Result<DiscWriter> { + let mut options = options.clone(); + options.compression.validate_level()?; + let mut reader = disc.into_inner(); + reader.reset(); + let inner = match options.format { + Format::Iso => { + if options.compression != Compression::None { + return Err(Error::Other("ISO/GCM does not support compression".to_string())); + } + Box::new(reader) + } + Format::Ciso => crate::io::ciso::DiscWriterCISO::new(reader, &options)?, + #[cfg(feature = "compress-zlib")] + Format::Gcz => crate::io::gcz::DiscWriterGCZ::new(reader, &options)?, + Format::Tgc => crate::io::tgc::DiscWriterTGC::new(reader, &options)?, + Format::Wbfs => crate::io::wbfs::DiscWriterWBFS::new(reader, &options)?, + Format::Wia | Format::Rvz => crate::io::wia::DiscWriterWIA::new(reader, &options)?, + format => return Err(Error::Other(format!("Unsupported write format: {format}"))), + }; + Ok(DiscWriter { inner }) + } + + /// Processes the disc writer to completion, calling the data callback, in order, for each block + /// of data to write to the output file. The callback should write all data before returning, or + /// return an error if writing fails. + #[inline] + pub fn process( + &self, + mut data_callback: impl FnMut(Bytes, u64, u64) -> std::io::Result<()> + Send, + options: &ProcessOptions, + ) -> Result<DiscFinalization> { + self.inner.process(&mut data_callback, options) + } + + /// Returns the progress upper bound for the disc writer. For most formats, this has no + /// relation to the written disc size, but can be used to display progress. + #[inline] + pub fn progress_bound(&self) -> u64 { self.inner.progress_bound() } + + /// Returns the weight of the disc writer, which can help determine the number of threads to + /// dedicate for output processing. This may depend on the format's configuration, such as + /// whether compression is enabled. + #[inline] + pub fn weight(&self) -> DiscWriterWeight { self.inner.weight() } +} + +/// Data returned by the disc writer after processing. +/// +/// If header data is provided, the consumer should seek to the beginning of the output stream and +/// write the header data, overwriting any existing data. Otherwise, the output disc will be +/// invalid. +#[derive(Default, Clone)] +pub struct DiscFinalization { + /// Header data to write to the beginning of the output stream, if any. + pub header: Bytes, + /// The calculated CRC32 checksum of the input disc data, if any. + pub crc32: Option<u32>, + /// The calculated MD5 hash of the input disc data, if any. + pub md5: Option<[u8; 16]>, + /// The calculated SHA-1 hash of the input disc data, if any. + pub sha1: Option<[u8; 20]>, + /// The calculated XXH64 hash of the input disc data, if any. + pub xxh64: Option<u64>, +} + +/// The weight of a disc writer, which can help determine the number of threads to use for +/// processing. +pub enum DiscWriterWeight { + /// The writer performs little to no processing of the input data, and is mostly I/O bound. + /// This means that this writer does not benefit from parallelization, and will ignore the + /// number of threads specified. + Light, + /// The writer performs some processing of the input data, and is somewhat CPU bound. This means + /// that this writer benefits from parallelization, but not as much as a heavy writer. + Medium, + /// The writer performs significant processing of the input data, and is mostly CPU bound. This + /// means that this writer benefits from parallelization. + Heavy, +}
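Putting the writer API together (a sketch, not the crate's own consumer; nodtool's `convert_and_verify` is the real one): stream the blocks to a file in order, then honor `DiscFinalization::header` by rewriting the start of the output. Signatures follow the usage elsewhere in this diff.

use std::{fs::File, io::{Seek, SeekFrom, Write}};
use nod::{
    read::{DiscOptions, DiscReader},
    write::{DiscWriter, FormatOptions, ProcessOptions},
};

fn convert(input: &str, output: &str) -> nod::Result<()> {
    let disc = DiscReader::new(input, &DiscOptions::default())?;
    let writer = DiscWriter::new(disc, &FormatOptions::new(nod::common::Format::Rvz))?;
    let mut out = File::create(output).expect("create output");
    let finalization = writer.process(
        |data, _pos, _total| out.write_all(&data), // blocks arrive in order
        &ProcessOptions { digest_crc32: true, ..Default::default() },
    )?;
    if !finalization.header.is_empty() {
        // Overwrite the placeholder header at the beginning of the stream.
        out.seek(SeekFrom::Start(0)).expect("seek");
        out.write_all(&finalization.header).expect("write header");
    }
    Ok(())
}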
diff --git a/nodtool/Cargo.toml b/nodtool/Cargo.toml index 6764958..69f5a4e 100644 --- a/nodtool/Cargo.toml +++ b/nodtool/Cargo.toml @@ -16,31 +16,30 @@ categories = ["command-line-utilities", "parser-implementations"] build = "build.rs" [features] -asm = ["md-5/asm", "nod/asm", "sha1/asm"] -nightly = ["crc32fast/nightly"] +openssl = ["nod/openssl"] +openssl-vendored = ["nod/openssl-vendored"] +tracy = ["dep:tracing-tracy"] [dependencies] argp = "0.3" -base16ct = "0.2" crc32fast = "1.4" -digest = "0.10" +digest = { workspace = true } enable-ansi-support = "0.2" hex = { version = "0.4", features = ["serde"] } indicatif = "0.17" -itertools = "0.13" -log = "0.4" -md-5 = "0.10" +md-5 = { workspace = true } nod = { version = "2.0.0-alpha", path = "../nod" } -quick-xml = { version = "0.36", features = ["serialize"] } +num_cpus = "1.16" +quick-xml = { version = "0.37", features = ["serialize"] } serde = { version = "1.0", features = ["derive"] } -sha1 = "0.10" +sha1 = { workspace = true } size = "0.4" supports-color = "3.0" -tracing = "0.1" +tracing = { workspace = true } tracing-attributes = "0.1" tracing-subscriber = { version = "0.3", features = ["env-filter"] } -xxhash-rust = { version = "0.8", features = ["xxh64"] } -zerocopy = { version = "0.8", features = ["alloc", "derive"] } +tracing-tracy = { version = "0.11", features = ["flush-on-exit"], optional = true } +zerocopy = { workspace = true } zstd = "0.13" [target.'cfg(target_env = "musl")'.dependencies] @@ -48,7 +47,7 @@ mimalloc = "0.1" [build-dependencies] hex = { version = "0.4", features = ["serde"] } -quick-xml = { version = "0.36", features = ["serialize"] } +quick-xml = { version = "0.37", features = ["serialize"] } serde = { version = "1.0", features = ["derive"] } zerocopy = { version = "0.8", features = ["alloc", "derive"] } zstd = "0.13" diff --git a/nodtool/src/cmd/convert.rs b/nodtool/src/cmd/convert.rs index 8af1e42..a720607 100644 --- a/nodtool/src/cmd/convert.rs +++ b/nodtool/src/cmd/convert.rs @@ -1,9 +1,13 @@ -use std::path::PathBuf; +use std::{ffi::OsStr, path::PathBuf}; use argp::FromArgs; -use nod::OpenOptions; +use nod::{ + common::Format, + read::{DiscOptions, PartitionEncryption}, + write::FormatOptions, +}; -use crate::util::{redump, shared::convert_and_verify}; +use crate::util::{path_display, redump, shared::convert_and_verify}; #[derive(FromArgs, Debug)] /// Converts a disc image to ISO. @@ -27,6 +31,9 @@ pub struct Args { #[argp(switch)] /// encrypt Wii partition data encrypt: bool, + #[argp(option, short = 'c')] + /// compression format and level (e.g. 
"zstd:19") + compress: Option, } pub fn run(args: Args) -> nod::Result<()> { @@ -34,15 +41,46 @@ pub fn run(args: Args) -> nod::Result<()> { println!("Loading dat files..."); redump::load_dats(args.dat.iter().map(PathBuf::as_ref))?; } - let options = OpenOptions { + let options = DiscOptions { partition_encryption: match (args.decrypt, args.encrypt) { - (true, false) => nod::PartitionEncryptionMode::ForceDecrypted, - (false, true) => nod::PartitionEncryptionMode::ForceEncrypted, - (false, false) => nod::PartitionEncryptionMode::Original, + (true, false) => PartitionEncryption::ForceDecrypted, + (false, true) => PartitionEncryption::ForceEncrypted, + (false, false) => PartitionEncryption::Original, (true, true) => { return Err(nod::Error::Other("Both --decrypt and --encrypt specified".to_string())) } }, + preloader_threads: 4, }; - convert_and_verify(&args.file, Some(&args.out), args.md5, &options) + let format = match args.out.extension() { + Some(ext) + if ext.eq_ignore_ascii_case(OsStr::new("iso")) + || ext.eq_ignore_ascii_case(OsStr::new("gcm")) => + { + Format::Iso + } + Some(ext) if ext.eq_ignore_ascii_case(OsStr::new("ciso")) => Format::Ciso, + Some(ext) if ext.eq_ignore_ascii_case(OsStr::new("gcz")) => Format::Gcz, + Some(ext) if ext.eq_ignore_ascii_case(OsStr::new("nfs")) => Format::Nfs, + Some(ext) if ext.eq_ignore_ascii_case(OsStr::new("rvz")) => Format::Rvz, + Some(ext) if ext.eq_ignore_ascii_case(OsStr::new("wbfs")) => Format::Wbfs, + Some(ext) if ext.eq_ignore_ascii_case(OsStr::new("wia")) => Format::Wia, + Some(ext) if ext.eq_ignore_ascii_case(OsStr::new("tgc")) => Format::Tgc, + Some(_) => { + return Err(nod::Error::Other(format!( + "Unknown file extension: {}", + path_display(&args.out) + ))) + } + None => Format::Iso, + }; + let mut compression = if let Some(compress) = args.compress { + compress.parse()? + } else { + format.default_compression() + }; + compression.validate_level()?; + let format_options = + FormatOptions { format, compression, block_size: format.default_block_size() }; + convert_and_verify(&args.file, Some(&args.out), args.md5, &options, &format_options) } diff --git a/nodtool/src/cmd/dat.rs b/nodtool/src/cmd/dat.rs index c306050..f974c65 100644 --- a/nodtool/src/cmd/dat.rs +++ b/nodtool/src/cmd/dat.rs @@ -1,24 +1,19 @@ use std::{ - cmp::min, collections::BTreeMap, fmt, - io::Read, path::{Path, PathBuf}, - sync::{mpsc::sync_channel, Arc}, - thread, }; use argp::FromArgs; use indicatif::{ProgressBar, ProgressState, ProgressStyle}; -use nod::{Disc, OpenOptions, PartitionEncryptionMode, Result, ResultContext}; -use zerocopy::FromZeros; - -use crate::util::{ - digest::{digest_thread, DigestResult}, - redump, - redump::GameResult, +use nod::{ + read::{DiscOptions, DiscReader, PartitionEncryption}, + write::{DiscWriter, FormatOptions, ProcessOptions}, + Result, ResultContext, }; +use crate::util::{redump, redump::GameResult}; + #[derive(FromArgs, Debug)] /// Commands related to DAT files. 
#[argp(subcommand, name = "dat")] @@ -165,9 +160,9 @@ struct DiscHashes { } fn load_disc(path: &Path, name: &str, full_verify: bool) -> Result { - let options = OpenOptions { partition_encryption: PartitionEncryptionMode::Original }; - let mut disc = Disc::new_with_options(path, &options)?; - let disc_size = disc.disc_size(); + let options = + DiscOptions { partition_encryption: PartitionEncryption::Original, preloader_threads: 4 }; + let disc = DiscReader::new(path, &options)?; if !full_verify { let meta = disc.meta(); if let (Some(crc32), Some(sha1)) = (meta.crc32, meta.sha1) { @@ -175,7 +170,8 @@ fn load_disc(path: &Path, name: &str, full_verify: bool) -> Result { } } - let pb = ProgressBar::new(disc_size).with_message(format!("{}:", name)); + let disc_writer = DiscWriter::new(disc, &FormatOptions::default())?; + let pb = ProgressBar::new(disc_writer.progress_bound()).with_message(format!("{}:", name)); pb.set_style(ProgressStyle::with_template("{msg} {spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {bytes}/{total_bytes} ({bytes_per_sec}, {eta})") .unwrap() .with_key("eta", |state: &ProgressState, w: &mut dyn fmt::Write| { @@ -183,47 +179,22 @@ fn load_disc(path: &Path, name: &str, full_verify: bool) -> Result { }) .progress_chars("#>-")); - const BUFFER_SIZE: usize = 1015808; // LCM(0x8000, 0x7C00) - let digest_threads = [digest_thread::(), digest_thread::()]; - - let (w_tx, w_rx) = sync_channel::>(1); - let w_thread = thread::spawn(move || { - let mut total_written = 0u64; - while let Ok(data) = w_rx.recv() { + let mut total_written = 0u64; + let finalization = disc_writer.process( + |data, pos, _| { total_written += data.len() as u64; - pb.set_position(total_written); - } - pb.finish_and_clear(); - }); + pb.set_position(pos); + Ok(()) + }, + &ProcessOptions { + processor_threads: 12, // TODO + digest_crc32: true, + digest_md5: false, + digest_sha1: true, + digest_xxh64: false, + }, + )?; + pb.finish(); - let mut total_read = 0u64; - let mut buf = <[u8]>::new_box_zeroed_with_elems(BUFFER_SIZE)?; - while total_read < disc_size { - let read = min(BUFFER_SIZE as u64, disc_size - total_read) as usize; - disc.read_exact(&mut buf[..read]).with_context(|| { - format!("Reading {} bytes at disc offset {}", BUFFER_SIZE, total_read) - })?; - - let arc = Arc::<[u8]>::from(&buf[..read]); - for (tx, _) in &digest_threads { - tx.send(arc.clone()).map_err(|_| "Sending data to hash thread")?; - } - w_tx.send(arc).map_err(|_| "Sending data to write thread")?; - total_read += read as u64; - } - drop(w_tx); // Close channel - w_thread.join().unwrap(); - - let mut crc32 = None; - let mut sha1 = None; - for (tx, handle) in digest_threads { - drop(tx); // Close channel - match handle.join().unwrap() { - DigestResult::Crc32(v) => crc32 = Some(v), - DigestResult::Sha1(v) => sha1 = Some(v), - _ => {} - } - } - - Ok(DiscHashes { crc32: crc32.unwrap(), sha1: sha1.unwrap() }) + Ok(DiscHashes { crc32: finalization.crc32.unwrap(), sha1: finalization.sha1.unwrap() }) } diff --git a/nodtool/src/cmd/extract.rs b/nodtool/src/cmd/extract.rs index 02be1a6..7990d6b 100644 --- a/nodtool/src/cmd/extract.rs +++ b/nodtool/src/cmd/extract.rs @@ -1,5 +1,4 @@ use std::{ - borrow::Cow, fs, fs::File, io::{BufRead, Write}, @@ -7,15 +6,16 @@ use std::{ }; use argp::FromArgs; -use itertools::Itertools; use nod::{ - Disc, Fst, Node, OpenOptions, PartitionBase, PartitionKind, PartitionMeta, PartitionOptions, + common::PartitionKind, + disc::fst::{Fst, Node}, + read::{DiscOptions, DiscReader, PartitionMeta, PartitionOptions, 
PartitionReader}, ResultContext, }; use size::{Base, Size}; use zerocopy::IntoBytes; -use crate::util::{display, has_extension}; +use crate::util::{has_extension, path_display}; #[derive(FromArgs, Debug)] /// Extracts a disc image. @@ -53,77 +53,57 @@ pub fn run(args: Args) -> nod::Result<()> { } else { output_dir = args.file.with_extension(""); } - let disc = Disc::new_with_options(&args.file, &OpenOptions::default())?; + let disc = + DiscReader::new(&args.file, &DiscOptions { preloader_threads: 4, ..Default::default() })?; let header = disc.header(); let is_wii = header.is_wii(); - let partition_options = PartitionOptions { validate_hashes: args.validate }; + let options = PartitionOptions { validate_hashes: args.validate }; if let Some(partition) = args.partition { if partition.eq_ignore_ascii_case("all") { for info in disc.partitions() { let mut out_dir = output_dir.clone(); out_dir.push(info.kind.dir_name().as_ref()); - let mut partition = - disc.open_partition_with_options(info.index, &partition_options)?; + let mut partition = disc.open_partition(info.index, &options)?; extract_partition(&disc, partition.as_mut(), &out_dir, is_wii, args.quiet)?; } } else if partition.eq_ignore_ascii_case("data") { - let mut partition = - disc.open_partition_kind_with_options(PartitionKind::Data, &partition_options)?; + let mut partition = disc.open_partition_kind(PartitionKind::Data, &options)?; extract_partition(&disc, partition.as_mut(), &output_dir, is_wii, args.quiet)?; } else if partition.eq_ignore_ascii_case("update") { - let mut partition = - disc.open_partition_kind_with_options(PartitionKind::Update, &partition_options)?; + let mut partition = disc.open_partition_kind(PartitionKind::Update, &options)?; extract_partition(&disc, partition.as_mut(), &output_dir, is_wii, args.quiet)?; } else if partition.eq_ignore_ascii_case("channel") { - let mut partition = - disc.open_partition_kind_with_options(PartitionKind::Channel, &partition_options)?; + let mut partition = disc.open_partition_kind(PartitionKind::Channel, &options)?; extract_partition(&disc, partition.as_mut(), &output_dir, is_wii, args.quiet)?; } else { let idx = partition.parse::<usize>().map_err(|_| "Invalid partition index")?; - let mut partition = disc.open_partition_with_options(idx, &partition_options)?; + let mut partition = disc.open_partition(idx, &options)?; extract_partition(&disc, partition.as_mut(), &output_dir, is_wii, args.quiet)?; } } else { - let mut partition = - disc.open_partition_kind_with_options(PartitionKind::Data, &partition_options)?; + let mut partition = disc.open_partition_kind(PartitionKind::Data, &options)?; extract_partition(&disc, partition.as_mut(), &output_dir, is_wii, args.quiet)?; } Ok(()) } fn extract_partition( - disc: &Disc, - partition: &mut dyn PartitionBase, + disc: &DiscReader, + partition: &mut dyn PartitionReader, out_dir: &Path, is_wii: bool, quiet: bool, ) -> nod::Result<()> { let meta = partition.meta()?; - extract_sys_files(disc, meta.as_ref(), out_dir, quiet)?; + extract_sys_files(disc, &meta, out_dir, quiet)?; // Extract FST let files_dir = out_dir.join("files"); fs::create_dir_all(&files_dir) - .with_context(|| format!("Creating directory {}", display(&files_dir)))?; + .with_context(|| format!("Creating directory {}", path_display(&files_dir)))?; let fst = Fst::new(&meta.raw_fst)?; - let mut path_segments = Vec::<(Cow<str>, usize)>::new(); - for (idx, node, name) in fst.iter() { - // Remove ended path segments - let mut new_size = 0; - for (_, end) in path_segments.iter() { - if *end == idx { - 
break; - } - new_size += 1; - } - path_segments.truncate(new_size); - - // Add the new path segment - let end = if node.is_dir() { node.length() as usize } else { idx + 1 }; - path_segments.push((name?, end)); - - let path = path_segments.iter().map(|(name, _)| name.as_ref()).join("/"); + for (_, node, path) in fst.iter() { if node.is_dir() { fs::create_dir_all(files_dir.join(&path)) .with_context(|| format!("Creating directory {}", path))?; @@ -135,14 +115,14 @@ fn extract_partition( } fn extract_sys_files( - disc: &Disc, + disc: &DiscReader, data: &PartitionMeta, out_dir: &Path, quiet: bool, ) -> nod::Result<()> { let sys_dir = out_dir.join("sys"); fs::create_dir_all(&sys_dir) - .with_context(|| format!("Creating directory {}", display(&sys_dir)))?; + .with_context(|| format!("Creating directory {}", path_display(&sys_dir)))?; extract_file(data.raw_boot.as_ref(), &sys_dir.join("boot.bin"), quiet)?; extract_file(data.raw_bi2.as_ref(), &sys_dir.join("bi2.bin"), quiet)?; extract_file(data.raw_apploader.as_ref(), &sys_dir.join("apploader.img"), quiet)?; @@ -154,7 +134,7 @@ fn extract_sys_files( if disc_header.is_wii() { let disc_dir = out_dir.join("disc"); fs::create_dir_all(&disc_dir) - .with_context(|| format!("Creating directory {}", display(&disc_dir)))?; + .with_context(|| format!("Creating directory {}", path_display(&disc_dir)))?; extract_file(&disc_header.as_bytes()[..0x100], &disc_dir.join("header.bin"), quiet)?; if let Some(region) = disc.region() { extract_file(region, &disc_dir.join("region.bin"), quiet)?; @@ -179,17 +159,18 @@ fn extract_file(bytes: &[u8], out_path: &Path, quiet: bool) -> nod::Result<()> { if !quiet { println!( "Extracting {} (size: {})", - display(out_path), + path_display(out_path), Size::from_bytes(bytes.len()).format().with_base(Base::Base10) ); } - fs::write(out_path, bytes).with_context(|| format!("Writing file {}", display(out_path)))?; + fs::write(out_path, bytes) + .with_context(|| format!("Writing file {}", path_display(out_path)))?; Ok(()) } fn extract_node( node: Node, - partition: &mut dyn PartitionBase, + partition: &mut dyn PartitionReader, base_path: &Path, name: &str, is_wii: bool, @@ -199,12 +180,12 @@ fn extract_node( if !quiet { println!( "Extracting {} (size: {})", - display(&file_path), + path_display(&file_path), Size::from_bytes(node.length()).format().with_base(Base::Base10) ); } let mut file = File::create(&file_path) - .with_context(|| format!("Creating file {}", display(&file_path)))?; + .with_context(|| format!("Creating file {}", path_display(&file_path)))?; let mut r = partition.open_file(node).with_context(|| { format!( "Opening file {} on disc for reading (offset {}, size {})", @@ -214,15 +195,17 @@ fn extract_node( ) })?; loop { - let buf = - r.fill_buf().with_context(|| format!("Extracting file {}", display(&file_path)))?; + let buf = r + .fill_buf() + .with_context(|| format!("Extracting file {}", path_display(&file_path)))?; let len = buf.len(); if len == 0 { break; } - file.write_all(buf).with_context(|| format!("Writing file {}", display(&file_path)))?; + file.write_all(buf) + .with_context(|| format!("Writing file {}", path_display(&file_path)))?; r.consume(len); } - file.flush().with_context(|| format!("Flushing file {}", display(&file_path)))?; + file.flush().with_context(|| format!("Flushing file {}", path_display(&file_path)))?; Ok(()) } diff --git a/nodtool/src/cmd/gen.rs b/nodtool/src/cmd/gen.rs new file mode 100644 index 0000000..77d3806 --- /dev/null +++ b/nodtool/src/cmd/gen.rs @@ -0,0 +1,771 @@ +use std::{ + fs, 
+ fs::File, + io, + io::{BufRead, Read, Seek, SeekFrom, Write}, + path::{Path, PathBuf}, + str::from_utf8, + time::Instant, +}; + +use argp::FromArgs; +use nod::{ + build::gc::{FileCallback, FileInfo, GCPartitionBuilder, PartitionOverrides}, + common::PartitionKind, + disc::{ + fst::Fst, DiscHeader, PartitionHeader, BI2_SIZE, BOOT_SIZE, MINI_DVD_SIZE, SECTOR_SIZE, + }, + read::{ + DiscOptions, DiscReader, PartitionEncryption, PartitionMeta, PartitionOptions, + PartitionReader, + }, + util::lfg::LaggedFibonacci, + write::{DiscWriter, FormatOptions, ProcessOptions}, + ResultContext, +}; +use tracing::{debug, error, info, warn}; +use zerocopy::{FromBytes, FromZeros}; + +use crate::util::{array_ref, redump, shared::convert_and_verify}; + +#[derive(FromArgs, Debug)] +/// Generates a disc image. +#[argp(subcommand, name = "gen")] +pub struct Args { + #[argp(positional)] + /// Path to extracted disc image + dir: PathBuf, + #[argp(positional)] + /// Output ISO file + out: PathBuf, +} + +#[derive(FromArgs, Debug)] +/// Test disc image generation. +#[argp(subcommand, name = "gentest")] +pub struct TestArgs { + #[argp(positional)] + /// Path to original disc images + inputs: Vec<PathBuf>, + #[argp(option, short = 'o')] + /// Output ISO file + output: Option<PathBuf>, + #[argp(option, short = 't')] + /// Output original ISO for comparison + test_output: Option<PathBuf>, +} + +fn read_fixed<const N: usize>(path: &Path) -> nod::Result<Box<[u8; N]>> { + let mut buf = <[u8; N]>::new_box_zeroed()?; + File::open(path) + .with_context(|| format!("Failed to open {}", path.display()))? + .read_exact(buf.as_mut()) + .with_context(|| format!("Failed to read {}", path.display()))?; + Ok(buf) +} + +fn read_all(path: &Path) -> nod::Result<Box<[u8]>> { + let mut buf = Vec::new(); + File::open(path) + .with_context(|| format!("Failed to open {}", path.display()))? + .read_to_end(&mut buf) + .with_context(|| format!("Failed to read {}", path.display()))?; + Ok(buf.into_boxed_slice()) +} + +struct FileWriteInfo { + name: String, + offset: u64, + length: u64, +} + +fn file_size(path: &Path) -> nod::Result<u64> { + Ok(fs::metadata(path) + .with_context(|| format!("Failed to get metadata for {}", path.display()))? 
+ .len()) +} + +fn check_file_size(path: &Path, expected: u64) -> nod::Result<()> { + let actual = file_size(path)?; + if actual != expected { + return Err(nod::Error::DiscFormat(format!( + "File {} has size {}, expected {}", + path.display(), + actual, + expected + ))); + } + Ok(()) +} + +pub fn run(args: Args) -> nod::Result<()> { + let start = Instant::now(); + + // Validate file sizes + let boot_path = args.dir.join("sys/boot.bin"); + check_file_size(&boot_path, BOOT_SIZE as u64)?; + let bi2_path = args.dir.join("sys/bi2.bin"); + check_file_size(&bi2_path, BI2_SIZE as u64)?; + let apploader_path = args.dir.join("sys/apploader.img"); + let apploader_size = file_size(&apploader_path)?; + let dol_path = args.dir.join("sys/main.dol"); + let dol_size = file_size(&dol_path)?; + + // Build metadata + let mut file_infos = Vec::new(); + let boot_data: Box<[u8; BOOT_SIZE]> = read_fixed(&boot_path)?; + let header = DiscHeader::ref_from_bytes(&boot_data[..size_of::<DiscHeader>()]) + .expect("Failed to read disc header"); + let junk_id = get_junk_id(header); + let partition_header = PartitionHeader::ref_from_bytes(&boot_data[size_of::<DiscHeader>()..]) + .expect("Failed to read partition header"); + let fst_path = args.dir.join("sys/fst.bin"); + let fst_data = read_all(&fst_path)?; + let fst = Fst::new(&fst_data).expect("Failed to parse FST"); + + file_infos.push(FileWriteInfo { + name: "sys/boot.bin".to_string(), + offset: 0, + length: BOOT_SIZE as u64, + }); + file_infos.push(FileWriteInfo { + name: "sys/bi2.bin".to_string(), + offset: BOOT_SIZE as u64, + length: BI2_SIZE as u64, + }); + file_infos.push(FileWriteInfo { + name: "sys/apploader.img".to_string(), + offset: BOOT_SIZE as u64 + BI2_SIZE as u64, + length: apploader_size, + }); + let fst_offset = partition_header.fst_offset(false); + let dol_offset = partition_header.dol_offset(false); + if dol_offset < fst_offset { + file_infos.push(FileWriteInfo { + name: "sys/main.dol".to_string(), + offset: dol_offset, + length: dol_size, + }); + } else { + let mut found = false; + for (_, node, path) in fst.iter() { + if !node.is_file() { + continue; + } + let offset = node.offset(false); + if offset == dol_offset { + info!("Using DOL from FST: {}", path); + found = true; + } + } + if !found { + return Err(nod::Error::DiscFormat("DOL not found in FST".to_string())); + } + } + let fst_size = partition_header.fst_size(false); + file_infos.push(FileWriteInfo { + name: "sys/fst.bin".to_string(), + offset: fst_offset, + length: fst_size, + }); + + // Collect files + for (_, node, path) in fst.iter() { + let length = node.length() as u64; + if node.is_dir() { + continue; + } + + let mut file_path = args.dir.join("files"); + file_path.extend(path.split('/')); + let metadata = match fs::metadata(&file_path) { + Ok(meta) => meta, + Err(e) if e.kind() == io::ErrorKind::NotFound => { + warn!("File not found: {}", file_path.display()); + continue; + } + Err(e) => { + return Err(e) + .context(format!("Failed to get metadata for {}", file_path.display())) + } + }; + if metadata.is_dir() { + return Err(nod::Error::Other(format!("Path {} is a directory", file_path.display()))); + } + if metadata.len() != length { + return Err(nod::Error::Other(format!( + "File {} has size {}, expected {}", + file_path.display(), + metadata.len(), + length + ))); + } + let offset = node.offset(false); + file_infos.push(FileWriteInfo { + name: file_path.into_os_string().into_string().unwrap(), + offset, + length, + }); + } + sort_files(&mut file_infos)?; + + // Write files + let mut out = 
+        .with_context(|| format!("Failed to create {}", args.out.display()))?;
+    info!("Writing disc image to {} ({} files)", args.out.display(), file_infos.len());
+    let crc = write_files(
+        &mut out,
+        &file_infos,
+        header,
+        partition_header,
+        junk_id,
+        |out, name| match name {
+            "sys/boot.bin" => out.write_all(boot_data.as_ref()),
+            "sys/fst.bin" => out.write_all(fst_data.as_ref()),
+            path => {
+                let mut in_file = File::open(args.dir.join(path))?;
+                io::copy(&mut in_file, out).map(|_| ())
+            }
+        },
+    )?;
+    out.flush().context("Failed to flush output file")?;
+    info!("Generated disc image in {:?} (CRC32: {:08X})", start.elapsed(), crc);
+    let redump_entry = redump::find_by_crc32(crc);
+    if let Some(entry) = &redump_entry {
+        println!("Redump: {} ✅", entry.name);
+    } else {
+        println!("Redump: Not found ❌");
+    }
+    Ok(())
+}
+
+#[inline]
+fn align_up<const N: u64>(n: u64) -> u64 { (n + N - 1) & !(N - 1) }
+
+#[inline]
+fn gcm_align(n: u64) -> u64 { (n + 31) & !3 }
+
+/// Files can be located on the inner rim of the disc (closer to the center) or the outer rim
+/// (closer to the edge). The inner rim is slower to read, so developers often configured certain
+/// files to be located on the outer rim. This function attempts to find a gap in the file offsets
+/// between the inner and outer rim, which we need to recreate junk data properly.
+fn find_file_gap(file_infos: &[FileWriteInfo], fst_end: u64) -> Option<u64> {
+    let mut last_offset = 0;
+    for info in file_infos {
+        if last_offset > fst_end && info.offset > last_offset + SECTOR_SIZE as u64 {
+            debug!("Found file gap at {:X} -> {:X}", last_offset, info.offset);
+            return Some(last_offset);
+        }
+        last_offset = info.offset + info.length;
+    }
+    None
+}
+
+fn write_files<W>(
+    w: &mut W,
+    file_infos: &[FileWriteInfo],
+    header: &DiscHeader,
+    partition_header: &PartitionHeader,
+    junk_id: Option<[u8; 4]>,
+    mut callback: impl FnMut(&mut HashStream<&mut W>, &str) -> io::Result<()>,
+) -> nod::Result<u32>
+where
+    W: Write + ?Sized,
+{
+    let fst_end = partition_header.fst_offset(false) + partition_header.fst_size(false);
+    let file_gap = find_file_gap(file_infos, fst_end);
+    let mut lfg = LaggedFibonacci::default();
+    let mut out = HashStream::new(w);
+    let mut last_end = 0;
+    for info in file_infos {
+        if let Some(junk_id) = junk_id {
+            let aligned_end = gcm_align(last_end);
+            if info.offset > aligned_end && last_end >= fst_end {
+                // Junk data is aligned to 4 bytes with a 28 byte padding (aka `(n + 31) & !3`)
+                // but a few cases don't have the 28 byte padding. Namely, the junk data after the
+                // FST, and the junk data in between the inner and outer rim files. This attempts to
+                // determine the correct alignment, but is not 100% accurate.
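+                // For example (illustrative values, not taken from a real disc): a file ending at
+                // 0x1001 gives `gcm_align(0x1001)` = (0x1001 + 31) & !3 = 0x1020, i.e. round up to
+                // 4 bytes (0x1004) plus 28 bytes of padding, whereas the padding-free
+                // `align_up::<4>(0x1001)` yields just 0x1004.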
+                let junk_start =
+                    if file_gap == Some(last_end) { align_up::<4>(last_end) } else { aligned_end };
+                debug!("Writing junk data at {:X} -> {:X}", junk_start, info.offset);
+                write_junk_data(
+                    &mut lfg,
+                    &mut out,
+                    junk_id,
+                    header.disc_num,
+                    junk_start,
+                    info.offset,
+                )?;
+            }
+        }
+        debug!(
+            "Writing file {} at {:X} -> {:X}",
+            info.name,
+            info.offset,
+            info.offset + info.length
+        );
+        out.seek(SeekFrom::Start(info.offset))
+            .with_context(|| format!("Seeking to offset {}", info.offset))?;
+        if info.length > 0 {
+            callback(&mut out, &info.name)
+                .with_context(|| format!("Failed to write file {}", info.name))?;
+            let cur = out.stream_position().context("Getting current position")?;
+            if cur != info.offset + info.length {
+                return Err(nod::Error::Other(format!(
+                    "Wrote {} bytes, expected {}",
+                    cur - info.offset,
+                    info.length
+                )));
+            }
+        }
+        last_end = info.offset + info.length;
+    }
+    if let Some(junk_id) = junk_id {
+        let aligned_end = gcm_align(last_end);
+        if aligned_end < MINI_DVD_SIZE && aligned_end >= fst_end {
+            debug!("Writing junk data at {:X} -> {:X}", aligned_end, MINI_DVD_SIZE);
+            write_junk_data(
+                &mut lfg,
+                &mut out,
+                junk_id,
+                header.disc_num,
+                aligned_end,
+                MINI_DVD_SIZE,
+            )?;
+            last_end = MINI_DVD_SIZE;
+        }
+    }
+    out.write_zeroes(MINI_DVD_SIZE - last_end).context("Writing end of file")?;
+    out.flush().context("Flushing output")?;
+    Ok(out.finish())
+}
+
+fn write_junk_data<W>(
+    lfg: &mut LaggedFibonacci,
+    out: &mut W,
+    junk_id: [u8; 4],
+    disc_num: u8,
+    pos: u64,
+    end: u64,
+) -> nod::Result<()>
+where
+    W: Write + Seek + ?Sized,
+{
+    out.seek(SeekFrom::Start(pos)).with_context(|| format!("Seeking to offset {}", pos))?;
+    lfg.write_sector_chunked(out, end - pos, junk_id, disc_num, pos)
+        .with_context(|| format!("Failed to write junk data at offset {}", pos))?;
+    Ok(())
+}
+
+pub fn run_test(args: TestArgs) -> nod::Result<()> {
+    let mut failed = vec![];
+    for input in args.inputs {
+        match in_memory_test(&input, args.output.as_deref(), args.test_output.as_deref()) {
+            Ok(()) => {}
+            Err(e) => {
+                error!("Failed to generate disc image: {:?}", e);
+                failed.push((input, e));
+            }
+        }
+    }
+    if !failed.is_empty() {
+        error!("Failed to generate disc images:");
+        for (input, e) in failed {
+            error!("  {}: {:?}", input.display(), e);
+        }
+        std::process::exit(1);
+    }
+    Ok(())
+}
+
+/// Some games (mainly beta and sample discs) have junk data that doesn't match the game ID. This
+/// function returns the correct game ID to use, if an override is needed.
+fn get_override_junk_id(header: &DiscHeader) -> Option<[u8; 4]> {
+    match &header.game_id {
+        // Dairantou Smash Brothers DX (Japan) (Taikenban)
+        b"DALJ01" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"DPIJ"),
+        // 2002 FIFA World Cup (Japan) (Jitsuen-you Sample)
+        b"DFIJ13" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"GFIJ"),
+        // Disney's Magical Park (Japan) (Jitsuen-you Sample)
+        b"DMTJ18" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"GMTJ"),
+        // Star Wars - Rogue Squadron II (Japan) (Jitsuen-you Sample)
+        b"DSWJ13" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"GSWJ"),
+        // Homeland (Japan) (Rev 1) [T-En by DOL-Translations v20230606] [i]
+        b"GHEE91" if header.disc_num == 0 && header.disc_version == 1 => Some(*b"GHEJ"),
+        // Kururin Squash! (Japan) [T-En by DOL-Translations v2.0.0]
+        b"GKQE01" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"GKQJ"),
+        // Lupin III - Lost Treasure Under the Sea (Japan) (Disc 1) [T-En by DOL-Translations v0.5.0] [i] [n]
+        b"GL3EE8" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"GL3J"),
+        // Lupin III - Lost Treasure Under the Sea (Japan) (Disc 2) [T-En by DOL-Translations v0.5.0] [i] [n]
+        b"GL3EE8" if header.disc_num == 1 && header.disc_version == 0 => Some(*b"GL3J"),
+        // Taxi 3 - The Game (France) [T-En by DOL-Translations v20230801] [n]
+        b"GXQP41" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"GXQF"),
+        // Donkey Konga 3 - Tabehoudai! Haru Mogitate 50-kyoku (Japan) [T-En by DOL-Translations v0.1.1] [i]
+        b"GY3E01" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"GY3J"),
+        // Need for Speed - Underground (Europe) (Alt)
+        b"PZHP69" if header.disc_num == 0 && header.disc_version == 0 => Some(*b"GNDP"),
+        _ => None,
+    }
+}
+
+fn get_junk_id(header: &DiscHeader) -> Option<[u8; 4]> {
+    Some(match get_override_junk_id(header) {
+        Some(id) => {
+            info!("Using override junk ID: {:X?}", from_utf8(&id).unwrap());
+            id
+        }
+        None => *array_ref!(header.game_id, 0, 4),
+    })
+}
+
+fn sort_files(files: &mut [FileWriteInfo]) -> nod::Result<()> {
+    files.sort_unstable_by_key(|info| (info.offset, info.length));
+    for i in 1..files.len() {
+        let prev = &files[i - 1];
+        let cur = &files[i];
+        if cur.offset < prev.offset + prev.length {
+            return Err(nod::Error::Other(format!(
+                "File {} ({:#X}-{:#X}) overlaps with {} ({:#X}-{:#X})",
+                cur.name,
+                cur.offset,
+                cur.offset + cur.length,
+                prev.name,
+                prev.offset,
+                prev.offset + prev.length
+            )));
+        }
+    }
+    Ok(())
+}
+
+fn in_memory_test(
+    path: &Path,
+    output: Option<&Path>,
+    test_output: Option<&Path>,
+) -> nod::Result<()> {
+    let start = Instant::now();
+    info!("Opening disc image '{}'", path.display());
+    let disc = DiscReader::new(path, &DiscOptions::default())?;
+    info!(
+        "Opened disc image '{}' (Disc {}, Revision {})",
+        disc.header().game_title_str(),
+        disc.header().disc_num + 1,
+        disc.header().disc_version
+    );
+    let Some(orig_crc32) = disc.meta().crc32 else {
+        return Err(nod::Error::Other("CRC32 not found in disc metadata".to_string()));
+    };
+    let mut partition =
+        disc.open_partition_kind(PartitionKind::Data, &PartitionOptions::default())?;
+    let meta = partition.meta()?;
+
+    // Build metadata
+    let mut file_infos = Vec::new();
+    let header = meta.header();
+    let junk_id = get_junk_id(header);
+    let partition_header = meta.partition_header();
+    let fst = meta.fst()?;
+
+    file_infos.push(FileWriteInfo {
+        name: "sys/boot.bin".to_string(),
+        offset: 0,
+        length: BOOT_SIZE as u64,
+    });
+    file_infos.push(FileWriteInfo {
+        name: "sys/bi2.bin".to_string(),
+        offset: BOOT_SIZE as u64,
+        length: BI2_SIZE as u64,
+    });
+    file_infos.push(FileWriteInfo {
+        name: "sys/apploader.img".to_string(),
+        offset: BOOT_SIZE as u64 + BI2_SIZE as u64,
+        length: meta.raw_apploader.len() as u64,
+    });
+    let fst_offset = partition_header.fst_offset(false);
+    let dol_offset = partition_header.dol_offset(false);
+    if dol_offset < fst_offset {
+        file_infos.push(FileWriteInfo {
+            name: "sys/main.dol".to_string(),
+            offset: dol_offset,
+            length: meta.raw_dol.len() as u64,
+        });
+    } else {
+        let mut found = false;
+        for (_, node, name) in fst.iter() {
+            if !node.is_file() {
+                continue;
+            }
+            let offset = node.offset(false);
+            if offset == dol_offset {
+                info!("Using DOL from FST: {}", name);
+                found = true;
+            }
+        }
+        if !found {
+            return Err(nod::Error::Other("DOL not found in FST".to_string()));
+        }
+    }
+    let fst_size = partition_header.fst_size(false);
+    file_infos.push(FileWriteInfo {
+        name: "sys/fst.bin".to_string(),
+        offset: fst_offset,
+        length: fst_size,
+    });
+
+    // Collect files
+    let mut builder = GCPartitionBuilder::new(false, PartitionOverrides::default());
+    for (idx, node, path) in fst.iter() {
+        let offset = node.offset(false);
+        let length = node.length() as u64;
+        if node.is_dir() {
+            if length as usize == idx + 1 {
+                println!("Empty directory: {}", path);
+            }
+            continue;
+        }
+
+        if let Some(junk_id) = junk_id {
+            // Some games have junk data in place of files that were removed from the disc layout.
+            // This is a naive check to skip these files in our disc layout so that the junk data
+            // alignment is correct. This misses some cases where the junk data starts in the middle
+            // of a file, but handling those cases would require a more complex solution.
+            if length > 4
+                && check_junk_data(partition.as_mut(), offset, length, junk_id, header.disc_num)?
+            {
+                warn!("Skipping junk data file: {} (size {})", path, length);
+                builder.add_junk_file(path);
+                continue;
+            }
+        }
+
+        builder.add_file(FileInfo {
+            name: path,
+            size: length,
+            offset: Some(offset),
+            alignment: None,
+        })?;
+    }
+
+    // Write files
+    info!("Writing disc image with {} files", file_infos.len());
+    for file in &file_infos {
+        builder.add_file(FileInfo {
+            name: file.name.clone(),
+            size: file.length,
+            offset: Some(file.offset),
+            alignment: None,
+        })?;
+    }
+    let writer = builder.build(|out: &mut dyn Write, name: &str| match name {
+        "sys/boot.bin" => out.write_all(meta.raw_boot.as_ref()),
+        "sys/bi2.bin" => out.write_all(meta.raw_bi2.as_ref()),
+        "sys/fst.bin" => out.write_all(meta.raw_fst.as_ref()),
+        "sys/apploader.img" => out.write_all(meta.raw_apploader.as_ref()),
+        "sys/main.dol" => out.write_all(meta.raw_dol.as_ref()),
+        path => {
+            let Some((_, node)) = fst.find(path) else {
+                return Err(io::Error::new(
+                    io::ErrorKind::NotFound,
+                    format!("File not found: {}", path),
+                ));
+            };
+            let mut file = partition.open_file(node)?;
+            buf_copy(&mut file, out)?;
+            Ok(())
+        }
+    })?;
+    let disc_stream = writer.into_stream(PartitionFileReader { partition, meta })?;
+    let disc_reader = DiscReader::new_stream(disc_stream, &DiscOptions::default())?;
+    let disc_writer = DiscWriter::new(disc_reader, &FormatOptions::default())?;
+    let process_options = ProcessOptions { digest_crc32: true, ..Default::default() };
+    let finalization = if let Some(output) = output {
+        let mut out = File::create(output)
+            .with_context(|| format!("Failed to create {}", output.display()))?;
+        let finalization =
+            disc_writer.process(|data, _, _| out.write_all(data.as_ref()), &process_options)?;
+        out.flush().context("Failed to flush output file")?;
+        finalization
+    } else {
+        disc_writer.process(|_, _, _| Ok(()), &process_options)?
+    };
+    let crc = finalization.crc32.unwrap();
+    info!("Generated disc image in {:?} (CRC32: {:08X})", start.elapsed(), crc);
+    if crc != orig_crc32 {
+        if let Some(test_output) = test_output {
+            let open_options = DiscOptions {
+                partition_encryption: PartitionEncryption::Original,
+                preloader_threads: 4,
+            };
+            convert_and_verify(
+                path,
+                Some(test_output),
+                false,
+                &open_options,
+                &FormatOptions::default(),
+            )?;
+        }
+        return Err(nod::Error::Other(format!(
+            "CRC32 mismatch: {:08X} != {:08X}",
+            crc, orig_crc32
+        )));
+    }
+    Ok(())
+}
+
+#[derive(Clone)]
+struct PartitionFileReader {
+    partition: Box<dyn PartitionReader>,
+    meta: PartitionMeta,
+}
+
+impl FileCallback for PartitionFileReader {
+    fn read_file(&mut self, out: &mut [u8], name: &str, offset: u64) -> io::Result<()> {
+        let data: &[u8] = match name {
+            "sys/boot.bin" => self.meta.raw_boot.as_ref(),
+            "sys/bi2.bin" => self.meta.raw_bi2.as_ref(),
+            "sys/fst.bin" => self.meta.raw_fst.as_ref(),
+            "sys/apploader.img" => self.meta.raw_apploader.as_ref(),
+            "sys/main.dol" => self.meta.raw_dol.as_ref(),
+            path => {
+                let fst = self.meta.fst().map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
+                let Some((_, node)) = fst.find(path) else {
+                    return Err(io::Error::new(
+                        io::ErrorKind::NotFound,
+                        format!("File not found: {}", path),
+                    ));
+                };
+                let mut file = self.partition.open_file(node)?;
+                file.seek(SeekFrom::Start(offset))?;
+                file.read_exact(out)?;
+                return Ok(());
+            }
+        };
+        let offset = offset as usize;
+        let len = out.len().min(data.len() - offset);
+        out[..len].copy_from_slice(&data[offset..offset + len]);
+        Ok(())
+    }
+}
+
+/// Some disc files still exist in the FST, but were removed from the disc layout. These files had
+/// junk data written in their place, since the disc creator did not know about them. To match the
+/// original disc, we need to check for these files and remove them from our disc layout as well.
+/// This ensures that the junk data alignment is correct.
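+///
+/// The check below is a streaming comparison: it reads the file's data from the partition in
+/// buffered chunks and compares each chunk against the pseudo-random junk stream seeded from the
+/// junk ID, disc number, and current position, bailing out on the first mismatch.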
+fn check_junk_data(
+    partition: &mut dyn PartitionReader,
+    offset: u64,
+    len: u64,
+    junk_id: [u8; 4],
+    disc_num: u8,
+) -> nod::Result<bool> {
+    if len == 0 {
+        return Ok(false);
+    }
+
+    partition
+        .seek(SeekFrom::Start(offset))
+        .with_context(|| format!("Seeking to offset {}", offset))?;
+    let mut lfg = LaggedFibonacci::default();
+    let mut pos = offset;
+    let mut remaining = len;
+    while remaining > 0 {
+        let file_buf = partition
+            .fill_buf()
+            .with_context(|| format!("Failed to read disc file at offset {}", offset))?;
+        let read_len = (file_buf.len() as u64).min(remaining) as usize;
+        if !lfg.check_sector_chunked(&file_buf[..read_len], junk_id, disc_num, pos) {
+            return Ok(false);
+        }
+
+        pos += read_len as u64;
+        remaining -= read_len as u64;
+        partition.consume(read_len);
+    }
+    Ok(true)
+}
+
+pub struct HashStream<W> {
+    inner: W,
+    hasher: crc32fast::Hasher,
+    position: u64,
+}
+
+impl<W> HashStream<W> {
+    pub fn new(inner: W) -> Self { Self { inner, hasher: Default::default(), position: 0 } }
+
+    pub fn finish(self) -> u32 { self.hasher.finalize() }
+}
+
+impl<W> HashStream<W>
+where W: Write
+{
+    pub fn write_zeroes(&mut self, mut len: u64) -> io::Result<()> {
+        while len > 0 {
+            let write_len = len.min(SECTOR_SIZE as u64) as usize;
+            self.write_all(&ZERO_SECTOR[..write_len])?;
+            len -= write_len as u64;
+        }
+        Ok(())
+    }
+}
+
+impl<W> Write for HashStream<W>
+where W: Write
+{
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        self.hasher.update(buf);
+        self.position += buf.len() as u64;
+        self.inner.write(buf)
+    }
+
+    fn flush(&mut self) -> io::Result<()> { self.inner.flush() }
+}
+
+const ZERO_SECTOR: [u8; SECTOR_SIZE] = [0; SECTOR_SIZE];
+
+impl<W> Seek for HashStream<W>
+where W: Write
+{
+    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+        let new_position = match pos {
+            SeekFrom::Start(v) => v,
+            SeekFrom::Current(v) => self.position.saturating_add_signed(v),
+            SeekFrom::End(_) => {
+                return Err(io::Error::new(
+                    io::ErrorKind::Unsupported,
+                    "HashStream: SeekFrom::End is not supported".to_string(),
+                ));
+            }
+        };
+        if new_position < self.position {
+            return Err(io::Error::new(
+                io::ErrorKind::InvalidInput,
+                "HashStream: Cannot seek backwards".to_string(),
+            ));
+        }
+        self.write_zeroes(new_position - self.position)?;
+        Ok(new_position)
+    }
+
+    fn stream_position(&mut self) -> io::Result<u64> { Ok(self.position) }
+}
+
+/// Copies from a buffered reader to a writer without extra allocations.
+fn buf_copy<R, W>(reader: &mut R, writer: &mut W) -> io::Result<u64>
+where
+    R: BufRead + ?Sized,
+    W: Write + ?Sized,
+{
+    let mut copied = 0;
+    loop {
+        let buf = reader.fill_buf()?;
+        let len = buf.len();
+        if len == 0 {
+            break;
+        }
+        writer.write_all(buf)?;
+        reader.consume(len);
+        copied += len as u64;
+    }
+    Ok(copied)
+}
diff --git a/nodtool/src/cmd/info.rs b/nodtool/src/cmd/info.rs
index 16c2d95..f52c639 100644
--- a/nodtool/src/cmd/info.rs
+++ b/nodtool/src/cmd/info.rs
@@ -1,10 +1,14 @@
 use std::path::{Path, PathBuf};
 
 use argp::FromArgs;
-use nod::{Disc, SECTOR_SIZE};
+use nod::{
+    disc::SECTOR_SIZE,
+    read::{DiscOptions, DiscReader, PartitionOptions},
+};
 use size::Size;
+use tracing::info;
 
-use crate::util::{display, shared::print_header};
+use crate::util::{path_display, shared::print_header};
 
 #[derive(FromArgs, Debug)]
 /// Displays information about disc images.
@@ -23,15 +27,15 @@ pub fn run(args: Args) -> nod::Result<()> {
 }
 
 fn info_file(path: &Path) -> nod::Result<()> {
-    log::info!("Loading {}", display(path));
-    let disc = Disc::new(path)?;
+    info!("Loading {}", path_display(path));
+    let disc = DiscReader::new(path, &DiscOptions::default())?;
     let header = disc.header();
     let meta = disc.meta();
     print_header(header, &meta);
 
     if header.is_wii() {
         for (idx, info) in disc.partitions().iter().enumerate() {
-            let mut partition = disc.open_partition(idx)?;
+            let mut partition = disc.open_partition(idx, &PartitionOptions::default())?;
             let meta = partition.meta()?;
 
             println!();
diff --git a/nodtool/src/cmd/mod.rs b/nodtool/src/cmd/mod.rs
index d8e9a9f..364ebee 100644
--- a/nodtool/src/cmd/mod.rs
+++ b/nodtool/src/cmd/mod.rs
@@ -1,5 +1,6 @@
 pub mod convert;
 pub mod dat;
 pub mod extract;
+pub mod gen;
 pub mod info;
 pub mod verify;
diff --git a/nodtool/src/cmd/verify.rs b/nodtool/src/cmd/verify.rs
index c70086d..c6acb1d 100644
--- a/nodtool/src/cmd/verify.rs
+++ b/nodtool/src/cmd/verify.rs
@@ -1,7 +1,10 @@
 use std::path::PathBuf;
 
 use argp::FromArgs;
-use nod::{OpenOptions, PartitionEncryptionMode};
+use nod::{
+    read::{DiscOptions, PartitionEncryption},
+    write::FormatOptions,
+};
 
 use crate::util::{redump, shared::convert_and_verify};
 
@@ -18,6 +21,12 @@ pub struct Args {
     #[argp(option, short = 'd')]
     /// path to DAT file(s) for verification (optional)
     dat: Vec<PathBuf>,
+    #[argp(switch)]
+    /// decrypt Wii partition data
+    decrypt: bool,
+    #[argp(switch)]
+    /// encrypt Wii partition data
+    encrypt: bool,
 }
 
 pub fn run(args: Args) -> nod::Result<()> {
@@ -25,9 +34,21 @@ pub fn run(args: Args) -> nod::Result<()> {
         println!("Loading dat files...");
         redump::load_dats(args.dat.iter().map(PathBuf::as_ref))?;
     }
-    let options = OpenOptions { partition_encryption: PartitionEncryptionMode::Original };
+    let cpus = num_cpus::get();
+    let options = DiscOptions {
+        partition_encryption: match (args.decrypt, args.encrypt) {
+            (true, false) => PartitionEncryption::ForceDecrypted,
+            (false, true) => PartitionEncryption::ForceEncrypted,
+            (false, false) => PartitionEncryption::Original,
+            (true, true) => {
+                return Err(nod::Error::Other("Both --decrypt and --encrypt specified".to_string()))
+            }
+        },
+        preloader_threads: 4.min(cpus),
+    };
+    let format_options = FormatOptions::default();
     for file in &args.file {
-        convert_and_verify(file, None, args.md5, &options)?;
+        convert_and_verify(file, None, args.md5, &options, &format_options)?;
         println!();
     }
     Ok(())
diff --git a/nodtool/src/lib.rs b/nodtool/src/lib.rs
index ac42d14..c60c1aa 100644
--- a/nodtool/src/lib.rs
+++ b/nodtool/src/lib.rs
@@ -9,19 +9,23 @@ pub use nod;
 #[derive(FromArgs, Debug)]
 #[argp(subcommand)]
 pub enum SubCommand {
-    Dat(cmd::dat::Args),
-    Info(cmd::info::Args),
-    Extract(cmd::extract::Args),
     Convert(cmd::convert::Args),
+    Dat(cmd::dat::Args),
+    Extract(cmd::extract::Args),
+    // Gen(cmd::gen::Args),
+    GenTest(cmd::gen::TestArgs),
+    Info(cmd::info::Args),
     Verify(cmd::verify::Args),
 }
 
 pub fn run(command: SubCommand) -> nod::Result<()> {
     match command {
-        SubCommand::Dat(c_args) => cmd::dat::run(c_args),
-        SubCommand::Info(c_args) => cmd::info::run(c_args),
         SubCommand::Convert(c_args) => cmd::convert::run(c_args),
+        SubCommand::Dat(c_args) => cmd::dat::run(c_args),
         SubCommand::Extract(c_args) => cmd::extract::run(c_args),
+        // SubCommand::Gen(c_args) => cmd::gen::run(c_args),
+        SubCommand::GenTest(c_args) => cmd::gen::run_test(c_args),
+        SubCommand::Info(c_args) => cmd::info::run(c_args),
         SubCommand::Verify(c_args) => cmd::verify::run(c_args),
     }
 }
diff --git a/nodtool/src/main.rs b/nodtool/src/main.rs
index 90ee79f..729be24 100644
--- a/nodtool/src/main.rs
+++ b/nodtool/src/main.rs
@@ -12,8 +12,6 @@ use argp::{FromArgValue, FromArgs};
 use enable_ansi_support::enable_ansi_support;
 use nodtool::{run, SubCommand};
 use supports_color::Stream;
-use tracing::level_filters::LevelFilter;
-use tracing_subscriber::EnvFilter;
 
 #[derive(FromArgs, Debug)]
 /// Tool for reading GameCube and Wii disc images.
@@ -99,27 +97,43 @@ fn main() {
         supports_color::on(Stream::Stdout).is_some_and(|c| c.has_basic)
     };
 
-    let format =
-        tracing_subscriber::fmt::format().with_ansi(use_colors).with_target(false).without_time();
-    let builder = tracing_subscriber::fmt().event_format(format);
-    if let Some(level) = args.log_level {
-        builder
-            .with_max_level(match level {
-                LogLevel::Error => LevelFilter::ERROR,
-                LogLevel::Warn => LevelFilter::WARN,
-                LogLevel::Info => LevelFilter::INFO,
-                LogLevel::Debug => LevelFilter::DEBUG,
-                LogLevel::Trace => LevelFilter::TRACE,
-            })
-            .init();
-    } else {
-        builder
-            .with_env_filter(
-                EnvFilter::builder()
-                    .with_default_directive(LevelFilter::INFO.into())
-                    .from_env_lossy(),
-            )
-            .init();
+    #[cfg(feature = "tracy")]
+    {
+        use tracing_subscriber::layer::SubscriberExt;
+        tracing::subscriber::set_global_default(
+            tracing_subscriber::registry().with(tracing_tracy::TracyLayer::default()),
+        )
+        .expect("setup tracy layer");
+    }
+
+    #[cfg(not(feature = "tracy"))]
+    {
+        use tracing::level_filters::LevelFilter;
+        use tracing_subscriber::EnvFilter;
+        let format = tracing_subscriber::fmt::format()
+            .with_ansi(use_colors)
+            .with_target(false)
+            .without_time();
+        let builder = tracing_subscriber::fmt().event_format(format);
+        if let Some(level) = args.log_level {
+            builder
+                .with_max_level(match level {
+                    LogLevel::Error => LevelFilter::ERROR,
+                    LogLevel::Warn => LevelFilter::WARN,
+                    LogLevel::Info => LevelFilter::INFO,
+                    LogLevel::Debug => LevelFilter::DEBUG,
+                    LogLevel::Trace => LevelFilter::TRACE,
+                })
+                .init();
+        } else {
+            builder
+                .with_env_filter(
+                    EnvFilter::builder()
+                        .with_default_directive(LevelFilter::INFO.into())
+                        .from_env_lossy(),
+                )
+                .init();
+        }
     }
 
     let mut result = Ok(());
diff --git a/nodtool/src/util/digest.rs b/nodtool/src/util/digest.rs
index 6fba156..5acf75f 100644
--- a/nodtool/src/util/digest.rs
+++ b/nodtool/src/util/digest.rs
@@ -1,29 +1,4 @@
-use std::{
-    fmt,
-    sync::{
-        mpsc::{sync_channel, SyncSender},
-        Arc,
-    },
-    thread,
-    thread::JoinHandle,
-};
-
-use digest::{Digest, Output};
-
-pub type DigestThread = (SyncSender<Arc<[u8]>>, JoinHandle<DigestResult>);
-
-pub fn digest_thread<H>() -> DigestThread
-where H: Hasher + Send + 'static {
-    let (tx, rx) = sync_channel::<Arc<[u8]>>(1);
-    let handle = thread::spawn(move || {
-        let mut hasher = H::new();
-        while let Ok(data) = rx.recv() {
-            hasher.update(data.as_ref());
-        }
-        hasher.finalize()
-    });
-    (tx, handle)
-}
+use std::fmt;
 
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub enum DigestResult {
@@ -48,49 +23,9 @@ impl fmt::Display for DigestResult {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self {
             DigestResult::Crc32(crc) => write!(f, "{:08x}", crc),
-            DigestResult::Md5(md5) => write!(f, "{:032x}", <Output<md5::Md5>>::from(*md5)),
-            DigestResult::Sha1(sha1) => write!(f, "{:040x}", <Output<sha1::Sha1>>::from(*sha1)),
+            DigestResult::Md5(md5) => write!(f, "{}", hex::encode(md5)),
+            DigestResult::Sha1(sha1) => write!(f, "{}", hex::encode(sha1)),
             DigestResult::Xxh64(xxh64) => write!(f, "{:016x}", xxh64),
         }
     }
 }
-
-pub trait Hasher {
-    fn new() -> Self;
-    fn finalize(self) -> DigestResult;
-    fn update(&mut self, data: &[u8]);
-}
-
-impl Hasher for md5::Md5 {
-    fn new() -> Self { Digest::new() }
-
-    fn finalize(self) -> DigestResult { DigestResult::Md5(Digest::finalize(self).into()) }
-
-    fn update(&mut self, data: &[u8]) { Digest::update(self, data) }
-}
-
-impl Hasher for sha1::Sha1 {
-    fn new() -> Self { Digest::new() }
-
-    fn finalize(self) -> DigestResult { DigestResult::Sha1(Digest::finalize(self).into()) }
-
-    fn update(&mut self, data: &[u8]) { Digest::update(self, data) }
-}
-
-impl Hasher for crc32fast::Hasher {
-    fn new() -> Self { crc32fast::Hasher::new() }
-
-    fn finalize(self) -> DigestResult { DigestResult::Crc32(crc32fast::Hasher::finalize(self)) }
-
-    fn update(&mut self, data: &[u8]) { crc32fast::Hasher::update(self, data) }
-}
-
-impl Hasher for xxhash_rust::xxh64::Xxh64 {
-    fn new() -> Self { xxhash_rust::xxh64::Xxh64::new(0) }
-
-    fn finalize(self) -> DigestResult {
-        DigestResult::Xxh64(xxhash_rust::xxh64::Xxh64::digest(&self))
-    }
-
-    fn update(&mut self, data: &[u8]) { xxhash_rust::xxh64::Xxh64::update(self, data) }
-}
diff --git a/nodtool/src/util/mod.rs b/nodtool/src/util/mod.rs
index e3c82be..26c3fa4 100644
--- a/nodtool/src/util/mod.rs
+++ b/nodtool/src/util/mod.rs
@@ -8,7 +8,7 @@ use std::{
     path::{Path, MAIN_SEPARATOR},
 };
 
-pub fn display(path: &Path) -> PathDisplay { PathDisplay { path } }
+pub fn path_display(path: &Path) -> PathDisplay { PathDisplay { path } }
 
 pub struct PathDisplay<'a> {
     path: &'a Path,
@@ -19,7 +19,7 @@ impl fmt::Display for PathDisplay<'_> {
         let mut first = true;
         for segment in self.path.iter() {
             let segment_str = segment.to_string_lossy();
-            if segment_str == "." {
+            if segment_str == "/" || segment_str == "." {
                 continue;
             }
             if first {
@@ -39,3 +39,15 @@ pub fn has_extension(filename: &Path, extension: &str) -> bool {
         None => false,
     }
 }
+
+/// Creates a fixed-size array reference from a slice.
+macro_rules! array_ref {
+    ($slice:expr, $offset:expr, $size:expr) => {{
+        #[inline(always)]
+        fn to_array<T>(slice: &[T]) -> &[T; $size] {
+            unsafe { &*(slice.as_ptr() as *const [_; $size]) }
+        }
+        to_array(&$slice[$offset..$offset + $size])
+    }};
+}
+pub(crate) use array_ref;
diff --git a/nodtool/src/util/redump.rs b/nodtool/src/util/redump.rs
index 2bf5f52..585e07c 100644
--- a/nodtool/src/util/redump.rs
+++ b/nodtool/src/util/redump.rs
@@ -8,10 +8,12 @@ use std::{
 };
 
 use hex::deserialize as deserialize_hex;
-use nod::{array_ref, Result};
+use nod::Result;
 use serde::Deserialize;
 use zerocopy::{FromBytes, FromZeros, Immutable, IntoBytes, KnownLayout};
 
+use crate::util::array_ref;
+
 #[derive(Clone, Debug)]
 pub struct GameResult<'a> {
     pub name: &'a str,
diff --git a/nodtool/src/util/shared.rs b/nodtool/src/util/shared.rs
index 8758763..ab60235 100644
--- a/nodtool/src/util/shared.rs
+++ b/nodtool/src/util/shared.rs
@@ -1,22 +1,21 @@
 use std::{
-    cmp::min,
     fmt,
     fs::File,
-    io::{Read, Write},
+    io::{Seek, SeekFrom, Write},
     path::Path,
-    sync::{mpsc::sync_channel, Arc},
-    thread,
 };
 
 use indicatif::{ProgressBar, ProgressState, ProgressStyle};
-use nod::{Compression, Disc, DiscHeader, DiscMeta, OpenOptions, Result, ResultContext};
-use size::Size;
-use zerocopy::FromZeros;
-
-use crate::util::{
-    digest::{digest_thread, DigestResult},
-    display, redump,
+use nod::{
+    common::Compression,
+    disc::DiscHeader,
+    read::{DiscMeta, DiscOptions, DiscReader, PartitionEncryption},
+    write::{DiscWriter, DiscWriterWeight, FormatOptions, ProcessOptions},
+    Result, ResultContext,
 };
+use size::Size;
+
+use crate::util::{digest::DigestResult, path_display, redump};
 
 pub fn print_header(header: &DiscHeader, meta: &DiscMeta) {
     println!("Format: {}", meta.format);
@@ -29,52 +28,71 @@ pub fn print_header(header: &DiscHeader, meta: &DiscMeta) {
     println!("Lossless: {}", meta.lossless);
     println!(
         "Verification data: {}",
-        meta.crc32.is_some()
-            || meta.md5.is_some()
-            || meta.sha1.is_some()
-            || meta.xxhash64.is_some()
+        meta.crc32.is_some() || meta.md5.is_some() || meta.sha1.is_some() || meta.xxh64.is_some()
     );
     println!();
     println!("Title: {}", header.game_title_str());
    println!("Game ID: {}", header.game_id_str());
     println!("Disc {}, Revision {}", header.disc_num + 1, header.disc_version);
-    if !header.has_partition_hashes() {
-        println!("[!] Disc has no hashes");
-    }
     if !header.has_partition_encryption() {
         println!("[!] Disc is not encrypted");
     }
+    if !header.has_partition_hashes() {
Disc has no hashes"); + } } pub fn convert_and_verify( in_file: &Path, out_file: Option<&Path>, md5: bool, - options: &OpenOptions, + options: &DiscOptions, + format_options: &FormatOptions, ) -> Result<()> { - println!("Loading {}", display(in_file)); - let mut disc = Disc::new_with_options(in_file, options)?; + println!("Loading {}", path_display(in_file)); + let disc = DiscReader::new(in_file, options)?; let header = disc.header(); let meta = disc.meta(); print_header(header, &meta); - let disc_size = disc.disc_size(); - let mut file = if let Some(out_file) = out_file { Some( File::create(out_file) - .with_context(|| format!("Creating file {}", display(out_file)))?, + .with_context(|| format!("Creating file {}", path_display(out_file)))?, ) } else { None }; if out_file.is_some() { - println!("\nConverting..."); + match options.partition_encryption { + PartitionEncryption::ForceEncrypted => { + println!("\nConverting to {} (encrypted)...", format_options.format) + } + PartitionEncryption::ForceDecrypted => { + println!("\nConverting to {} (decrypted)...", format_options.format) + } + _ => println!("\nConverting to {}...", format_options.format), + } + if format_options.compression != Compression::None { + println!("Compression: {}", format_options.compression); + } + if format_options.block_size > 0 { + println!("Block size: {}", Size::from_bytes(format_options.block_size)); + } } else { - println!("\nVerifying..."); + match options.partition_encryption { + PartitionEncryption::ForceEncrypted => { + println!("\nVerifying (encrypted)...") + } + PartitionEncryption::ForceDecrypted => { + println!("\nVerifying (decrypted)...") + } + _ => println!("\nVerifying..."), + } } - let pb = ProgressBar::new(disc_size); + let disc_writer = DiscWriter::new(disc, format_options)?; + let pb = ProgressBar::new(disc_writer.progress_bound()); pb.set_style(ProgressStyle::with_template("{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {bytes}/{total_bytes} ({bytes_per_sec}, {eta})") .unwrap() .with_key("eta", |state: &ProgressState, w: &mut dyn fmt::Write| { @@ -82,85 +100,71 @@ pub fn convert_and_verify( }) .progress_chars("#>-")); - const BUFFER_SIZE: usize = 1015808; // LCM(0x8000, 0x7C00) - let digest_threads = if md5 { - vec![ - digest_thread::(), - digest_thread::(), - digest_thread::(), - digest_thread::(), - ] - } else { - vec![ - digest_thread::(), - digest_thread::(), - digest_thread::(), - ] + let cpus = num_cpus::get(); + let processor_threads = match disc_writer.weight() { + DiscWriterWeight::Light => 0, + DiscWriterWeight::Medium => cpus / 2, + DiscWriterWeight::Heavy => cpus, }; - let (w_tx, w_rx) = sync_channel::>(1); - let w_thread = thread::spawn(move || { - let mut total_written = 0u64; - while let Ok(data) = w_rx.recv() { + let mut total_written = 0u64; + let finalization = disc_writer.process( + |data, pos, _| { if let Some(file) = &mut file { - file.write_all(data.as_ref()) - .with_context(|| { - format!("Writing {} bytes at offset {}", data.len(), total_written) - }) - .unwrap(); + file.write_all(data.as_ref())?; } total_written += data.len() as u64; - pb.set_position(total_written); - } - if let Some(mut file) = file { - file.flush().context("Flushing output file").unwrap(); - } - pb.finish(); - }); + pb.set_position(pos); + Ok(()) + }, + &ProcessOptions { + processor_threads, + digest_crc32: true, + digest_md5: md5, + digest_sha1: true, + digest_xxh64: true, + }, + )?; + pb.finish(); - let mut total_read = 0u64; - let mut buf = 
-    let mut buf = <[u8]>::new_box_zeroed_with_elems(BUFFER_SIZE)?;
-    while total_read < disc_size {
-        let read = min(BUFFER_SIZE as u64, disc_size - total_read) as usize;
-        disc.read_exact(&mut buf[..read]).with_context(|| {
-            format!("Reading {} bytes at disc offset {}", BUFFER_SIZE, total_read)
-        })?;
-
-        let arc = Arc::<[u8]>::from(&buf[..read]);
-        for (tx, _) in &digest_threads {
-            tx.send(arc.clone()).map_err(|_| "Sending data to hash thread")?;
+    // Finalize disc writer
+    if !finalization.header.is_empty() {
+        if let Some(file) = &mut file {
+            file.seek(SeekFrom::Start(0)).context("Seeking to start of output file")?;
+            file.write_all(finalization.header.as_ref()).context("Writing header")?;
+        } else {
+            return Err(nod::Error::Other("No output file, but requires finalization".to_string()));
         }
-        w_tx.send(arc).map_err(|_| "Sending data to write thread")?;
-        total_read += read as u64;
     }
-    drop(w_tx); // Close channel
-    w_thread.join().unwrap();
+    if let Some(mut file) = file {
+        file.flush().context("Flushing output file")?;
+    }
 
     println!();
     if let Some(path) = out_file {
-        println!("Wrote {} to {}", Size::from_bytes(total_read), display(path));
+        println!("Wrote {} to {}", Size::from_bytes(total_written), path_display(path));
    }
 
-    println!();
-    let mut crc32 = None;
-    let mut md5 = None;
-    let mut sha1 = None;
-    let mut xxh64 = None;
-    for (tx, handle) in digest_threads {
-        drop(tx); // Close channel
-        match handle.join().unwrap() {
-            DigestResult::Crc32(v) => crc32 = Some(v),
-            DigestResult::Md5(v) => md5 = Some(v),
-            DigestResult::Sha1(v) => sha1 = Some(v),
-            DigestResult::Xxh64(v) => xxh64 = Some(v),
-        }
-    }
-    let redump_entry = crc32.and_then(redump::find_by_crc32);
-    let expected_crc32 = meta.crc32.or(redump_entry.as_ref().map(|e| e.crc32));
-    let expected_md5 = meta.md5.or(redump_entry.as_ref().map(|e| e.md5));
-    let expected_sha1 = meta.sha1.or(redump_entry.as_ref().map(|e| e.sha1));
-    let expected_xxh64 = meta.xxhash64;
+    let mut redump_entry = None;
+    let mut expected_crc32 = None;
+    let mut expected_md5 = None;
+    let mut expected_sha1 = None;
+    let mut expected_xxh64 = None;
+    if options.partition_encryption == PartitionEncryption::Original {
+        // Use verification data in disc and check redump
+        redump_entry = finalization.crc32.and_then(redump::find_by_crc32);
+        expected_crc32 = meta.crc32.or(redump_entry.as_ref().map(|e| e.crc32));
+        expected_md5 = meta.md5.or(redump_entry.as_ref().map(|e| e.md5));
+        expected_sha1 = meta.sha1.or(redump_entry.as_ref().map(|e| e.sha1));
+        expected_xxh64 = meta.xxh64;
+    } else if options.partition_encryption == PartitionEncryption::ForceEncrypted {
+        // Ignore verification data in disc, but still check redump
+        redump_entry = finalization.crc32.and_then(redump::find_by_crc32);
+        expected_crc32 = redump_entry.as_ref().map(|e| e.crc32);
+        expected_md5 = redump_entry.as_ref().map(|e| e.md5);
+        expected_sha1 = redump_entry.as_ref().map(|e| e.sha1);
+    }
 
     fn print_digest(value: DigestResult, expected: Option<DigestResult>) {
         print!("{:<6}: ", value.name());
@@ -176,36 +180,36 @@ pub fn convert_and_verify(
         println!();
     }
 
-    if let Some(entry) = &redump_entry {
-        let mut full_match = true;
-        if let Some(md5) = md5 {
-            if entry.md5 != md5 {
-                full_match = false;
+    if let Some(crc32) = finalization.crc32 {
+        if let Some(entry) = &redump_entry {
+            let mut full_match = true;
+            if let Some(md5) = finalization.md5 {
+                if entry.md5 != md5 {
+                    full_match = false;
+                }
             }
-        }
-        if let Some(sha1) = sha1 {
-            if entry.sha1 != sha1 {
-                full_match = false;
+            if let Some(sha1) = finalization.sha1 {
+                if entry.sha1 != sha1 {
+                    full_match = false;
+                }
+            }
+            if full_match {
+                println!("Redump: {} ✅", entry.name);
+            } else {
+                println!("Redump: {} ❓ (partial match)", entry.name);
             }
-        }
-        if full_match {
-            println!("Redump: {} ✅", entry.name);
         } else {
-            println!("Redump: {} ❓ (partial match)", entry.name);
+            println!("Redump: Not found ❌");
         }
-    } else {
-        println!("Redump: Not found ❌");
-    }
-    if let Some(crc32) = crc32 {
         print_digest(DigestResult::Crc32(crc32), expected_crc32.map(DigestResult::Crc32));
     }
-    if let Some(md5) = md5 {
+    if let Some(md5) = finalization.md5 {
         print_digest(DigestResult::Md5(md5), expected_md5.map(DigestResult::Md5));
     }
-    if let Some(sha1) = sha1 {
+    if let Some(sha1) = finalization.sha1 {
         print_digest(DigestResult::Sha1(sha1), expected_sha1.map(DigestResult::Sha1));
     }
-    if let Some(xxh64) = xxh64 {
+    if let Some(xxh64) = finalization.xxh64 {
         print_digest(DigestResult::Xxh64(xxh64), expected_xxh64.map(DigestResult::Xxh64));
     }
     Ok(())
diff --git a/rustfmt.toml b/rustfmt.toml
index 0a9eda5..a8c3a96 100644
--- a/rustfmt.toml
+++ b/rustfmt.toml
@@ -6,3 +6,4 @@ reorder_impl_items = true
 use_field_init_shorthand = true
 use_small_heuristics = "Max"
 where_single_line = true
+format_code_in_doc_comments = true