Move sha1_hash to util/digest

Luke Street 2024-11-24 01:22:28 -07:00
parent 490ae80a60
commit 55b0d3f29e
6 changed files with 37 additions and 28 deletions

View File

@ -7,7 +7,7 @@ use crate::{
wii::{HASHES_SIZE, SECTOR_DATA_SIZE},
SECTOR_GROUP_SIZE, SECTOR_SIZE,
},
util::{array_ref, array_ref_mut},
util::{array_ref, array_ref_mut, digest::sha1_hash},
};
/// Hashes for a single sector group (64 sectors).
@ -73,20 +73,3 @@ pub fn hash_sector_group(sector_group: &[u8; SECTOR_GROUP_SIZE]) -> Box<GroupHas
result.h3_hash = sha1_hash(result.h2_hashes.as_bytes());
result
}
/// Hashes a byte slice with SHA-1.
#[instrument(skip_all)]
pub fn sha1_hash(buf: &[u8]) -> HashBytes {
#[cfg(feature = "openssl")]
{
// The one-shot openssl::sha::sha1 ends up being much slower
let mut hasher = openssl::sha::Sha1::new();
hasher.update(buf);
hasher.finish()
}
#[cfg(not(feature = "openssl"))]
{
use sha1::Digest;
HashBytes::from(sha1::Sha1::digest(buf))
}
}
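For downstream callers inside the crate, the move is purely an import-path change; the later hunks in this commit update them accordingly. Shown side by side for clarity (both paths taken verbatim from the hunks in this commit):

// Old import path (removed by this commit):
// use crate::disc::hashes::sha1_hash;

// New import path (added by this commit):
use crate::util::digest::sha1_hash;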

View File

@ -14,7 +14,6 @@ use crate::{
common::{HashBytes, KeyBytes, PartitionInfo},
disc::{
gcn::{read_part_meta, PartitionReaderGC},
hashes::sha1_hash,
preloader::{fetch_sector_group, Preloader, SectorGroup, SectorGroupRequest},
SECTOR_GROUP_SIZE, SECTOR_SIZE,
},
@ -22,7 +21,9 @@ use crate::{
read::{PartitionEncryption, PartitionMeta, PartitionOptions, PartitionReader},
util::{
aes::aes_cbc_decrypt,
array_ref, div_rem, impl_read_for_bufread,
array_ref,
digest::sha1_hash,
div_rem, impl_read_for_bufread,
read::{read_arc, read_arc_slice},
static_assert,
},

View File

@ -103,7 +103,6 @@ impl SplitFileReader {
pub fn len(&self) -> u64 { self.files.last().map_or(0, |f| f.begin + f.size) }
#[instrument(name = "SplitFileReader::check_open_file", skip_all)]
fn check_open_file(&mut self) -> io::Result<Option<&mut Split<BufReader<File>>>> {
if self.open_file.is_none() || !self.open_file.as_ref().unwrap().contains(self.pos) {
self.open_file = if let Some(split) = self.files.iter().find(|f| f.contains(self.pos)) {

View File

@ -14,7 +14,6 @@ use zerocopy::{big_endian::*, FromBytes, FromZeros, Immutable, IntoBytes, KnownL
use crate::{
common::{Compression, Format, HashBytes, KeyBytes, MagicBytes},
disc::{
hashes::sha1_hash,
reader::DiscReader,
wii::SECTOR_DATA_SIZE,
writer::{par_process, read_block, BlockProcessor, BlockResult, DataCallback, DiscWriter},
@ -29,7 +28,7 @@ use crate::{
aes::decrypt_sector_data_b2b,
align_up_32, align_up_64, array_ref, array_ref_mut,
compress::{Compressor, DecompressionKind, Decompressor},
digest::DigestManager,
digest::{sha1_hash, DigestManager},
lfg::LaggedFibonacci,
read::{read_arc_slice, read_from, read_vec},
static_assert,

View File

@ -6,10 +6,28 @@ use digest::Digest;
use tracing::instrument;
use crate::{
common::HashBytes,
io::nkit::NKitHeader,
write::{DiscFinalization, ProcessOptions},
};
/// Hashes a byte slice with SHA-1.
#[instrument(skip_all)]
pub fn sha1_hash(buf: &[u8]) -> HashBytes {
#[cfg(feature = "openssl")]
{
// The one-shot openssl::sha::sha1 ends up being much slower
let mut hasher = openssl::sha::Sha1::new();
hasher.update(buf);
hasher.finish()
}
#[cfg(not(feature = "openssl"))]
{
use sha1::Digest;
HashBytes::from(sha1::Sha1::digest(buf))
}
}
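The relocated helper is a thin wrapper over either OpenSSL or the pure-Rust sha1 crate. A standalone sketch of the non-OpenSSL branch above, assuming HashBytes is a 20-byte array alias (which the HashBytes::from conversion implies):

use sha1::{Digest, Sha1};

// Standalone equivalent of the fallback branch in the function above.
fn sha1_hash(buf: &[u8]) -> [u8; 20] {
    Sha1::digest(buf).into()
}

fn main() {
    // SHA-1 of empty input is da39a3ee5e6b4b0d3255bfef95601890afd80709.
    println!("{:02x?}", sha1_hash(b""));
    println!("{:02x?}", sha1_hash(b"sector data"));
}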
pub type DigestThread = (Sender<Bytes>, JoinHandle<DigestResult>);
pub fn digest_thread<H>() -> DigestThread
@ -40,13 +58,13 @@ impl DigestManager {
}
if options.digest_md5 {
#[cfg(feature = "openssl")]
threads.push(digest_thread::<ossl::HasherMD5>());
threads.push(digest_thread::<openssl_util::HasherMD5>());
#[cfg(not(feature = "openssl"))]
threads.push(digest_thread::<md5::Md5>());
}
if options.digest_sha1 {
#[cfg(feature = "openssl")]
threads.push(digest_thread::<ossl::HasherSHA1>());
threads.push(digest_thread::<openssl_util::HasherSHA1>());
#[cfg(not(feature = "openssl"))]
threads.push(digest_thread::<sha1::Sha1>());
}
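Each digest thread above pairs a channel sender with a join handle that yields the final digest. A minimal standalone sketch of that pattern, with Vec<u8> standing in for Bytes, a fixed SHA-1 hasher standing in for the generic H, and a 20-byte array standing in for DigestResult:

use std::sync::mpsc::{channel, Sender};
use std::thread::{self, JoinHandle};

use sha1::{Digest, Sha1};

fn sha1_thread() -> (Sender<Vec<u8>>, JoinHandle<[u8; 20]>) {
    let (tx, rx) = channel::<Vec<u8>>();
    let handle = thread::spawn(move || {
        let mut hasher = Sha1::new();
        // Hash chunks as they arrive; the loop ends once every sender is dropped.
        for chunk in rx {
            hasher.update(&chunk);
        }
        hasher.finalize().into()
    });
    (tx, handle)
}

fn main() {
    let (tx, handle) = sha1_thread();
    tx.send(b"hello ".to_vec()).unwrap();
    tx.send(b"world".to_vec()).unwrap();
    drop(tx); // close the channel so the worker can finalize
    println!("{:02x?}", handle.join().unwrap());
}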
@ -156,7 +174,7 @@ impl Hasher for xxhash_rust::xxh64::Xxh64 {
}
#[cfg(feature = "openssl")]
mod ossl {
mod openssl_util {
use tracing::instrument;
use super::{DigestResult, Hasher};
@ -208,7 +226,7 @@ mod ossl {
}
#[allow(unused_braces)] // https://github.com/rust-lang/rust/issues/116347
#[instrument(name = "ossl::HasherMD5::update", skip_all)]
#[instrument(name = "openssl_util::HasherMD5::update", skip_all)]
fn update(&mut self, data: &[u8]) { self.hasher.update(data).unwrap() }
}
@ -222,7 +240,7 @@ mod ossl {
}
#[allow(unused_braces)] // https://github.com/rust-lang/rust/issues/116347
#[instrument(name = "ossl::HasherSHA1::update", skip_all)]
#[instrument(name = "openssl_util::HasherSHA1::update", skip_all)]
fn update(&mut self, data: &[u8]) { self.hasher.update(data).unwrap() }
}
}

View File

@ -7,6 +7,7 @@ use std::{
};
use bytes::Buf;
use tracing::instrument;
use zerocopy::{transmute_ref, IntoBytes};
use crate::disc::SECTOR_SIZE;
@ -54,6 +55,7 @@ impl LaggedFibonacci {
/// Initializes the LFG with the standard seed for a given disc ID, disc number, and sector.
/// The partition offset is used to determine the sector and how many bytes to skip within the
/// sector.
#[instrument(name = "LaggedFibonacci::init_with_seed", skip_all)]
pub fn init_with_seed(&mut self, disc_id: [u8; 4], disc_num: u8, partition_offset: u64) {
let seed = u32::from_be_bytes([
disc_id[2],
@ -80,6 +82,7 @@ impl LaggedFibonacci {
/// Initializes the LFG with the seed read from a reader. The seed is assumed to be big-endian.
/// This is used for rebuilding junk data in WIA/RVZ files.
#[instrument(name = "LaggedFibonacci::init_with_reader", skip_all)]
pub fn init_with_reader<R>(&mut self, reader: &mut R) -> io::Result<()>
where R: Read + ?Sized {
reader.read_exact(self.buffer[..SEED_SIZE].as_mut_bytes())?;
@ -93,6 +96,7 @@ impl LaggedFibonacci {
/// Initializes the LFG with the seed read from a [`Buf`]. The seed is assumed to be big-endian.
/// This is used for rebuilding junk data in WIA/RVZ files.
#[instrument(name = "LaggedFibonacci::init_with_buf", skip_all)]
pub fn init_with_buf(&mut self, reader: &mut impl Buf) -> io::Result<()> {
let out = self.buffer[..SEED_SIZE].as_mut_bytes();
if reader.remaining() < out.len() {
@ -142,6 +146,7 @@ impl LaggedFibonacci {
// }
/// Fills the buffer with junk data.
#[instrument(name = "LaggedFibonacci::fill", skip_all)]
pub fn fill(&mut self, mut buf: &mut [u8]) {
while !buf.is_empty() {
let len = min(buf.len(), LFG_K * 4 - self.position);
@ -157,6 +162,7 @@ impl LaggedFibonacci {
}
/// Writes junk data to the output stream.
#[instrument(name = "LaggedFibonacci::write", skip_all)]
pub fn write<W>(&mut self, w: &mut W, mut len: u64) -> io::Result<()>
where W: Write + ?Sized {
while len > 0 {
@ -175,6 +181,7 @@ impl LaggedFibonacci {
/// The junk data on GC / Wii discs is reinitialized every 32KB. This function handles the
/// wrapping logic and reinitializes the LFG at sector boundaries.
#[instrument(name = "LaggedFibonacci::fill_sector_chunked", skip_all)]
pub fn fill_sector_chunked(
&mut self,
mut buf: &mut [u8],
@ -194,6 +201,7 @@ impl LaggedFibonacci {
/// The junk data on GC / Wii discs is reinitialized every 32KB. This function handles the
/// wrapping logic and reinitializes the LFG at sector boundaries.
#[instrument(name = "LaggedFibonacci::write_sector_chunked", skip_all)]
pub fn write_sector_chunked<W>(
&mut self,
w: &mut W,
@ -217,6 +225,7 @@ impl LaggedFibonacci {
/// Checks if the data matches the junk data generated by the LFG. This function handles the
/// wrapping logic and reinitializes the LFG at sector boundaries.
#[instrument(name = "LaggedFibonacci::check_sector_chunked", skip_all)]
pub fn check_sector_chunked(
&mut self,
mut buf: &[u8],
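The chunked LFG methods in this file share the reinitialize-every-32KB logic the doc comments describe. A conceptual, standalone sketch of that chunking with a hypothetical stand-in generator (JunkGen is illustrative only; the real crate seeds a lagged Fibonacci generator from the disc ID, disc number, and partition offset):

// 32 KiB, assumed to match the crate's SECTOR_SIZE constant.
const SECTOR_SIZE: usize = 0x8000;

// Hypothetical stand-in for LaggedFibonacci, purely for illustration.
struct JunkGen {
    state: u32,
}

impl JunkGen {
    fn init_for_sector(sector: u64) -> Self {
        JunkGen { state: (sector as u32).wrapping_mul(0x9E37_79B9) | 1 }
    }

    fn fill(&mut self, buf: &mut [u8]) {
        for b in buf {
            // xorshift step, not the real generator
            self.state ^= self.state << 13;
            self.state ^= self.state >> 17;
            self.state ^= self.state << 5;
            *b = self.state as u8;
        }
    }
}

// Fill `buf`, which begins at absolute offset `pos`, reinitializing the
// generator at every sector boundary, as the *_sector_chunked methods above do.
fn fill_sector_chunked(buf: &mut [u8], mut pos: u64) {
    let mut written = 0;
    while written < buf.len() {
        let offset_in_sector = (pos % SECTOR_SIZE as u64) as usize;
        let len = (SECTOR_SIZE - offset_in_sector).min(buf.len() - written);
        let mut gen = JunkGen::init_for_sector(pos / SECTOR_SIZE as u64);
        // The real implementation would also skip `offset_in_sector` bytes of
        // generator output so writes starting mid-sector stay consistent.
        gen.fill(&mut buf[written..written + len]);
        written += len;
        pos += len as u64;
    }
}

fn main() {
    let mut data = vec![0u8; SECTOR_SIZE + 123];
    fill_sector_chunked(&mut data, 0x7F80); // starts just before a sector boundary
    println!("{:02x?}", &data[..8]);
}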