Centralize logic into fetch_sector_group

Luke Street 2024-11-23 12:54:05 -07:00
parent b8b06dcd5c
commit d197b8e7c2
6 changed files with 80 additions and 64 deletions
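Before the per-file diffs, a condensed sketch of what gets centralized: the GameCube, Wii, and disc readers each kept an Option cache of the last sector group and either reused it or fetched a fresh one, and that check-or-fetch step now lives in a single helper. The toy Request/Group/Preloader types and fetch_group below are hypothetical stand-ins for illustration only, not the crate's real SectorGroupRequest/SectorGroup/Preloader API.

    use std::io;

    // Hypothetical stand-ins for the crate's request/group/preloader types.
    #[derive(Clone, Copy, PartialEq, Eq)]
    struct Request {
        group: u32,
    }
    struct Group {
        request: Request,
    }
    struct Preloader;

    impl Preloader {
        fn fetch(&self, request: Request, _max_groups: u32) -> io::Result<Group> {
            Ok(Group { request })
        }
    }

    /// Shared helper: reuse the cached group when the request matches,
    /// otherwise fetch a new one and cache it. The bool reports a fresh fetch.
    fn fetch_group<'a>(
        request: Request,
        max_groups: u32,
        cached: &'a mut Option<Group>,
        preloader: &Preloader,
    ) -> io::Result<(&'a Group, bool)> {
        if cached.as_ref().map_or(false, |g| g.request == request) {
            return Ok((cached.as_ref().unwrap(), false));
        }
        Ok((cached.insert(preloader.fetch(request, max_groups)?), true))
    }

    fn main() -> io::Result<()> {
        let preloader = Preloader;
        let mut cached = None;
        let (_, updated) = fetch_group(Request { group: 1 }, 1, &mut cached, &preloader)?;
        assert!(updated); // first call fetches and caches
        let (_, updated) = fetch_group(Request { group: 1 }, 1, &mut cached, &preloader)?;
        assert!(!updated); // same request hits the cache
        Ok(())
    }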

Cargo.lock generated
View File

@@ -393,6 +393,15 @@ dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "higher-kinded-types"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "561985554c8b8d4808605c90a5f1979cc6c31a5d20b78465cd59501233c6678e"
+dependencies = [
+ "never-say-never",
+]
+
 [[package]]
 name = "hybrid-array"
 version = "0.2.1"
@@ -580,6 +589,12 @@ dependencies = [
  "adler2",
 ]
 
+[[package]]
+name = "never-say-never"
+version = "6.6.666"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf5a574dadd7941adeaa71823ecba5e28331b8313fb2e1c6a5c7e5981ea53ad6"
+
 [[package]]
 name = "nod"
 version = "2.0.0-alpha.1"
@@ -605,6 +620,7 @@ dependencies = [
  "memmap2",
  "miniz_oxide",
  "openssl",
+ "polonius-the-crab",
  "rand",
  "rayon",
  "sha1",
@@ -751,6 +767,16 @@ version = "0.3.31"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2"
 
+[[package]]
+name = "polonius-the-crab"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e97ca2c89572ae41bbec1c99498251f87dd5a94e500c5ec19c382dd593dd5ce9"
+dependencies = [
+ "higher-kinded-types",
+ "never-say-never",
+]
+
 [[package]]
 name = "portable-atomic"
 version = "1.9.0"

View File

@@ -45,6 +45,7 @@ md-5 = { workspace = true }
 memmap2 = "0.9"
 miniz_oxide = { version = "0.8", optional = true }
 openssl = { version = "0.10", optional = true }
+polonius-the-crab = "0.4"
 rand = "0.8"
 rayon = "1.10"
 sha1 = { workspace = true }

View File

@@ -9,7 +9,7 @@ use zerocopy::FromBytes;
 use crate::{
     disc::{
-        preloader::{Preloader, SectorGroup, SectorGroupRequest},
+        preloader::{fetch_sector_group, Preloader, SectorGroup, SectorGroupRequest},
         ApploaderHeader, DiscHeader, DolHeader, PartitionHeader, BI2_SIZE, BOOT_SIZE,
         SECTOR_GROUP_SIZE, SECTOR_SIZE,
     },
@@ -77,14 +77,9 @@ impl BufRead for PartitionReaderGC {
             mode: PartitionEncryption::Original,
         };
 
-        let sector_group = if matches!(&self.sector_group, Some(sector_group) if sector_group.request == request)
-        {
-            // We can improve this in Rust 2024 with `if_let_rescope`
-            // https://github.com/rust-lang/rust/issues/124085
-            self.sector_group.as_ref().unwrap()
-        } else {
-            self.sector_group.insert(self.preloader.fetch(request, max_groups)?)
-        };
+        // Load sector group
+        let (sector_group, _updated) =
+            fetch_sector_group(request, max_groups, &mut self.sector_group, &self.preloader)?;
 
         // Calculate the number of consecutive sectors in the group
         let group_sector = abs_sector - abs_group_sector;
@@ -146,18 +141,9 @@ pub(crate) fn read_dol(
     let mut raw_dol: Vec<u8> =
         read_vec(reader, size_of::<DolHeader>()).context("Reading DOL header")?;
     let dol_header = DolHeader::ref_from_bytes(raw_dol.as_slice()).unwrap();
-    let dol_size = dol_header
-        .text_offs
-        .iter()
-        .zip(&dol_header.text_sizes)
+    let dol_size = (dol_header.text_offs.iter().zip(&dol_header.text_sizes))
+        .chain(dol_header.data_offs.iter().zip(&dol_header.data_sizes))
         .map(|(offs, size)| offs.get() + size.get())
-        .chain(
-            dol_header
-                .data_offs
-                .iter()
-                .zip(&dol_header.data_sizes)
-                .map(|(offs, size)| offs.get() + size.get()),
-        )
         .max()
         .unwrap_or(size_of::<DolHeader>() as u32);
     raw_dol.resize(dol_size as usize, 0);
@@ -203,12 +189,10 @@ pub(crate) fn read_part_meta(
     let mut raw_apploader: Vec<u8> =
         read_vec(reader, size_of::<ApploaderHeader>()).context("Reading apploader header")?;
     let apploader_header = ApploaderHeader::ref_from_bytes(raw_apploader.as_slice()).unwrap();
-    raw_apploader.resize(
-        size_of::<ApploaderHeader>()
-            + apploader_header.size.get() as usize
-            + apploader_header.trailer_size.get() as usize,
-        0,
-    );
+    let apploader_size = size_of::<ApploaderHeader>()
+        + apploader_header.size.get() as usize
+        + apploader_header.trailer_size.get() as usize;
+    raw_apploader.resize(apploader_size, 0);
     reader
         .read_exact(&mut raw_apploader[size_of::<ApploaderHeader>()..])
         .context("Reading apploader")?;

View File

@@ -11,6 +11,7 @@ use bytes::{Bytes, BytesMut};
 use crossbeam_channel::{Receiver, Sender};
 use crossbeam_utils::sync::WaitGroup;
 use lru::LruCache;
+use polonius_the_crab::{polonius, polonius_return};
 use simple_moving_average::{SingleSumSMA, SMA};
 use tracing::{debug, error, instrument, span, Level};
 use zerocopy::FromZeros;
@@ -556,3 +557,22 @@ impl SectorGroupLoader {
         Ok((sector_bitmap, io_duration))
     }
 }
+
+/// Fetch a sector group from the cache or from the preloader.
+/// Returns a boolean indicating if the group was updated.
+pub fn fetch_sector_group<'a>(
+    request: SectorGroupRequest,
+    max_groups: u32,
+    mut cached: &'a mut Option<SectorGroup>,
+    preloader: &Preloader,
+) -> io::Result<(&'a SectorGroup, bool)> {
+    polonius!(|cached| -> io::Result<(&'polonius SectorGroup, bool)> {
+        if let Some(sector_group) = cached {
+            if sector_group.request == request {
+                polonius_return!(Ok((sector_group, false)));
+            }
+        }
+    });
+    let sector_group = preloader.fetch(request, max_groups)?;
+    Ok((cached.insert(sector_group), true))
+}
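The helper keeps the borrow-checker workaround in one place: it returns the reference found by the `if let` itself instead of re-checking and unwrapping, and current (pre-Polonius) borrow checking rejects holding that conditional borrow across the later `insert`. The removed `matches!` + `as_ref().unwrap()` dance at the call sites was the old way around this, and the new polonius-the-crab dependency is the new one. A minimal sketch of the same `polonius!` / `polonius_return!` pattern, using a hypothetical `(u32, String)` cache rather than the crate's SectorGroup types; `get_or_insert` is an illustrative name, not part of this commit.

    use polonius_the_crab::{polonius, polonius_return};

    /// Return the cached value when the key matches, otherwise build and cache a new one.
    /// Without `polonius!`, the early return would keep the `&'a mut` borrow alive on the
    /// miss path as well, and the later `insert` would be rejected.
    fn get_or_insert<'a>(
        mut cached: &'a mut Option<(u32, String)>,
        key: u32,
        make: impl FnOnce() -> String,
    ) -> &'a String {
        polonius!(|cached| -> &'polonius String {
            if let Some((k, v)) = cached {
                if *k == key {
                    // Cache hit: return through the macro so the borrow ends on the miss path.
                    polonius_return!(&*v);
                }
            }
        });
        // Cache miss: `cached` is usable again here, as in `fetch_sector_group` above.
        &cached.insert((key, make())).1
    }

    fn main() {
        let mut cache = None;
        assert_eq!(get_or_insert(&mut cache, 7, || "fresh".to_owned()), "fresh");
        // Second lookup with the same key hits the cache; the closure is not called.
        assert_eq!(get_or_insert(&mut cache, 7, || unreachable!()), "fresh");
    }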

View File

@@ -14,7 +14,9 @@ use crate::{
         direct::{DirectDiscReader, DirectDiscReaderMode},
         fst::{Fst, NodeKind},
         gcn::{read_fst, PartitionReaderGC},
-        preloader::{Preloader, SectorGroup, SectorGroupLoader, SectorGroupRequest},
+        preloader::{
+            fetch_sector_group, Preloader, SectorGroup, SectorGroupLoader, SectorGroupRequest,
+        },
         wii::{
             PartitionReaderWii, WiiPartEntry, WiiPartGroup, WiiPartitionHeader, REGION_OFFSET,
             REGION_SIZE, WII_PART_GROUP_OFF,
@@ -32,6 +34,7 @@ use crate::{
 pub struct DiscReader {
     io: Box<dyn BlockReader>,
+    preloader: Arc<Preloader>,
     pos: u64,
     size: u64,
     mode: PartitionEncryption,
@@ -39,7 +42,6 @@ pub struct DiscReader {
     partitions: Arc<[PartitionInfo]>,
     region: Option<[u8; REGION_SIZE]>,
     sector_group: Option<SectorGroup>,
-    preloader: Arc<Preloader>,
     alt_disc_header: Option<Arc<DiscHeader>>,
     alt_partitions: Option<Arc<[PartitionInfo]>>,
 }
@@ -48,6 +50,7 @@ impl Clone for DiscReader {
     fn clone(&self) -> Self {
         Self {
             io: self.io.clone(),
+            preloader: self.preloader.clone(),
             pos: 0,
             size: self.size,
             mode: self.mode,
@@ -55,7 +58,6 @@ impl Clone for DiscReader {
             partitions: self.partitions.clone(),
             region: self.region,
             sector_group: None,
-            preloader: self.preloader.clone(),
             alt_disc_header: self.alt_disc_header.clone(),
             alt_partitions: self.alt_partitions.clone(),
         }
@@ -124,6 +126,7 @@ impl DiscReader {
         );
         Ok(Self {
             io,
+            preloader,
             pos: 0,
             size,
             mode: options.partition_encryption,
@@ -131,7 +134,6 @@ impl DiscReader {
             partitions,
             region,
             sector_group: None,
-            preloader,
             alt_disc_header,
             alt_partitions,
         })
@@ -246,14 +248,8 @@ impl DiscReader {
         };
 
         // Load sector group
-        let sector_group = if matches!(&self.sector_group, Some(sector_group) if sector_group.request == request)
-        {
-            // We can improve this in Rust 2024 with `if_let_rescope`
-            // https://github.com/rust-lang/rust/issues/124085
-            self.sector_group.as_ref().unwrap()
-        } else {
-            self.sector_group.insert(self.preloader.fetch(request, max_groups)?)
-        };
+        let (sector_group, _updated) =
+            fetch_sector_group(request, max_groups, &mut self.sector_group, &self.preloader)?;
 
         // Calculate the number of consecutive sectors in the group
         let group_sector = abs_sector - abs_group_sector;
@@ -307,14 +303,8 @@ impl BufRead for DiscReader {
         };
 
        // Load sector group
-        let sector_group = if matches!(&self.sector_group, Some(sector_group) if sector_group.request == request)
-        {
-            // We can improve this in Rust 2024 with `if_let_rescope`
-            // https://github.com/rust-lang/rust/issues/124085
-            self.sector_group.as_ref().unwrap()
-        } else {
-            self.sector_group.insert(self.preloader.fetch(request, max_groups)?)
-        };
+        let (sector_group, _updated) =
+            fetch_sector_group(request, max_groups, &mut self.sector_group, &self.preloader)?;
 
         // Calculate the number of consecutive sectors in the group
         let group_sector = abs_sector - abs_group_sector;

View File

@@ -15,7 +15,7 @@ use crate::{
     disc::{
         gcn::{read_part_meta, PartitionReaderGC},
         hashes::sha1_hash,
-        preloader::{Preloader, SectorGroup, SectorGroupRequest},
+        preloader::{fetch_sector_group, Preloader, SectorGroup, SectorGroupRequest},
         SECTOR_GROUP_SIZE, SECTOR_SIZE,
     },
     io::block::BlockReader,
@@ -378,24 +378,19 @@ impl BufRead for PartitionReaderWii {
                 PartitionEncryption::ForceDecryptedNoHashes
             },
         };
-        let sector_group = if matches!(&self.sector_group, Some(sector_group) if sector_group.request == request)
-        {
-            // We can improve this in Rust 2024 with `if_let_rescope`
-            // https://github.com/rust-lang/rust/issues/124085
-            self.sector_group.as_ref().unwrap()
-        } else {
-            let sector_group = self.preloader.fetch(request, max_groups)?;
-            if self.options.validate_hashes {
-                if let Some(h3_table) = self.meta.as_ref().and_then(|m| m.raw_h3_table.as_deref()) {
-                    verify_hashes(
-                        array_ref![sector_group.data, 0, SECTOR_GROUP_SIZE],
-                        group_idx,
-                        h3_table,
-                    )?;
-                }
-            }
-            self.sector_group.insert(sector_group)
-        };
+
+        // Load sector group
+        let (sector_group, updated) =
+            fetch_sector_group(request, max_groups, &mut self.sector_group, &self.preloader)?;
+        if updated && self.options.validate_hashes {
+            if let Some(h3_table) = self.meta.as_ref().and_then(|m| m.raw_h3_table.as_deref()) {
+                verify_hashes(
+                    array_ref![sector_group.data, 0, SECTOR_GROUP_SIZE],
+                    group_idx,
+                    h3_table,
+                )?;
+            }
+        }
 
         // Read from sector group buffer
         let consecutive_sectors = sector_group.consecutive_sectors(group_sector);