From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from firstgate.proxmox.com (firstgate.proxmox.com [212.224.123.68]) by lore.proxmox.com (Postfix) with ESMTPS id 5E94F1FF141 for ; Fri, 30 Jan 2026 17:47:10 +0100 (CET) Received: from firstgate.proxmox.com (localhost [127.0.0.1]) by firstgate.proxmox.com (Proxmox) with ESMTP id 9A68632F4; Fri, 30 Jan 2026 17:47:37 +0100 (CET) From: Robert Obkircher To: pbs-devel@lists.proxmox.com Subject: [PATCH v5 proxmox-backup 16/16] datastore: FixedIndexWriter: switch internal chunk_size to u32 Date: Fri, 30 Jan 2026 17:45:40 +0100 Message-ID: <20260130164552.281581-17-r.obkircher@proxmox.com> X-Mailer: git-send-email 2.47.3 In-Reply-To: <20260130164552.281581-1-r.obkircher@proxmox.com> References: <20260130164552.281581-1-r.obkircher@proxmox.com> MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Bm-Milter-Handled: 55990f41-d878-4baa-be0a-ee34c49e34d2 X-Bm-Transport-Timestamp: 1769791554753 X-SPAM-LEVEL: Spam detection results: 0 AWL 0.057 Adjusted score from AWL reputation of From: address BAYES_00 -1.9 Bayes spam probability is 0 to 1% DMARC_MISSING 0.1 Missing DMARC policy KAM_DMARC_STATUS 0.01 Test Rule for DKIM or SPF Failure with Strict Alignment SPF_HELO_NONE 0.001 SPF: HELO does not publish an SPF Record SPF_PASS -0.001 SPF: sender matches SPF record Message-ID-Hash: H6GCTNHPEXIBOOEJ5ZF2UE7ZL64PRUDZ X-Message-ID-Hash: H6GCTNHPEXIBOOEJ5ZF2UE7ZL64PRUDZ X-MailFrom: r.obkircher@proxmox.com X-Mailman-Rule-Misses: dmarc-mitigation; no-senders; approved; loop; banned-address; emergency; member-moderation; nonmember-moderation; administrivia; implicit-dest; max-recipients; max-size; news-moderation; no-subject; digests; suspicious-header X-Mailman-Version: 3.3.10 Precedence: list List-Id: Proxmox Backup Server development discussion List-Help: List-Owner: List-Post: List-Subscribe: List-Unsubscribe: I don't really like this change because it introduces quite a few conversions. 
Signed-off-by: Robert Obkircher --- pbs-datastore/src/datastore.rs | 2 +- pbs-datastore/src/fixed_index.rs | 43 ++++++++++++++++---------------- 2 files changed, 23 insertions(+), 22 deletions(-) diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs index 98eab18d..631bdc30 100644 --- a/pbs-datastore/src/datastore.rs +++ b/pbs-datastore/src/datastore.rs @@ -699,7 +699,7 @@ impl DataStore { chunk_size: u32, ) -> Result { let full_path = self.inner.chunk_store.relative_path(filename.as_ref()); - FixedIndexWriter::create(full_path, size, chunk_size.into()) + FixedIndexWriter::create(full_path, size, chunk_size) } pub fn open_fixed_reader>( diff --git a/pbs-datastore/src/fixed_index.rs b/pbs-datastore/src/fixed_index.rs index 3fa146f8..ba6874fe 100644 --- a/pbs-datastore/src/fixed_index.rs +++ b/pbs-datastore/src/fixed_index.rs @@ -225,7 +225,7 @@ pub struct FixedIndexWriter { file: File, filename: PathBuf, tmp_filename: PathBuf, - chunk_size: u64, + chunk_size: u32, size: u64, index_length: usize, index_capacity: usize, @@ -263,7 +263,7 @@ impl FixedIndexWriter { pub fn create( full_path: impl Into, known_size: Option, - chunk_size: u64, + chunk_size: u32, ) -> Result { let full_path = full_path.into(); let mut tmp_path = full_path.clone(); @@ -290,7 +290,7 @@ impl FixedIndexWriter { let (index_length, index_capacity) = match known_size { Some(s) => { - let len = s.div_ceil(chunk_size).try_into()?; + let len = s.div_ceil(chunk_size.into()).try_into()?; (len, len) } None => (0, Self::INITIAL_CAPACITY), @@ -313,7 +313,7 @@ impl FixedIndexWriter { let header = unsafe { memory.header().as_mut() }; header.magic = file_formats::FIXED_SIZED_CHUNK_INDEX_1_0; header.ctime = i64::to_le(ctime); - header.chunk_size = u64::to_le(chunk_size); + header.chunk_size = u64::to_le(chunk_size.into()); header.uuid = *uuid.as_bytes(); Ok(Self { @@ -402,8 +402,9 @@ impl FixedIndexWriter { if !self.growable_size { bail!("refusing to resize from {} to {requested_size}", 
self.size); } - let new_len = requested_size.div_ceil(self.chunk_size).try_into()?; - if new_len as u64 * self.chunk_size != requested_size { + let cs = u64::from(self.chunk_size); + let new_len = requested_size.div_ceil(cs).try_into()?; + if new_len as u64 * cs != requested_size { // not a full chunk, so this must be the last one self.growable_size = false; self.set_index_capacity_or_unmap(new_len)?; @@ -472,8 +473,8 @@ impl FixedIndexWriter { Ok(index_csum) } - fn check_chunk_alignment(&self, offset: u64, chunk_len: u64) -> Result { - let Some(pos) = offset.checked_sub(chunk_len) else { + fn check_chunk_alignment(&self, offset: u64, chunk_len: u32) -> Result { + let Some(pos) = offset.checked_sub(chunk_len.into()) else { bail!("got chunk with small offset ({} < {}", offset, chunk_len); }; @@ -493,11 +494,11 @@ impl FixedIndexWriter { ); } - if pos & (self.chunk_size - 1) != 0 { + if pos & (u64::from(self.chunk_size) - 1) != 0 { bail!("got unaligned chunk (pos = {})", pos); } - Ok((pos / self.chunk_size) as usize) + Ok((pos / u64::from(self.chunk_size)) as usize) } fn add_digest(&mut self, index: usize, digest: &[u8; 32]) -> Result<(), Error> { @@ -536,7 +537,7 @@ impl FixedIndexWriter { bail!("add_chunk: start and size are too large: {start}+{size}"); }; self.grow_to_size(end)?; - let idx = self.check_chunk_alignment(end, size.into())?; + let idx = self.check_chunk_alignment(end, size)?; self.add_digest(idx, digest) } @@ -568,7 +569,7 @@ mod tests { use super::*; use crate::temp_test_dir::TempTestDir; - const CS: u64 = 4096; + const CS: u32 = 4096; #[test] fn test_empty() { @@ -594,7 +595,7 @@ mod tests { let path = dir.join("test_single_partial_chunk"); let mut w = FixedIndexWriter::create(&path, None, CS).unwrap(); - let size = CS - 1; + let size = CS as u64 - 1; let expected = test_data(size); w.grow_to_size(size).unwrap(); expected[0].add_to(&mut w); @@ -614,7 +615,7 @@ mod tests { let initial = FixedIndexWriter::INITIAL_CAPACITY; let steps = [1, 2, initial, 
initial + 1, 5 * initial, 10 * initial + 1]; - let expected = test_data(*steps.last().unwrap() as u64 * CS); + let expected = test_data(*steps.last().unwrap() as u64 * CS as u64); let mut begin = 0; for chunk_count in steps { @@ -631,7 +632,7 @@ mod tests { w.close().unwrap(); drop(w); - let size = expected.len() as u64 * CS; + let size = expected.len() as u64 * CS as u64; check_with_reader(&path, size, &expected); compare_to_known_size_writer(&path, size, &expected); } @@ -642,7 +643,7 @@ mod tests { let path = dir.join("test_grow_to_misaligned_size"); let mut w = FixedIndexWriter::create(&path, None, CS).unwrap(); - let size = (FixedIndexWriter::INITIAL_CAPACITY as u64 + 42) * CS - 1; // last is not full + let size = (FixedIndexWriter::INITIAL_CAPACITY as u64 + 42) * CS as u64 - 1; // last is not full let expected = test_data(size); w.grow_to_size(size).unwrap(); @@ -664,7 +665,7 @@ mod tests { struct TestChunk { digest: [u8; 32], index: usize, - size: u64, + size: u32, end: u64, } @@ -679,23 +680,23 @@ mod tests { } fn test_data(size: u64) -> Vec { - (0..size.div_ceil(CS)) + (0..size.div_ceil(CS.into())) .map(|index| { let mut digest = [0u8; 32]; let i = &(index as u64).to_le_bytes(); for c in digest.chunks_mut(i.len()) { c.copy_from_slice(i); } - let size = if ((index + 1) * CS) <= size { + let size = if ((index + 1) * CS as u64) <= size { CS } else { - size % CS + (size % CS as u64) as u32 }; TestChunk { digest, index: index as usize, size, - end: index as u64 * CS + size, + end: index as u64 * CS as u64 + size as u64, } }) .collect() -- 2.47.3