From: Fabian Grünbichler
To: Proxmox Backup Server development discussion
Subject: Re: [pbs-devel] [PATCH proxmox-backup 2/7] gc: chunk store: rework atime check and gc status into common helper
Date: Mon, 06 Oct 2025 15:14:41 +0200
Message-Id: <1759750587.hi20p6x9ui.astroid@yuna.none>
In-Reply-To: <20251006104151.487202-3-c.ebner@proxmox.com>
References: <20251006104151.487202-1-c.ebner@proxmox.com> <20251006104151.487202-3-c.ebner@proxmox.com>

On October 6, 2025 12:41 pm, Christian Ebner wrote:
> Use the shared code paths for both, filesystem and s3 backend to a
> common helper to avoid code duplication and adapt callsites
> accordingly.
> 
> Signed-off-by: Christian Ebner
> ---
>  pbs-datastore/src/chunk_store.rs | 69 ++++++++++++++++++++++----------
>  pbs-datastore/src/datastore.rs   | 29 +++++---------
>  2 files changed, 57 insertions(+), 41 deletions(-)
> 
> diff --git a/pbs-datastore/src/chunk_store.rs b/pbs-datastore/src/chunk_store.rs
> index 3c59612bb..0725ca3a7 100644
> --- a/pbs-datastore/src/chunk_store.rs
> +++ b/pbs-datastore/src/chunk_store.rs
> @@ -408,36 +408,27 @@ impl ChunkStore {
>  
>              chunk_count += 1;
>  
> -            if stat.st_atime < min_atime {
> -                //let age = now - stat.st_atime;
> -                //println!("UNLINK {} {:?}", age/(3600*24), filename);
> +            if Self::check_atime_and_update_gc_status(
> +                stat.st_atime,
> +                min_atime,
> +                oldest_writer,
> +                stat.st_size as u64,
> +                bad,
> +                status,
> +            ) {
>                  if let Err(err) = unlinkat(Some(dirfd), filename, UnlinkatFlags::NoRemoveDir) {

if this part were also handled by the helper (in a fashion that allows
using it for both the cache and the regular chunk store)

>                      if bad {
> +                        status.removed_bad -= 1;
>                          status.still_bad += 1;
> +                    } else {
> +                        status.removed_chunks += 1;
>                      }
> +                    status.removed_bytes -= stat.st_size as u64;

then this error handling would not leak outside of the helper, and we
could "simply" call the helper and bubble up any error it returns..
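
something along these lines maybe (just a rough sketch to illustrate the
idea - the helper name, the closure-based signature and the exact error
handling are made up here and untested):

use anyhow::Error;

// assuming the same GarbageCollectionStatus type the existing code already uses
fn check_atime_and_maybe_remove<F>(
    atime: i64,
    min_atime: i64,
    oldest_writer: i64,
    size: u64,
    bad: bool,
    gc_status: &mut GarbageCollectionStatus,
    remove: F,
) -> Result<(), Error>
where
    F: FnOnce() -> Result<(), Error>,
{
    if atime < min_atime {
        // remove first, then account - a failed removal just bubbles up
        // and the caller never has to revert any counters
        remove()?;
        if bad {
            gc_status.removed_bad += 1;
        } else {
            gc_status.removed_chunks += 1;
        }
        gc_status.removed_bytes += size;
    } else if atime < oldest_writer {
        if bad {
            gc_status.still_bad += 1;
        } else {
            gc_status.pending_chunks += 1;
        }
        gc_status.pending_bytes += size;
    } else {
        if !bad {
            gc_status.disk_chunks += 1;
        }
        gc_status.disk_bytes += size;
    }
    Ok(())
}

the chunk store side would then pass a closure doing the unlinkat (mapping
the error into the existing bail! message), and the cache/S3 side a closure
that drops the chunk from the cache and pushes the key onto the delete
list, always returning Ok.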
>                          bail!(
>                              "unlinking chunk {filename:?} failed on store '{}' - {err}",
>                              self.name,
>                          );
>                      }
> -                if bad {
> -                    status.removed_bad += 1;
> -                } else {
> -                    status.removed_chunks += 1;
> -                }
> -                status.removed_bytes += stat.st_size as u64;
> -            } else if stat.st_atime < oldest_writer {
> -                if bad {
> -                    status.still_bad += 1;
> -                } else {
> -                    status.pending_chunks += 1;
> -                }
> -                status.pending_bytes += stat.st_size as u64;
> -            } else {
> -                if !bad {
> -                    status.disk_chunks += 1;
> -                }
> -                status.disk_bytes += stat.st_size as u64;
>              }
>          }
>          drop(lock);
> @@ -446,6 +437,42 @@ impl ChunkStore {
>          Ok(())
>      }
>  
> +    /// Check within what range the provided chunks atime falls and update the garbage collection
> +    /// status accordingly.
> +    ///
> +    /// Returns true if the chunk file should be removed.
> +    pub(super) fn check_atime_and_update_gc_status(
> +        atime: i64,
> +        min_atime: i64,
> +        oldest_writer: i64,
> +        size: u64,
> +        bad: bool,
> +        gc_status: &mut GarbageCollectionStatus,
> +    ) -> bool {
> +        if atime < min_atime {
> +            if bad {
> +                gc_status.removed_bad += 1;
> +            } else {
> +                gc_status.removed_chunks += 1;
> +            }
> +            gc_status.removed_bytes += size;
> +            return true;
> +        } else if atime < oldest_writer {
> +            if bad {
> +                gc_status.still_bad += 1;
> +            } else {
> +                gc_status.pending_chunks += 1;
> +            }
> +            gc_status.pending_bytes += size;
> +        } else {
> +            if !bad {
> +                gc_status.disk_chunks += 1;
> +            }
> +            gc_status.disk_bytes += size;
> +        }
> +        false
> +    }
> +
>      /// Check if atime updates are honored by the filesystem backing the chunk store.
>      ///
>      /// Checks if the atime is always updated by utimensat taking into consideration the Linux
> diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
> index c2a82b8b8..e36af68fc 100644
> --- a/pbs-datastore/src/datastore.rs
> +++ b/pbs-datastore/src/datastore.rs
> @@ -1676,30 +1676,19 @@ impl DataStore {
>                  .extension()
>                  .is_some_and(|ext| ext == "bad");
>  
> -            if atime < min_atime {
> +            if ChunkStore::check_atime_and_update_gc_status(
> +                atime,
> +                min_atime,
> +                oldest_writer,
> +                content.size,
> +                bad,
> +                &mut gc_status,
> +            ) {
>                  if let Some(cache) = self.cache() {
>                      // ignore errors, phase 3 will retry cleanup anyways
>                      let _ = cache.remove(&digest);
>                  }
> -                delete_list.push(content.key.clone());
> -                if bad {
> -                    gc_status.removed_bad += 1;
> -                } else {
> -                    gc_status.removed_chunks += 1;
> -                }
> -                gc_status.removed_bytes += content.size;
> -            } else if atime < oldest_writer {
> -                if bad {
> -                    gc_status.still_bad += 1;
> -                } else {
> -                    gc_status.pending_chunks += 1;
> -                }
> -                gc_status.pending_bytes += content.size;
> -            } else {
> -                if !bad {
> -                    gc_status.disk_chunks += 1;
> -                }
> -                gc_status.disk_bytes += content.size;
> +                delete_list.push(content.key);

nit: this removal of the clone could have happened in patch #1

>              }
>  
>              chunk_count += 1;
> -- 
> 2.47.3