From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from firstgate.proxmox.com (firstgate.proxmox.com [212.224.123.68]) by lore.proxmox.com (Postfix) with ESMTPS id 1196F1FF187 for ; Mon, 6 Oct 2025 15:17:19 +0200 (CEST) Received: from firstgate.proxmox.com (localhost [127.0.0.1]) by firstgate.proxmox.com (Proxmox) with ESMTP id 9BC294ACC; Mon, 6 Oct 2025 15:17:22 +0200 (CEST) Date: Mon, 06 Oct 2025 15:17:15 +0200 From: Fabian =?iso-8859-1?q?Gr=FCnbichler?= To: Proxmox Backup Server development discussion References: <20251006104151.487202-1-c.ebner@proxmox.com> <20251006104151.487202-4-c.ebner@proxmox.com> In-Reply-To: <20251006104151.487202-4-c.ebner@proxmox.com> MIME-Version: 1.0 User-Agent: astroid/0.17.0 (https://github.com/astroidmail/astroid) Message-Id: <1759750889.o3xg1a8w89.astroid@yuna.none> X-Bm-Milter-Handled: 55990f41-d878-4baa-be0a-ee34c49e34d2 X-Bm-Transport-Timestamp: 1759756610432 X-SPAM-LEVEL: Spam detection results: 0 AWL 0.049 Adjusted score from AWL reputation of From: address BAYES_00 -1.9 Bayes spam probability is 0 to 1% DMARC_MISSING 0.1 Missing DMARC policy KAM_DMARC_STATUS 0.01 Test Rule for DKIM or SPF Failure with Strict Alignment SPF_HELO_NONE 0.001 SPF: HELO does not publish an SPF Record SPF_PASS -0.001 SPF: sender matches SPF record Subject: Re: [pbs-devel] [PATCH proxmox-backup 3/7] chunk store: add and use method to remove chunks X-BeenThere: pbs-devel@lists.proxmox.com X-Mailman-Version: 2.1.29 Precedence: list List-Id: Proxmox Backup Server development discussion List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Reply-To: Proxmox Backup Server development discussion Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit Errors-To: pbs-devel-bounces@lists.proxmox.com Sender: "pbs-devel" On October 6, 2025 12:41 pm, Christian Ebner wrote: > Reworks the removing of cached chunks during phase 2 of garbage > collection for datastores backed by s3. 
> > Move the actual chunk removal logic to be a method of the chunk store > and require the mutex guard to be passed as a shared reference, > signaling that the caller locked the store as required to avoid races > with chunk insert. > > Signed-off-by: Christian Ebner > --- > pbs-datastore/src/chunk_store.rs | 15 ++++++++++++++- > pbs-datastore/src/datastore.rs | 2 +- > pbs-datastore/src/local_datastore_lru_cache.rs | 7 +++---- > 3 files changed, 18 insertions(+), 6 deletions(-) > > diff --git a/pbs-datastore/src/chunk_store.rs b/pbs-datastore/src/chunk_store.rs > index 0725ca3a7..010785fbc 100644 > --- a/pbs-datastore/src/chunk_store.rs > +++ b/pbs-datastore/src/chunk_store.rs > @@ -1,7 +1,7 @@ > use std::os::unix::fs::MetadataExt; > use std::os::unix::io::AsRawFd; > use std::path::{Path, PathBuf}; > -use std::sync::{Arc, Mutex}; > +use std::sync::{Arc, Mutex, MutexGuard}; > use std::time::Duration; > > use anyhow::{bail, format_err, Context, Error}; > @@ -254,6 +254,19 @@ impl ChunkStore { > Ok(true) > } > > + /// Remove a chunk from the chunk store > + /// > + /// Used to remove chunks from the local datastore cache. Caller must signal to hold the chunk > + /// store mutex lock. > + pub fn remove_chunk( > + &self, > + digest: &[u8; 32], > + _guard: &MutexGuard<'_, ()>, if we do this, then this should be a proper type across the board.. but it also is a bit wrong interface-wise - just obtaining the chunk store lock doesn't make it safe to remove chunks, it's still only GC that is "allowed" to do that since it handles all the logic and additional locking.. while that is the case in the call path here/now, it should be mentioned in the doc comments at least, and the visibility restricted accordingly.. 
> + ) -> Result<(), Error> { > + let (path, _digest) = self.chunk_path(digest); > + std::fs::remove_file(path).map_err(Error::from) > + } > + > pub fn get_chunk_iterator( > &self, > ) -> Result< > diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs > index e36af68fc..4f55eb9db 100644 > --- a/pbs-datastore/src/datastore.rs > +++ b/pbs-datastore/src/datastore.rs > @@ -1686,7 +1686,7 @@ impl DataStore { > ) { > if let Some(cache) = self.cache() { > // ignore errors, phase 3 will retry cleanup anyways > - let _ = cache.remove(&digest); > + let _ = cache.remove(&digest, &lock); so this call site here is okay, because it happens in GC after all the checks and additional locking have been done to ensure: - chunks which are not yet referenced by visible indices are not removed - GC is not running in a pre-reload process that doesn't "see" new backup writers - GC is not running in a post-reload process while the old process still has writers - .. > } > delete_list.push(content.key); > } > diff --git a/pbs-datastore/src/local_datastore_lru_cache.rs b/pbs-datastore/src/local_datastore_lru_cache.rs > index c0edd3619..1d2e87cb9 100644 > --- a/pbs-datastore/src/local_datastore_lru_cache.rs > +++ b/pbs-datastore/src/local_datastore_lru_cache.rs > @@ -2,7 +2,7 @@ > //! a network layer (e.g. via the S3 backend). > > use std::future::Future; > -use std::sync::Arc; > +use std::sync::{Arc, MutexGuard}; > > use anyhow::{bail, Error}; > use http_body_util::BodyExt; > @@ -87,10 +87,9 @@ impl LocalDatastoreLruCache { > /// Remove a chunk from the local datastore cache. > /// > /// Fails if the chunk cannot be deleted successfully. 
> - pub fn remove(&self, digest: &[u8; 32]) -> Result<(), Error> { > + pub fn remove(&self, digest: &[u8; 32], guard: &MutexGuard<'_, ()>) -> Result<(), Error> { > self.cache.remove(*digest); > - let (path, _digest_str) = self.store.chunk_path(digest); > - std::fs::remove_file(path).map_err(Error::from) > + self.store.remove_chunk(digest, guard) and this is the only call site "forwarding" this removal from the cache called above by GC to the underlying chunk store, but it should probably also not be `pub`? > } > > /// Access the locally cached chunk or fetch it from the S3 object store via the provided > -- > 2.47.3 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel@lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > > _______________________________________________ pbs-devel mailing list pbs-devel@lists.proxmox.com https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel