From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from firstgate.proxmox.com (firstgate.proxmox.com [IPv6:2a01:7e0:0:424::9]) by lore.proxmox.com (Postfix) with ESMTPS id 64A081FF179 for ; Wed, 15 Oct 2025 18:40:08 +0200 (CEST) Received: from firstgate.proxmox.com (localhost [127.0.0.1]) by firstgate.proxmox.com (Proxmox) with ESMTP id BD9286D5; Wed, 15 Oct 2025 18:40:24 +0200 (CEST) From: Christian Ebner To: pbs-devel@lists.proxmox.com Date: Wed, 15 Oct 2025 18:40:07 +0200 Message-ID: <20251015164008.975591-10-c.ebner@proxmox.com> X-Mailer: git-send-email 2.47.3 In-Reply-To: <20251015164008.975591-1-c.ebner@proxmox.com> References: <20251015164008.975591-1-c.ebner@proxmox.com> MIME-Version: 1.0 X-Bm-Milter-Handled: 55990f41-d878-4baa-be0a-ee34c49e34d2 X-Bm-Transport-Timestamp: 1760546418439 X-SPAM-LEVEL: Spam detection results: 0 AWL 0.042 Adjusted score from AWL reputation of From: address BAYES_00 -1.9 Bayes spam probability is 0 to 1% DMARC_MISSING 0.1 Missing DMARC policy KAM_DMARC_STATUS 0.01 Test Rule for DKIM or SPF Failure with Strict Alignment SPF_HELO_NONE 0.001 SPF: HELO does not publish an SPF Record SPF_PASS -0.001 SPF: sender matches SPF record Subject: [pbs-devel] [PATCH proxmox-backup v3 7/8] api: chunk upload: fix race with garbage collection for no-cache on s3 X-BeenThere: pbs-devel@lists.proxmox.com X-Mailman-Version: 2.1.29 Precedence: list List-Id: Proxmox Backup Server development discussion List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Reply-To: Proxmox Backup Server development discussion Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit Errors-To: pbs-devel-bounces@lists.proxmox.com Sender: "pbs-devel" Chunks uploaded to the s3 backend are never inserted into the local datastore cache. The presence of the chunk marker file is however required for garbage collection to not cleanup the chunks. 
While the marker files are created during phase 1 of the garbage collection for indexed chunks, this is not the case for in-progress backups with the no-cache flag set. Therefore, mark chunks as in-progress while being uploaded just like for the regular mode with cache, but replace this with the zero-sized chunk marker file after the upload has finished to avoid incorrect garbage collection cleanup. Signed-off-by: Christian Ebner --- pbs-datastore/src/chunk_store.rs | 26 ++++++++++++++++++++++++++ pbs-datastore/src/datastore.rs | 7 +++++++ src/api2/backup/upload_chunk.rs | 4 ++++ 3 files changed, 37 insertions(+) diff --git a/pbs-datastore/src/chunk_store.rs b/pbs-datastore/src/chunk_store.rs index 2693a1c11..1e71b2970 100644 --- a/pbs-datastore/src/chunk_store.rs +++ b/pbs-datastore/src/chunk_store.rs @@ -646,6 +646,32 @@ impl ChunkStore { Ok(atime) } + /// Transform the backend upload marker to be a chunk marker. + /// + /// If the chunk marker is already present, its atime will be updated instead. + pub(crate) fn persist_backend_upload_marker(&self, digest: &[u8; 32]) -> Result<(), Error> { + if self.datastore_backend_type == DatastoreBackendType::Filesystem { + bail!("cannot persist backend upload marker, not a cache store"); + } + let (marker_path, _digest_str) = self.chunk_backed_upload_marker_path(digest); + let (chunk_path, digest_str) = self.chunk_path(digest); + let _lock = self.mutex.lock(); + + if let Err(err) = std::fs::rename(&marker_path, chunk_path) { + // Check that the chunk has been inserted and that it is therefore safe to clean up + // the upload marker nevertheless. + if self.cond_touch_chunk(digest, false)? 
{ + std::fs::remove_file(&marker_path)?; + return Ok(()); + } + + return Err(format_err!( + "persisting backend upload marker failed for {digest_str} - {err}" + )); + } + Ok(()) + } + pub fn insert_chunk(&self, chunk: &DataBlob, digest: &[u8; 32]) -> Result<(bool, u64), Error> { self.insert_chunk_impl(chunk, digest, |_, _| Ok(())) } diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs index aa34ab037..69c87c336 100644 --- a/pbs-datastore/src/datastore.rs +++ b/pbs-datastore/src/datastore.rs @@ -1871,6 +1871,13 @@ impl DataStore { self.inner.chunk_store.touch_backend_upload_marker(digest) } + /// Persist the backend upload marker as a zero-sized chunk marker. + /// + /// Marks the chunk as present in the local store cache without inserting its payload. + pub fn persist_backend_upload_marker(&self, digest: &[u8; 32]) -> Result<(), Error> { + self.inner.chunk_store.persist_backend_upload_marker(digest) + } + pub fn stat_chunk(&self, digest: &[u8; 32]) -> Result { let (chunk_path, _digest_str) = self.inner.chunk_store.chunk_path(digest); std::fs::metadata(chunk_path).map_err(Error::from) } diff --git a/src/api2/backup/upload_chunk.rs b/src/api2/backup/upload_chunk.rs index 0640f3652..bc64054a8 100644 --- a/src/api2/backup/upload_chunk.rs +++ b/src/api2/backup/upload_chunk.rs @@ -263,10 +263,14 @@ async fn upload_to_backend( if env.no_cache { let object_key = pbs_datastore::s3::object_key_from_digest(&digest)?; + if !datastore.touch_backend_upload_marker(&digest)? 
{ + return Ok((digest, size, encoded_size, true)); + } let is_duplicate = s3_client .upload_replace_on_final_retry(object_key, data) .await .map_err(|err| format_err!("failed to upload chunk to s3 backend - {err:#}"))?; + datastore.persist_backend_upload_marker(&digest)?; return Ok((digest, size, encoded_size, is_duplicate)); } -- 2.47.3 _______________________________________________ pbs-devel mailing list pbs-devel@lists.proxmox.com https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel