From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Date: Thu, 16 Oct 2025 15:18:15 +0200
Message-ID: <20251016131819.349049-3-c.ebner@proxmox.com>
X-Mailer: git-send-email 2.47.3
In-Reply-To: <20251016131819.349049-1-c.ebner@proxmox.com>
References: <20251016131819.349049-1-c.ebner@proxmox.com>
Subject: [pbs-devel] [PATCH proxmox-backup 2/6] datastore: refactor rename_corrupted_chunk error handling

As part of the verification process, the helper was not intended to
return errors on failure, but rather to just log information and
errors. Refactoring the code so that the helper method returns errors
and an optional success message makes it more concise and readable.
However, keep logging at info level at the call site for both error
and success messages, so as not to interfere with the task log.

Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
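Note (below the ---, so not part of the commit message): a minimal
standalone sketch of the call-site pattern this patch introduces,
assuming a hypothetical stub in place of DataStore::rename_corrupted_chunk()
and println! standing in for the tracing macros. The helper returns
Result<Option<String>, Error>; the caller logs success and error
uniformly at info level.

use anyhow::{bail, Error};

// Hypothetical stand-in for DataStore::rename_corrupted_chunk():
// Ok(Some(_)) carries a success message, Ok(None) means there was
// nothing to rename (e.g. the chunk file is already gone) and Err(_)
// reports a failure.
fn rename_corrupted_chunk(found: bool, rename_ok: bool) -> Result<Option<String>, Error> {
    if !rename_ok {
        bail!("could not rename corrupted chunk");
    }
    if !found {
        return Ok(None);
    }
    Ok(Some("corrupted chunk renamed".to_string()))
}

fn main() {
    // Call-site pattern from the patch: success and error are both
    // logged as info so the task log is not polluted with extra
    // error entries.
    match rename_corrupted_chunk(true, true) {
        Ok(Some(message)) => println!("info: {message}"),
        Err(err) => println!("info: {err}"),
        _ => (),
    }
}

The Ok(None) arm keeps an already-removed chunk silent instead of
treating it as an error.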
 pbs-datastore/src/datastore.rs | 85 ++++++++++++++--------------------
 src/backup/verify.rs           | 12 ++++-
 2 files changed, 44 insertions(+), 53 deletions(-)

diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index 802a39536..c280b82c7 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -2419,13 +2419,13 @@ impl DataStore {
         Ok((backend_type, Some(s3_client)))
     }
 
-    pub fn rename_corrupted_chunk(&self, digest: &[u8; 32]) {
+    pub fn rename_corrupted_chunk(&self, digest: &[u8; 32]) -> Result<Option<String>, Error> {
         let (path, digest_str) = self.chunk_path(digest);
 
         let mut counter = 0;
         let mut new_path = path.clone();
         loop {
-            new_path.set_file_name(format!("{}.{}.bad", digest_str, counter));
+            new_path.set_file_name(format!("{digest_str}.{counter}.bad"));
             if new_path.exists() && counter < 9 {
                 counter += 1;
             } else {
@@ -2433,59 +2433,42 @@
             }
         }
 
-        let backend = match self.backend() {
-            Ok(backend) => backend,
-            Err(err) => {
-                info!(
-                    "failed to get backend while trying to rename bad chunk: {digest_str} - {err}"
-                );
-                return;
-            }
-        };
+        let backend = self.backend().map_err(|err| {
+            format_err!(
+                "failed to get backend while trying to rename bad chunk: {digest_str} - {err}"
+            )
+        })?;
 
         if let DatastoreBackend::S3(s3_client) = backend {
-            let suffix = format!(".{}.bad", counter);
-            let target_key = match crate::s3::object_key_from_digest_with_suffix(digest, &suffix) {
-                Ok(target_key) => target_key,
-                Err(err) => {
-                    info!("could not generate target key for corrupted chunk {path:?} - {err}");
-                    return;
-                }
-            };
-            let object_key = match crate::s3::object_key_from_digest(digest) {
-                Ok(object_key) => object_key,
-                Err(err) => {
-                    info!("could not generate object key for corrupted chunk {path:?} - {err}");
-                    return;
-                }
-            };
-            if proxmox_async::runtime::block_on(
-                s3_client.copy_object(object_key.clone(), target_key),
-            )
-            .is_ok()
-            {
-                if proxmox_async::runtime::block_on(s3_client.delete_object(object_key)).is_err() {
-                    info!("failed to delete corrupt chunk on s3 backend: {digest_str}");
-                }
-            } else {
-                info!("failed to copy corrupt chunk on s3 backend: {digest_str}");
-                // Early return to leave the potentially locally cached chunk in the same state as
-                // on the object store. Verification might have failed because of connection issue
-                // after all.
-                return;
-            }
+            let suffix = format!(".{counter}.bad");
+            let target_key = crate::s3::object_key_from_digest_with_suffix(digest, &suffix)
+                .map_err(|err| {
+                    format_err!(
+                        "could not generate target key for corrupted chunk {path:?} - {err}"
+                    )
+                })?;
+            let object_key = crate::s3::object_key_from_digest(digest).map_err(|err| {
+                format_err!("could not generate object key for corrupted chunk {path:?} - {err}")
+            })?;
+
+            proxmox_async::runtime::block_on(s3_client.copy_object(object_key.clone(), target_key))
+                .map_err(|err| {
+                    format_err!("failed to copy corrupt chunk on s3 backend: {digest_str} - {err}")
+                })?;
+
+            proxmox_async::runtime::block_on(s3_client.delete_object(object_key)).map_err(
+                |err| {
+                    format_err!(
+                        "failed to delete corrupt chunk on s3 backend: {digest_str} - {err}"
+                    )
+                },
+            )?;
         }
 
         match std::fs::rename(&path, &new_path) {
-            Ok(_) => {
-                info!("corrupted chunk renamed to {:?}", &new_path);
-            }
-            Err(err) => {
-                match err.kind() {
-                    std::io::ErrorKind::NotFound => { /* ignored */ }
-                    _ => info!("could not rename corrupted chunk {:?} - {err}", &path),
-                }
-            }
-        };
+            Ok(_) => Ok(Some(format!("corrupted chunk renamed to {new_path:?}"))),
+            Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(None),
+            Err(err) => bail!("could not rename corrupted chunk {path:?} - {err}"),
+        }
     }
 }
 
diff --git a/src/backup/verify.rs b/src/backup/verify.rs
index 92d3d9c49..39f36cd95 100644
--- a/src/backup/verify.rs
+++ b/src/backup/verify.rs
@@ -118,7 +118,11 @@ impl VerifyWorker {
                     corrupt_chunks2.lock().unwrap().insert(digest);
                     info!("{err}");
                     errors2.fetch_add(1, Ordering::SeqCst);
-                    datastore2.rename_corrupted_chunk(&digest);
+                    match datastore2.rename_corrupted_chunk(&digest) {
+                        Ok(Some(message)) => info!("{message}"),
+                        Err(err) => info!("{err}"),
+                        _ => (),
+                    }
                 } else {
                     verified_chunks2.lock().unwrap().insert(digest);
                 }
@@ -265,7 +269,11 @@ impl VerifyWorker {
         corrupt_chunks.insert(digest);
         error!(message);
         errors.fetch_add(1, Ordering::SeqCst);
-        self.datastore.rename_corrupted_chunk(&digest);
+        match self.datastore.rename_corrupted_chunk(&digest) {
+            Ok(Some(message)) => info!("{message}"),
+            Err(err) => info!("{err}"),
+            _ => (),
+        }
     }
 
     fn verify_fixed_index(&self, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
-- 
2.47.3

_______________________________________________
pbs-devel mailing list
pbs-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel