From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [pbs-devel] [PATCH proxmox-backup 2/6] datastore: refactor rename_corrupted_chunk error handling
Date: Thu, 16 Oct 2025 15:18:15 +0200
Message-ID: <20251016131819.349049-3-c.ebner@proxmox.com>
In-Reply-To: <20251016131819.349049-1-c.ebner@proxmox.com>

Being part of the verification process, the helper was not intended
to return errors on failure, but rather to just log information and
errors.

Refactoring the code so that the helper method returns errors and an
optional success message makes it more concise and readable.

However, keep logging both error and success messages at info level
at the call site, so as not to interfere with the task log.
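
The resulting pattern, as a minimal self-contained sketch (the paths,
the trimmed-down helper and the anyhow/tracing imports are assumptions
standing in for the actual datastore types and task-log macros):

    use anyhow::{bail, Error};
    use tracing::info;

    // Helper: returns an optional success message instead of logging
    // itself, and surfaces failures as errors.
    fn rename_corrupted_chunk(path: &str, new_path: &str) -> Result<Option<String>, Error> {
        match std::fs::rename(path, new_path) {
            // Success: hand the caller a message, let it decide how to log.
            Ok(_) => Ok(Some(format!("corrupted chunk renamed to {new_path:?}"))),
            // A chunk that is already gone is nothing to report.
            Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(None),
            // Everything else becomes an error for the caller to log.
            Err(err) => bail!("could not rename corrupted chunk {path:?} - {err}"),
        }
    }

    // Call site: log both outcomes at info level so the task log stays
    // uniform.
    fn handle_corrupt_chunk(path: &str, new_path: &str) {
        match rename_corrupted_chunk(path, new_path) {
            Ok(Some(message)) => info!("{message}"),
            Err(err) => info!("{err}"),
            _ => (),
        }
    }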

Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
 pbs-datastore/src/datastore.rs | 85 ++++++++++++++--------------------
 src/backup/verify.rs           | 12 ++++-
 2 files changed, 44 insertions(+), 53 deletions(-)

diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index 802a39536..c280b82c7 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -2419,13 +2419,13 @@ impl DataStore {
         Ok((backend_type, Some(s3_client)))
     }
 
-    pub fn rename_corrupted_chunk(&self, digest: &[u8; 32]) {
+    pub fn rename_corrupted_chunk(&self, digest: &[u8; 32]) -> Result<Option<String>, Error> {
         let (path, digest_str) = self.chunk_path(digest);
 
         let mut counter = 0;
         let mut new_path = path.clone();
         loop {
-            new_path.set_file_name(format!("{}.{}.bad", digest_str, counter));
+            new_path.set_file_name(format!("{digest_str}.{counter}.bad"));
             if new_path.exists() && counter < 9 {
                 counter += 1;
             } else {
@@ -2433,59 +2433,42 @@ impl DataStore {
             }
         }
 
-        let backend = match self.backend() {
-            Ok(backend) => backend,
-            Err(err) => {
-                info!(
-                    "failed to get backend while trying to rename bad chunk: {digest_str} - {err}"
-                );
-                return;
-            }
-        };
+        let backend = self.backend().map_err(|err| {
+            format_err!(
+                "failed to get backend while trying to rename bad chunk: {digest_str} - {err}"
+            )
+        })?;
 
         if let DatastoreBackend::S3(s3_client) = backend {
-            let suffix = format!(".{}.bad", counter);
-            let target_key = match crate::s3::object_key_from_digest_with_suffix(digest, &suffix) {
-                Ok(target_key) => target_key,
-                Err(err) => {
-                    info!("could not generate target key for corrupted chunk {path:?} - {err}");
-                    return;
-                }
-            };
-            let object_key = match crate::s3::object_key_from_digest(digest) {
-                Ok(object_key) => object_key,
-                Err(err) => {
-                    info!("could not generate object key for corrupted chunk {path:?} - {err}");
-                    return;
-                }
-            };
-            if proxmox_async::runtime::block_on(
-                s3_client.copy_object(object_key.clone(), target_key),
-            )
-            .is_ok()
-            {
-                if proxmox_async::runtime::block_on(s3_client.delete_object(object_key)).is_err() {
-                    info!("failed to delete corrupt chunk on s3 backend: {digest_str}");
-                }
-            } else {
-                info!("failed to copy corrupt chunk on s3 backend: {digest_str}");
-                // Early return to leave the potentially locally cached chunk in the same state as
-                // on the object store. Verification might have failed because of connection issue
-                // after all.
-                return;
-            }
+            let suffix = format!(".{counter}.bad");
+            let target_key = crate::s3::object_key_from_digest_with_suffix(digest, &suffix)
+                .map_err(|err| {
+                    format_err!(
+                        "could not generate target key for corrupted chunk {path:?} - {err}"
+                    )
+                })?;
+            let object_key = crate::s3::object_key_from_digest(digest).map_err(|err| {
+                format_err!("could not generate object key for corrupted chunk {path:?} - {err}")
+            })?;
+
+            proxmox_async::runtime::block_on(s3_client.copy_object(object_key.clone(), target_key))
+                .map_err(|err| {
+                    format_err!("failed to copy corrupt chunk on s3 backend: {digest_str} - {err}")
+                })?;
+
+            proxmox_async::runtime::block_on(s3_client.delete_object(object_key)).map_err(
+                |err| {
+                    format_err!(
+                        "failed to delete corrupt chunk on s3 backend: {digest_str} - {err}"
+                    )
+                },
+            )?;
         }
 
         match std::fs::rename(&path, &new_path) {
-            Ok(_) => {
-                info!("corrupted chunk renamed to {:?}", &new_path);
-            }
-            Err(err) => {
-                match err.kind() {
-                    std::io::ErrorKind::NotFound => { /* ignored */ }
-                    _ => info!("could not rename corrupted chunk {:?} - {err}", &path),
-                }
-            }
-        };
+            Ok(_) => Ok(Some(format!("corrupted chunk renamed to {new_path:?}"))),
+            Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(None),
+            Err(err) => bail!("could not rename corrupted chunk {path:?} - {err}"),
+        }
     }
 }
diff --git a/src/backup/verify.rs b/src/backup/verify.rs
index 92d3d9c49..39f36cd95 100644
--- a/src/backup/verify.rs
+++ b/src/backup/verify.rs
@@ -118,7 +118,11 @@ impl VerifyWorker {
                     corrupt_chunks2.lock().unwrap().insert(digest);
                     info!("{err}");
                     errors2.fetch_add(1, Ordering::SeqCst);
-                    datastore2.rename_corrupted_chunk(&digest);
+                    match datastore2.rename_corrupted_chunk(&digest) {
+                        Ok(Some(message)) => info!("{message}"),
+                        Err(err) => info!("{err}"),
+                        _ => (),
+                    }
                 } else {
                     verified_chunks2.lock().unwrap().insert(digest);
                 }
@@ -265,7 +269,11 @@ impl VerifyWorker {
         corrupt_chunks.insert(digest);
         error!(message);
         errors.fetch_add(1, Ordering::SeqCst);
-        self.datastore.rename_corrupted_chunk(&digest);
+        match self.datastore.rename_corrupted_chunk(&digest) {
+            Ok(Some(message)) => info!("{message}"),
+            Err(err) => info!("{err}"),
+            _ => (),
+        }
     }
 
     fn verify_fixed_index(&self, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
-- 
2.47.3



