From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [pbs-devel] [PATCH proxmox-backup v4 02/14] api/datastore: move s3 index upload helper to datastore backend
Date: Mon, 10 Nov 2025 12:56:15 +0100	[thread overview]
Message-ID: <20251110115627.280318-3-c.ebner@proxmox.com> (raw)
In-Reply-To: <20251110115627.280318-1-c.ebner@proxmox.com>

Move the s3 index upload helper to the datastore backend in an effort
to decouple the API implementation from the backend implementation and
to deduplicate code. Return a boolean flag to distinguish between a
successful upload and no action being required (filesystem backend
only).

Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
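Note (not part of the commit message): a minimal sketch of how an async
call site could consume the new helper and its boolean return. The
function name below is illustrative only and the module paths are
assumptions; the real call sites are the ones touched by this patch.

    use anyhow::{Context, Error};
    use pbs_datastore::{backup_info::BackupDir, DatastoreBackend};

    // Hypothetical async caller: upload the index via the backend helper
    // and only log when the backend was actually updated (S3 case). The
    // filesystem backend returns false, meaning no action was required.
    async fn upload_and_log(
        backend: &DatastoreBackend,
        backup_dir: &BackupDir,
        name: &str,
    ) -> Result<(), Error> {
        if backend
            .upload_index_to_backend(backup_dir, name)
            .await
            .context("failed to upload index to backend")?
        {
            println!("Uploaded index file to backend: {name}");
        }
        Ok(())
    }
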
 pbs-datastore/src/datastore.rs | 30 ++++++++++++++++++++++++++++
 src/api2/backup/environment.rs | 36 ++++++++++++----------------------
 src/server/pull.rs             | 16 ++++-----------
 3 files changed, 47 insertions(+), 35 deletions(-)

diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index 70af94d8f..d66e68332 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -224,6 +224,36 @@ pub enum DatastoreBackend {
     S3(Arc<S3Client>),
 }
 
+impl DatastoreBackend {
+    /// Reads the index file and uploads it to the backend.
+    ///
+    /// Returns `true` if the backend was updated, `false` if no action was required
+    pub async fn upload_index_to_backend(
+        &self,
+        backup_dir: &BackupDir,
+        name: &str,
+    ) -> Result<bool, Error> {
+        match self {
+            Self::Filesystem => Ok(false),
+            Self::S3(s3_client) => {
+                let object_key = crate::s3::object_key_from_path(&backup_dir.relative_path(), name)
+                    .context("invalid index file object key")?;
+
+                let mut full_path = backup_dir.full_path();
+                full_path.push(name);
+                let data = tokio::fs::read(&full_path)
+                    .await
+                    .context("failed to read index contents")?;
+                let contents = hyper::body::Bytes::from(data);
+                let _is_duplicate = s3_client
+                    .upload_replace_with_retry(object_key, contents)
+                    .await?;
+                Ok(true)
+            }
+        }
+    }
+}
+
 impl DataStore {
     // This one just panics on everything
     #[doc(hidden)]
diff --git a/src/api2/backup/environment.rs b/src/api2/backup/environment.rs
index 0faf6c8e0..1b8e0e1db 100644
--- a/src/api2/backup/environment.rs
+++ b/src/api2/backup/environment.rs
@@ -18,7 +18,6 @@ use pbs_datastore::dynamic_index::DynamicIndexWriter;
 use pbs_datastore::fixed_index::FixedIndexWriter;
 use pbs_datastore::{DataBlob, DataStore, DatastoreBackend};
 use proxmox_rest_server::{formatter::*, WorkerTask};
-use proxmox_s3_client::S3Client;
 
 use crate::backup::VerifyWorker;
 
@@ -560,11 +559,14 @@ impl BackupEnvironment {
         drop(state);
 
         // For S3 backends, upload the index file to the object store after closing
-        if let DatastoreBackend::S3(s3_client) = &self.backend {
-            self.s3_upload_index(s3_client, &writer_name)
-                .context("failed to upload dynamic index to s3 backend")?;
+        if proxmox_async::runtime::block_on(
+            self.backend
+                .upload_index_to_backend(&self.backup_dir, &writer_name),
+        )
+        .context("failed to upload dynamic index to backend")?
+        {
             self.log(format!(
-                "Uploaded dynamic index file to s3 backend: {writer_name}"
+                "Uploaded dynamic index file to backend: {writer_name}"
             ))
         }
 
@@ -659,9 +661,12 @@ impl BackupEnvironment {
         drop(state);
 
         // For S3 backends, upload the index file to the object store after closing
-        if let DatastoreBackend::S3(s3_client) = &self.backend {
-            self.s3_upload_index(s3_client, &writer_name)
-                .context("failed to upload fixed index to s3 backend")?;
+        if proxmox_async::runtime::block_on(
+            self.backend
+                .upload_index_to_backend(&self.backup_dir, &writer_name),
+        )
+        .context("failed to upload fixed index to backend")?
+        {
             self.log(format!(
                 "Uploaded fixed index file to object store: {writer_name}"
             ))
@@ -842,21 +847,6 @@ impl BackupEnvironment {
         let state = self.state.lock().unwrap();
         state.finished == BackupState::Finished
     }
-
-    fn s3_upload_index(&self, s3_client: &S3Client, name: &str) -> Result<(), Error> {
-        let object_key =
-            pbs_datastore::s3::object_key_from_path(&self.backup_dir.relative_path(), name)
-                .context("invalid index file object key")?;
-
-        let mut full_path = self.backup_dir.full_path();
-        full_path.push(name);
-        let data = std::fs::read(&full_path).context("failed to read index contents")?;
-        let contents = hyper::body::Bytes::from(data);
-        proxmox_async::runtime::block_on(
-            s3_client.upload_replace_with_retry(object_key, contents),
-        )?;
-        Ok(())
-    }
 }
 
 impl RpcEnvironment for BackupEnvironment {
diff --git a/src/server/pull.rs b/src/server/pull.rs
index 92513fe70..ba79704cd 100644
--- a/src/server/pull.rs
+++ b/src/server/pull.rs
@@ -342,19 +342,11 @@ async fn pull_single_archive<'a>(
     if let Err(err) = std::fs::rename(&tmp_path, &path) {
         bail!("Atomic rename file {:?} failed - {}", path, err);
     }
-    if let DatastoreBackend::S3(s3_client) = backend {
-        let object_key =
-            pbs_datastore::s3::object_key_from_path(&snapshot.relative_path(), archive_name)
-                .context("invalid archive object key")?;
 
-        let data = tokio::fs::read(&path)
-            .await
-            .context("failed to read archive contents")?;
-        let contents = hyper::body::Bytes::from(data);
-        let _is_duplicate = s3_client
-            .upload_replace_with_retry(object_key, contents)
-            .await?;
-    }
+    backend
+        .upload_index_to_backend(snapshot, archive_name)
+        .await?;
+
     Ok(sync_stats)
 }
 
-- 
2.47.3
