Date: Thu, 06 Nov 2025 10:37:51 +0100
From: Fabian Grünbichler
To: Proxmox Backup Server development discussion
Subject: Re: [pbs-devel] [PATCH proxmox-backup v3 07/23] api/datastore: move s3 index upload helper to datastore backend
Message-Id: <1762417422.pqzqcsjzhk.astroid@yuna.none>
In-Reply-To: <20251105122233.439382-8-c.ebner@proxmox.com>
References: <20251105122233.439382-1-c.ebner@proxmox.com>
 <20251105122233.439382-8-c.ebner@proxmox.com>

On November 5, 2025 1:22 pm, Christian Ebner wrote:
> In an effort to decouple the api implementation from the backend
> implementation and deduplicate code.
>
> Signed-off-by: Christian Ebner
> ---
> changes since version 2:
> - no changes
>
>  pbs-datastore/src/datastore.rs | 26 ++++++++++++++++++++++++++
>  src/api2/backup/environment.rs | 32 ++++++++++----------------------
>  src/server/pull.rs             | 14 ++------------
>  3 files changed, 38 insertions(+), 34 deletions(-)
>
> diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
> index 0d738f0ac..343f49f36 100644
> --- a/pbs-datastore/src/datastore.rs
> +++ b/pbs-datastore/src/datastore.rs
> @@ -223,6 +223,32 @@ pub enum DatastoreBackend {
>      S3(Arc),
>  }
>
> +impl DatastoreBackend {
> +    /// Reads the index file and uploads it to the S3 backend.
> +    ///
> +    /// Returns with error if the backend variant is not S3.
> +    pub async fn s3_upload_index(&self, backup_dir: &BackupDir, name: &str) -> Result<(), Error> {

the interface here would be nicer/more ergonomic if this were just

    datastore.upload_index_to_backend(backend, backup_dir, name) -> Result

implemented as a NOP for filesystem backends.. but I suspect we'd need to
change this once more if we integrate the backend more directly into the
datastore in the next series?
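roughly something like the following, completely untested sketch - the method
name and placing it on DataStore are just how I read my own suggestion, and the
body simply reuses the S3 upload code from this patch, with the filesystem arm
returning Ok(()) instead of bailing:

impl DataStore {
    /// Sketch only: upload an index file to the datastore's backend,
    /// doing nothing for plain filesystem datastores.
    pub async fn upload_index_to_backend(
        &self,
        backend: &DatastoreBackend,
        backup_dir: &BackupDir,
        name: &str,
    ) -> Result<(), Error> {
        match backend {
            // nothing to upload, the index already lives on the local filesystem
            DatastoreBackend::Filesystem => Ok(()),
            DatastoreBackend::S3(s3_client) => {
                let object_key =
                    crate::s3::object_key_from_path(&backup_dir.relative_path(), name)
                        .context("invalid index file object key")?;

                // read the just-written index file and push it to the object store
                let mut full_path = backup_dir.full_path();
                full_path.push(name);
                let data = tokio::fs::read(&full_path)
                    .await
                    .context("failed to read index contents")?;
                let _is_duplicate = s3_client
                    .upload_replace_with_retry(object_key, hyper::body::Bytes::from(data))
                    .await?;
                Ok(())
            }
        }
    }
}

that way call sites like pull_single_archive() could call it unconditionally
without first matching on the backend variant.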
> +        match self {
> +            Self::Filesystem => bail!("datastore backend not of type S3"),
> +            Self::S3(s3_client) => {
> +                let object_key = crate::s3::object_key_from_path(&backup_dir.relative_path(), name)
> +                    .context("invalid index file object key")?;
> +
> +                let mut full_path = backup_dir.full_path();
> +                full_path.push(name);
> +                let data = tokio::fs::read(&full_path)
> +                    .await
> +                    .context("failed to read index contents")?;
> +                let contents = hyper::body::Bytes::from(data);
> +                let _is_duplicate = s3_client
> +                    .upload_replace_with_retry(object_key, contents)
> +                    .await?;
> +                Ok(())
> +            }
> +        }
> +    }
> +}
> +
>  impl DataStore {
>      // This one just panics on everything
>      #[doc(hidden)]
> diff --git a/src/api2/backup/environment.rs b/src/api2/backup/environment.rs
> index 0faf6c8e0..f87d5a89e 100644
> --- a/src/api2/backup/environment.rs
> +++ b/src/api2/backup/environment.rs
> @@ -18,7 +18,6 @@ use pbs_datastore::dynamic_index::DynamicIndexWriter;
>  use pbs_datastore::fixed_index::FixedIndexWriter;
>  use pbs_datastore::{DataBlob, DataStore, DatastoreBackend};
>  use proxmox_rest_server::{formatter::*, WorkerTask};
> -use proxmox_s3_client::S3Client;
>
>  use crate::backup::VerifyWorker;
>
> @@ -560,9 +559,11 @@ impl BackupEnvironment {
>          drop(state);
>
>          // For S3 backends, upload the index file to the object store after closing
> -        if let DatastoreBackend::S3(s3_client) = &self.backend {
> -            self.s3_upload_index(s3_client, &writer_name)
> -                .context("failed to upload dynamic index to s3 backend")?;
> +        if let DatastoreBackend::S3(_s3_client) = &self.backend {
> +            proxmox_async::runtime::block_on(
> +                self.backend.s3_upload_index(&self.backup_dir, &writer_name),
> +            )
> +            .context("failed to upload dynamic index to s3 backend")?;
>              self.log(format!(
>                  "Uploaded dynamic index file to s3 backend: {writer_name}"
>              ))
> @@ -659,9 +660,11 @@ impl BackupEnvironment {
>          drop(state);
>
>          // For S3 backends, upload the index file to the object store after closing
> -        if let DatastoreBackend::S3(s3_client) = &self.backend {
> -            self.s3_upload_index(s3_client, &writer_name)
> -                .context("failed to upload fixed index to s3 backend")?;
> +        if let DatastoreBackend::S3(_s3_client) = &self.backend {
> +            proxmox_async::runtime::block_on(
> +                self.backend.s3_upload_index(&self.backup_dir, &writer_name),
> +            )
> +            .context("failed to upload fixed index to s3 backend")?;
>              self.log(format!(
>                  "Uploaded fixed index file to object store: {writer_name}"
>              ))
> @@ -842,21 +845,6 @@ impl BackupEnvironment {
>          let state = self.state.lock().unwrap();
>          state.finished == BackupState::Finished
>      }
> -
> -    fn s3_upload_index(&self, s3_client: &S3Client, name: &str) -> Result<(), Error> {
> -        let object_key =
> -            pbs_datastore::s3::object_key_from_path(&self.backup_dir.relative_path(), name)
> -                .context("invalid index file object key")?;
> -
> -        let mut full_path = self.backup_dir.full_path();
> -        full_path.push(name);
> -        let data = std::fs::read(&full_path).context("failed to read index contents")?;
> -        let contents = hyper::body::Bytes::from(data);
> -        proxmox_async::runtime::block_on(
> -            s3_client.upload_replace_with_retry(object_key, contents),
> -        )?;
> -        Ok(())
> -    }
>  }
>
>  impl RpcEnvironment for BackupEnvironment {
> diff --git a/src/server/pull.rs b/src/server/pull.rs
> index 2dcadf972..94b2fbf55 100644
> --- a/src/server/pull.rs
> +++ b/src/server/pull.rs
> @@ -359,19 +359,9 @@ async fn pull_single_archive<'a>(
>      if let Err(err) = std::fs::rename(&tmp_path, &path) {
>          bail!("Atomic rename file {:?} failed - {}", path, err);
>      }
> -    if let DatastoreBackend::S3(s3_client) = backend {
> -        let object_key =
> -            pbs_datastore::s3::object_key_from_path(&snapshot.relative_path(), archive_name)
> -                .context("invalid archive object key")?;
>
> -        let data = tokio::fs::read(&path)
> -            .await
> -            .context("failed to read archive contents")?;
> -        let contents = hyper::body::Bytes::from(data);
> -        let _is_duplicate = s3_client
> -            .upload_replace_with_retry(object_key, contents)
> -            .await?;
> -    }
> +    backend.s3_upload_index(snapshot, archive_name).await?;
> +
>      Ok(sync_stats)
>  }
>
> --
> 2.47.3
>


_______________________________________________
pbs-devel mailing list
pbs-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel