From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from firstgate.proxmox.com (firstgate.proxmox.com [IPv6:2a01:7e0:0:424::9]) by lore.proxmox.com (Postfix) with ESMTPS id 7CCC81FF185 for ; Mon, 21 Jul 2025 18:48:52 +0200 (CEST) Received: from firstgate.proxmox.com (localhost [127.0.0.1]) by firstgate.proxmox.com (Proxmox) with ESMTP id 3CAFC1718B; Mon, 21 Jul 2025 18:50:00 +0200 (CEST) From: Christian Ebner To: pbs-devel@lists.proxmox.com Date: Mon, 21 Jul 2025 18:44:34 +0200 Message-ID: <20250721164507.1045869-17-c.ebner@proxmox.com> X-Mailer: git-send-email 2.47.2 In-Reply-To: <20250721164507.1045869-1-c.ebner@proxmox.com> References: <20250721164507.1045869-1-c.ebner@proxmox.com> MIME-Version: 1.0 X-Bm-Milter-Handled: 55990f41-d878-4baa-be0a-ee34c49e34d2 X-Bm-Transport-Timestamp: 1753116307260 X-SPAM-LEVEL: Spam detection results: 0 AWL 0.045 Adjusted score from AWL reputation of From: address BAYES_00 -1.9 Bayes spam probability is 0 to 1% DMARC_MISSING 0.1 Missing DMARC policy KAM_DMARC_STATUS 0.01 Test Rule for DKIM or SPF Failure with Strict Alignment SPF_HELO_NONE 0.001 SPF: HELO does not publish an SPF Record SPF_PASS -0.001 SPF: sender matches SPF record Subject: [pbs-devel] [PATCH proxmox-backup v10 13/46] sync: pull: conditionally upload content to s3 backend X-BeenThere: pbs-devel@lists.proxmox.com X-Mailman-Version: 2.1.29 Precedence: list List-Id: Proxmox Backup Server development discussion List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Reply-To: Proxmox Backup Server development discussion Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit Errors-To: pbs-devel-bounces@lists.proxmox.com Sender: "pbs-devel" If the datastore is backed by an S3 object store, not only insert the pulled contents into the local cache store, but also upload them to the S3 backend. 
Signed-off-by: Christian Ebner --- changes since version 9: - no changes src/server/pull.rs | 64 +++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 61 insertions(+), 3 deletions(-) diff --git a/src/server/pull.rs b/src/server/pull.rs index b1724c142..775ed0c59 100644 --- a/src/server/pull.rs +++ b/src/server/pull.rs @@ -6,7 +6,7 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, Mutex}; use std::time::SystemTime; -use anyhow::{bail, format_err, Error}; +use anyhow::{bail, format_err, Context, Error}; use proxmox_human_byte::HumanByte; use tracing::info; @@ -24,7 +24,7 @@ use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::index::IndexFile; use pbs_datastore::manifest::{BackupManifest, FileInfo}; use pbs_datastore::read_chunk::AsyncReadChunk; -use pbs_datastore::{check_backup_owner, DataStore, StoreProgress}; +use pbs_datastore::{check_backup_owner, DataStore, DatastoreBackend, StoreProgress}; use pbs_tools::sha::sha256; use super::sync::{ @@ -167,7 +167,20 @@ async fn pull_index_chunks( move |(chunk, digest, size): (DataBlob, [u8; 32], u64)| { // println!("verify and write {}", hex::encode(&digest)); chunk.verify_unencrypted(size as usize, &digest)?; - target2.insert_chunk(&chunk, &digest)?; + match target2.backend()? 
{ + DatastoreBackend::Filesystem => { + target2.insert_chunk(&chunk, &digest)?; + } + DatastoreBackend::S3(s3_client) => { + let data = chunk.raw_data().to_vec(); + let upload_data = hyper::body::Bytes::from(data); + let object_key = pbs_datastore::s3::object_key_from_digest(&digest)?; + let _is_duplicate = proxmox_async::runtime::block_on( + s3_client.upload_no_replace_with_retry(object_key, upload_data), + ) + .context("failed to upload chunk to s3 backend")?; + } + } Ok(()) }, ); @@ -331,6 +344,19 @@ async fn pull_single_archive<'a>( if let Err(err) = std::fs::rename(&tmp_path, &path) { bail!("Atomic rename file {:?} failed - {}", path, err); } + if let DatastoreBackend::S3(s3_client) = snapshot.datastore().backend()? { + let object_key = + pbs_datastore::s3::object_key_from_path(&snapshot.relative_path(), archive_name) + .context("invalid archive object key")?; + + let data = tokio::fs::read(&path) + .await + .context("failed to read archive contents")?; + let contents = hyper::body::Bytes::from(data); + let _is_duplicate = s3_client + .upload_replace_with_retry(object_key, contents) + .await?; + } Ok(sync_stats) } @@ -401,6 +427,7 @@ async fn pull_snapshot<'a>( } } + let manifest_data = tmp_manifest_blob.raw_data().to_vec(); let manifest = BackupManifest::try_from(tmp_manifest_blob)?; if ignore_not_verified_or_encrypted( @@ -467,9 +494,40 @@ async fn pull_snapshot<'a>( if let Err(err) = std::fs::rename(&tmp_manifest_name, &manifest_name) { bail!("Atomic rename file {:?} failed - {}", manifest_name, err); } + if let DatastoreBackend::S3(s3_client) = snapshot.datastore().backend()? 
{ + let object_key = pbs_datastore::s3::object_key_from_path( + &snapshot.relative_path(), + MANIFEST_BLOB_NAME.as_ref(), + ) + .context("invalid manifest object key")?; + + let data = hyper::body::Bytes::from(manifest_data); + let _is_duplicate = s3_client + .upload_replace_with_retry(object_key, data) + .await + .context("failed to upload manifest to s3 backend")?; + } if !client_log_name.exists() { reader.try_download_client_log(&client_log_name).await?; + if client_log_name.exists() { + if let DatastoreBackend::S3(s3_client) = snapshot.datastore().backend()? { + let object_key = pbs_datastore::s3::object_key_from_path( + &snapshot.relative_path(), + CLIENT_LOG_BLOB_NAME.as_ref(), + ) + .context("invalid archive object key")?; + + let data = tokio::fs::read(&client_log_name) + .await + .context("failed to read log file contents")?; + let contents = hyper::body::Bytes::from(data); + let _is_duplicate = s3_client + .upload_replace_with_retry(object_key, contents) + .await + .context("failed to upload client log to s3 backend")?; + } + } }; snapshot .cleanup_unreferenced_files(&manifest) -- 2.47.2 _______________________________________________ pbs-devel mailing list pbs-devel@lists.proxmox.com https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel