From mboxrd@z Thu Jan 1 00:00:00 1970
From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Date: Mon, 3 Nov 2025 12:31:04 +0100
Message-ID: <20251103113120.239455-2-c.ebner@proxmox.com>
X-Mailer: git-send-email 2.47.3
In-Reply-To: <20251103113120.239455-1-c.ebner@proxmox.com>
References: <20251103113120.239455-1-c.ebner@proxmox.com>
MIME-Version: 1.0
Subject: [pbs-devel] [PATCH proxmox-backup 01/17] sync: pull: instantiate backend only once per sync job
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit

Currently, the target datastore's backend is instantiated for each chunk to
be inserted, which on S3-backed datastores leads to the S3 client being
re-instantiated and a new connection being established for every chunk.

Optimize this by creating the backend only once per sync job and sharing it
for all chunk inserts.

Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
 src/server/pull.rs | 30 +++++++++++++++++++++---------
 1 file changed, 21 insertions(+), 9 deletions(-)

diff --git a/src/server/pull.rs b/src/server/pull.rs
index 817b57ac5..de8b140bc 100644
--- a/src/server/pull.rs
+++ b/src/server/pull.rs
@@ -38,6 +38,8 @@ use crate::tools::parallel_handler::ParallelHandler;
 pub(crate) struct PullTarget {
     store: Arc<DataStore>,
     ns: BackupNamespace,
+    // Contains the active S3Client in case of S3 backend
+    backend: DatastoreBackend,
 }

 /// Parameters for a pull operation.
@@ -114,10 +116,9 @@ impl PullParameters {
                 ns: remote_ns,
             })
         };
-        let target = PullTarget {
-            store: DataStore::lookup_datastore(store, Some(Operation::Write))?,
-            ns,
-        };
+        let store = DataStore::lookup_datastore(store, Some(Operation::Write))?;
+        let backend = store.backend()?;
+        let target = PullTarget { store, ns, backend };

         let group_filter = group_filter.unwrap_or_default();

@@ -141,6 +142,7 @@ async fn pull_index_chunks(
     target: Arc<DataStore>,
     index: I,
     downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
+    backend: &DatastoreBackend,
 ) -> Result<SyncStats, Error> {
     use futures::stream::{self, StreamExt, TryStreamExt};

@@ -162,13 +164,14 @@ async fn pull_index_chunks(
     );

     let target2 = target.clone();
+    let backend = backend.clone();
     let verify_pool = ParallelHandler::new(
         "sync chunk writer",
         4,
         move |(chunk, digest, size): (DataBlob, [u8; 32], u64)| {
             // println!("verify and write {}", hex::encode(&digest));
             chunk.verify_unencrypted(size as usize, &digest)?;
-            match target2.backend()? {
+            match &backend {
                 DatastoreBackend::Filesystem => {
                     target2.insert_chunk(&chunk, &digest)?;
                 }
@@ -283,6 +286,7 @@ async fn pull_single_archive<'a>(
     snapshot: &'a pbs_datastore::BackupDir,
     archive_info: &'a FileInfo,
     downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
+    backend: &DatastoreBackend,
 ) -> Result<SyncStats, Error> {
     let archive_name = &archive_info.filename;
     let mut path = snapshot.full_path();
@@ -317,6 +321,7 @@ async fn pull_single_archive<'a>(
                 snapshot.datastore().clone(),
                 index,
                 downloaded_chunks,
+                backend,
             )
             .await?;
             sync_stats.add(stats);
@@ -339,6 +344,7 @@ async fn pull_single_archive<'a>(
                 snapshot.datastore().clone(),
                 index,
                 downloaded_chunks,
+                backend,
             )
             .await?;
             sync_stats.add(stats);
@@ -495,15 +501,21 @@ async fn pull_snapshot<'a>(
             }
         }

-        let stats =
-            pull_single_archive(reader.clone(), snapshot, item, downloaded_chunks.clone()).await?;
+        let stats = pull_single_archive(
+            reader.clone(),
+            snapshot,
+            item,
+            downloaded_chunks.clone(),
+            &params.target.backend,
+        )
+        .await?;
         sync_stats.add(stats);
     }

     if let Err(err) = std::fs::rename(&tmp_manifest_name, &manifest_name) {
         bail!("Atomic rename file {:?} failed - {}", manifest_name, err);
     }
-    if let DatastoreBackend::S3(s3_client) = snapshot.datastore().backend()? {
+    if let DatastoreBackend::S3(s3_client) = &params.target.backend {
         let object_key = pbs_datastore::s3::object_key_from_path(
             &snapshot.relative_path(),
             MANIFEST_BLOB_NAME.as_ref(),
@@ -520,7 +532,7 @@
     if !client_log_name.exists() {
         reader.try_download_client_log(&client_log_name).await?;
         if client_log_name.exists() {
-            if let DatastoreBackend::S3(s3_client) = snapshot.datastore().backend()? {
+            if let DatastoreBackend::S3(s3_client) = &params.target.backend {
                 let object_key = pbs_datastore::s3::object_key_from_path(
                     &snapshot.relative_path(),
                     CLIENT_LOG_BLOB_NAME.as_ref(),
-- 
2.47.3

_______________________________________________
pbs-devel mailing list
pbs-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
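
[Editor's sketch] The following is a minimal, self-contained illustration of the
hoisting pattern the patch above applies: build the backend once per sync job
and move a cheap clone into the per-chunk worker, instead of re-instantiating
it (and, for S3, reconnecting) for every chunk. All types here (Datastore,
Backend, S3Client, insert_chunk) are simplified stand-ins, not the real
proxmox-backup API; the actual change threads DatastoreBackend from PullTarget
through pull_snapshot, pull_single_archive, and pull_index_chunks as shown in
the diff.

use std::sync::Arc;

/// Hypothetical stand-in for an S3 client whose construction opens a connection.
#[derive(Clone)]
struct S3Client {
    endpoint: Arc<String>,
}

impl S3Client {
    fn connect(endpoint: &str) -> Self {
        // Imagine a TLS handshake etc. here; this is the cost we want to pay
        // once per sync job, not once per chunk.
        println!("establishing S3 connection to {endpoint}");
        S3Client { endpoint: Arc::new(endpoint.to_string()) }
    }
}

/// Hypothetical stand-in for DatastoreBackend; cloning is cheap.
#[derive(Clone)]
enum Backend {
    Filesystem,
    S3(S3Client),
}

/// Hypothetical stand-in for DataStore.
struct Datastore {
    s3_endpoint: Option<String>,
}

impl Datastore {
    /// Each call builds a fresh backend (and, for S3, a fresh connection).
    fn backend(&self) -> Backend {
        match &self.s3_endpoint {
            Some(endpoint) => Backend::S3(S3Client::connect(endpoint)),
            None => Backend::Filesystem,
        }
    }
}

fn insert_chunk(backend: &Backend, digest: &[u8; 32]) {
    match backend {
        Backend::Filesystem => println!("filesystem insert {:02x?}", &digest[..2]),
        Backend::S3(client) => println!("S3 upload to {} {:02x?}", client.endpoint, &digest[..2]),
    }
}

fn main() {
    let store = Datastore { s3_endpoint: Some("https://s3.example.invalid".into()) };
    let chunks: Vec<[u8; 32]> = vec![[0u8; 32], [1u8; 32], [2u8; 32]];

    // Before the patch: calling store.backend() inside the loop would
    // reconnect for every chunk. After: instantiate once, reuse a clone
    // inside the worker closure.
    let backend = store.backend();
    let worker = move |digest: &[u8; 32]| insert_chunk(&backend, digest);

    for digest in &chunks {
        worker(digest);
    }
}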