From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Date: Mon, 16 Jun 2025 16:21:39 +0200
Message-Id: <20250616142156.413652-27-c.ebner@proxmox.com>
In-Reply-To: <20250616142156.413652-1-c.ebner@proxmox.com>
References: <20250616142156.413652-1-c.ebner@proxmox.com>
Subject: [pbs-devel] [PATCH proxmox-backup v3 24/41] verify worker: add datastore backend to verify worker

In order to fetch chunks from an S3 compatible object store, instantiate
and store the S3 client in the verify worker by storing the datastore's
backend. This allows the same client instance to be reused for the whole
verification task.
Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
 src/api2/admin/datastore.rs    |  2 +-
 src/api2/backup/environment.rs |  2 +-
 src/backup/verify.rs           | 14 ++++++++++----
 src/server/verify_job.rs       |  2 +-
 4 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index 443f8f4c0..4b032e817 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -881,7 +881,7 @@ pub fn verify(
         auth_id.to_string(),
         to_stdout,
         move |worker| {
-            let verify_worker = VerifyWorker::new(worker.clone(), datastore);
+            let verify_worker = VerifyWorker::new(worker.clone(), datastore)?;
             let failed_dirs = if let Some(backup_dir) = backup_dir {
                 let mut res = Vec::new();
                 if !verify_worker.verify_backup_dir(
diff --git a/src/api2/backup/environment.rs b/src/api2/backup/environment.rs
index 685b78e89..384e8a73f 100644
--- a/src/api2/backup/environment.rs
+++ b/src/api2/backup/environment.rs
@@ -796,7 +796,7 @@ impl BackupEnvironment {
             move |worker| {
                 worker.log_message("Automatically verifying newly added snapshot");
 
-                let verify_worker = VerifyWorker::new(worker.clone(), datastore);
+                let verify_worker = VerifyWorker::new(worker.clone(), datastore)?;
                 if !verify_worker.verify_backup_dir_with_lock(
                     &backup_dir,
                     worker.upid().clone(),
diff --git a/src/backup/verify.rs b/src/backup/verify.rs
index 0b954ae23..a01ddcca3 100644
--- a/src/backup/verify.rs
+++ b/src/backup/verify.rs
@@ -17,7 +17,7 @@ use pbs_api_types::{
 use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo};
 use pbs_datastore::index::IndexFile;
 use pbs_datastore::manifest::{BackupManifest, FileInfo};
-use pbs_datastore::{DataBlob, DataStore, StoreProgress};
+use pbs_datastore::{DataBlob, DataStore, DatastoreBackend, StoreProgress};
 
 use crate::tools::parallel_handler::ParallelHandler;
 
@@ -30,19 +30,25 @@ pub struct VerifyWorker {
     datastore: Arc<DataStore>,
     verified_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
     corrupt_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
+    backend: DatastoreBackend,
 }
 
 impl VerifyWorker {
     /// Creates a new VerifyWorker for a given task worker and datastore.
-    pub fn new(worker: Arc<dyn WorkerTaskContext>, datastore: Arc<DataStore>) -> Self {
-        Self {
+    pub fn new(
+        worker: Arc<dyn WorkerTaskContext>,
+        datastore: Arc<DataStore>,
+    ) -> Result<Self, Error> {
+        let backend = datastore.backend()?;
+        Ok(Self {
             worker,
             datastore,
             // start with 16k chunks == up to 64G data
             verified_chunks: Arc::new(Mutex::new(HashSet::with_capacity(16 * 1024))),
             // start with 64 chunks since we assume there are few corrupt ones
             corrupt_chunks: Arc::new(Mutex::new(HashSet::with_capacity(64))),
-        }
+            backend,
+        })
     }
 
     fn verify_blob(backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
diff --git a/src/server/verify_job.rs b/src/server/verify_job.rs
index 95a7b2a9b..c8792174b 100644
--- a/src/server/verify_job.rs
+++ b/src/server/verify_job.rs
@@ -41,7 +41,7 @@ pub fn do_verification_job(
             None => Default::default(),
         };
 
-        let verify_worker = VerifyWorker::new(worker.clone(), datastore);
+        let verify_worker = VerifyWorker::new(worker.clone(), datastore)?;
         let result = verify_worker.verify_all_backups(
             worker.upid(),
             ns,
-- 
2.39.5
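
For context, a minimal self-contained sketch of the pattern this patch
introduces: resolve the datastore backend once in the now fallible
constructor and reuse that single instance for every chunk fetched during
verification. All type, variant and method names below are illustrative
stand-ins, not the actual proxmox-backup API.

use std::sync::Arc;

// Stand-in for pbs_datastore::DatastoreBackend: either the local chunk
// store or a reusable handle to an S3 compatible object store.
enum Backend {
    Filesystem,
    S3 { bucket: String },
}

// Stand-in for the datastore; resolving the backend may fail (e.g. on a
// broken S3 configuration), which is why the constructor becomes fallible.
struct Datastore {
    s3_bucket: Option<String>,
}

impl Datastore {
    fn backend(&self) -> Result<Backend, String> {
        match &self.s3_bucket {
            Some(b) if b.is_empty() => Err("invalid S3 configuration".into()),
            Some(b) => Ok(Backend::S3 { bucket: b.clone() }),
            None => Ok(Backend::Filesystem),
        }
    }
}

struct Verifier {
    _datastore: Arc<Datastore>,
    backend: Backend, // created once, reused for the whole task
}

impl Verifier {
    fn new(datastore: Arc<Datastore>) -> Result<Self, String> {
        let backend = datastore.backend()?; // propagate setup errors to the caller
        Ok(Self { _datastore: datastore, backend })
    }

    fn fetch_chunk(&self, digest: &str) -> String {
        match &self.backend {
            Backend::Filesystem => format!("read chunk {digest} from local store"),
            Backend::S3 { bucket } => format!("GET chunk {digest} from bucket {bucket}"),
        }
    }
}

fn main() -> Result<(), String> {
    let store = Arc::new(Datastore { s3_bucket: Some("backups".into()) });
    let verifier = Verifier::new(store)?; // `?` mirrors the callers changed in this patch
    println!("{}", verifier.fetch_chunk("abc123"));
    Ok(())
}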