From: Christian Ebner
To: Proxmox Backup Server development discussion, Nicolas Frey
Date: Tue, 11 Nov 2025 11:22:43 +0100
Subject: Re: [pbs-devel] [PATCH proxmox-backup v3 3/6] api: verify: determine the number of threads to use with {read, verify}-threads
In-Reply-To: <20251110084417.173290-7-n.frey@proxmox.com>
References: <20251110084417.173290-1-n.frey@proxmox.com> <20251110084417.173290-7-n.frey@proxmox.com>

two comments inline

On 11/10/25 9:44 AM, Nicolas Frey wrote:
> use previously introduced {read,verify}-threads in API, where default
> values match the ones of the schema definition.
>
> Signed-off-by: Nicolas Frey
> ---
>  src/api2/admin/datastore.rs    | 16 ++++++++++++++--
>  src/api2/backup/environment.rs |  2 +-
>  src/backup/verify.rs           | 16 ++++++++++++++--
>  src/server/verify_job.rs       |  7 ++++++-
>  4 files changed, 35 insertions(+), 6 deletions(-)
>
> diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
> index 6e269ef9..5e4dd3fc 100644
> --- a/src/api2/admin/datastore.rs
> +++ b/src/api2/admin/datastore.rs
> @@ -45,7 +45,8 @@ use pbs_api_types::{
>      BACKUP_TYPE_SCHEMA, CATALOG_NAME, CLIENT_LOG_BLOB_NAME, DATASTORE_SCHEMA,
>      IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT,
>      PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ,
> -    PRIV_DATASTORE_VERIFY, PRIV_SYS_MODIFY, UPID, UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
> +    PRIV_DATASTORE_VERIFY, PRIV_SYS_MODIFY, READ_THREADS_SCHEMA, UPID, UPID_SCHEMA,
> +    VERIFICATION_OUTDATED_AFTER_SCHEMA, VERIFY_THREADS_SCHEMA,
>  };
>  use pbs_client::pxar::{create_tar, create_zip};
>  use pbs_config::CachedUserInfo;
> @@ -675,6 +676,14 @@ pub async fn status(
>                  schema: NS_MAX_DEPTH_SCHEMA,
>                  optional: true,
>              },
> +            "read-threads": {
> +                schema: READ_THREADS_SCHEMA,
> +                optional: true,
> +            },
> +            "verify-threads": {
> +                schema: VERIFY_THREADS_SCHEMA,
> +                optional: true,
> +            },

There is one consumer of this API endpoint for which it would still be nice to get these parameters as well: the CLI invocation by `proxmox-backup-manager verify`, defined in `src/bin/proxmox-backup-manager.rs`. That should be done as a dedicated patch after this one, though.
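Roughly what I have in mind for that follow-up, as a sketch only (the helper names like `connect_to_localhost`/`view_task_result` and the exact shape of the existing wrapper are from memory, so treat it as illustrative, not as the final patch):

    #[api(
        input: {
            properties: {
                store: { schema: DATASTORE_SCHEMA },
                "read-threads": { schema: READ_THREADS_SCHEMA, optional: true },
                "verify-threads": { schema: VERIFY_THREADS_SCHEMA, optional: true },
                "output-format": { schema: OUTPUT_FORMAT, optional: true },
            }
        }
    )]
    /// Verify backups on a datastore
    async fn verify(store: String, mut param: Value) -> Result<Value, Error> {
        let output_format = extract_output_format(&mut param);
        // `param` still carries the optional read-threads/verify-threads
        // values, so they can be forwarded to the datastore verify endpoint as-is
        let client = connect_to_localhost()?;
        let path = format!("api2/json/admin/datastore/{store}/verify");
        let result = client.post(&path, Some(param)).await?;
        view_task_result(&client, result, &output_format).await?;
        Ok(Value::Null)
    }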
>          },
>      },
>      returns: {
> @@ -700,6 +709,8 @@ pub fn verify(
>      ignore_verified: Option,
>      outdated_after: Option,
>      max_depth: Option,
> +    read_threads: Option<usize>,
> +    verify_threads: Option<usize>,
>      rpcenv: &mut dyn RpcEnvironment,
>  ) -> Result {
>      let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
> @@ -779,7 +790,8 @@ pub fn verify(
>          auth_id.to_string(),
>          to_stdout,
>          move |worker| {
> -            let verify_worker = VerifyWorker::new(worker.clone(), datastore)?;
> +            let verify_worker =
> +                VerifyWorker::new(worker.clone(), datastore, read_threads, verify_threads)?;
>              let failed_dirs = if let Some(backup_dir) = backup_dir {
>                  let mut res = Vec::new();
>                  if !verify_worker.verify_backup_dir(
> diff --git a/src/api2/backup/environment.rs b/src/api2/backup/environment.rs
> index 0faf6c8e..06696c78 100644
> --- a/src/api2/backup/environment.rs
> +++ b/src/api2/backup/environment.rs
> @@ -795,7 +795,7 @@ impl BackupEnvironment {
>          move |worker| {
>              worker.log_message("Automatically verifying newly added snapshot");
>
> -            let verify_worker = VerifyWorker::new(worker.clone(), datastore)?;
> +            let verify_worker = VerifyWorker::new(worker.clone(), datastore, None, None)?;

this now always uses the defaults, which is fine IMO. I would however suggest adding a FIXME in case we extend the series further to allow per-datastore read/verify settings, as this call site would then need to get these values as well.
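Something along these lines directly above the call would be enough (only a comment on the line quoted above, no functional change intended):

    // FIXME: always falls back to the schema defaults here; once per-datastore
    // read/verify thread settings exist, fetch them from the datastore config
    // and pass them instead of `None, None`.
    let verify_worker = VerifyWorker::new(worker.clone(), datastore, None, None)?;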
>              if !verify_worker.verify_backup_dir_with_lock(
>                  &backup_dir,
>                  worker.upid().clone(),
> diff --git a/src/backup/verify.rs b/src/backup/verify.rs
> index 9a20c8e1..8a530159 100644
> --- a/src/backup/verify.rs
> +++ b/src/backup/verify.rs
> @@ -32,6 +32,8 @@ pub struct VerifyWorker {
>      verified_chunks: Arc>>,
>      corrupt_chunks: Arc>>,
>      backend: DatastoreBackend,
> +    read_threads: usize,
> +    verify_threads: usize,
>  }
>
>  struct IndexVerifyState {
> @@ -67,6 +69,8 @@ impl VerifyWorker {
>      pub fn new(
>          worker: Arc,
>          datastore: Arc,
> +        read_threads: Option<usize>,
> +        verify_threads: Option<usize>,
>      ) -> Result {
>          let backend = datastore.backend()?;
>          Ok(Self {
> @@ -77,6 +81,8 @@ impl VerifyWorker {
>              // start with 64 chunks since we assume there are few corrupt ones
>              corrupt_chunks: Arc::new(Mutex::new(HashSet::with_capacity(64))),
>              backend,
> +            read_threads: read_threads.unwrap_or(1),
> +            verify_threads: verify_threads.unwrap_or(4),
>          })
>      }
>
> @@ -115,7 +121,7 @@ impl VerifyWorker {
>              &self.verified_chunks,
>          ));
>
> -        let decoder_pool = ParallelHandler::new("verify chunk decoder", 4, {
> +        let decoder_pool = ParallelHandler::new("verify chunk decoder", self.verify_threads, {
>              let verify_state = Arc::clone(&verify_state);
>              move |(chunk, digest, size): (DataBlob, [u8; 32], u64)| {
>                  let chunk_crypt_mode = match chunk.crypt_mode() {
> @@ -177,7 +183,7 @@ impl VerifyWorker {
>              .datastore
>              .get_chunks_in_order(&*index, skip_chunk, check_abort)?;
>
> -        let reader_pool = ParallelHandler::new("read chunks", 4, {
> +        let reader_pool = ParallelHandler::new("read chunks", self.read_threads, {
>              let decoder_pool = decoder_pool.channel();
>              let verify_state = Arc::clone(&verify_state);
>              let backend = self.backend.clone();
> @@ -578,6 +584,12 @@ impl VerifyWorker {
>          let group_count = list.len();
>          info!("found {group_count} groups");
>
> +        log::info!(
> +            "using {} read and {} verify thread(s)",
> +            self.read_threads,
> +            self.verify_threads,
> +        );
> +
>          let mut progress = StoreProgress::new(group_count as u64);
>
>          for (pos, group) in list.into_iter().enumerate() {
> diff --git a/src/server/verify_job.rs b/src/server/verify_job.rs
> index c8792174..e0b03155 100644
> --- a/src/server/verify_job.rs
> +++ b/src/server/verify_job.rs
> @@ -41,7 +41,12 @@ pub fn do_verification_job(
>          None => Default::default(),
>      };
>
> -    let verify_worker = VerifyWorker::new(worker.clone(), datastore)?;
> +    let verify_worker = VerifyWorker::new(
> +        worker.clone(),
> +        datastore,
> +        verification_job.read_threads,
> +        verification_job.verify_threads,
> +    )?;
>      let result = verify_worker.verify_all_backups(
>          worker.upid(),
>          ns,

_______________________________________________
pbs-devel mailing list
pbs-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel