From: Nicolas Frey <n.frey@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [pbs-devel] [PATCH proxmox-backup v2 2/4] api: verify: determine the number of threads to use with {read, verify}-threads
Date: Thu, 6 Nov 2025 17:13:14 +0100 [thread overview]
Message-ID: <20251106161316.528349-6-n.frey@proxmox.com> (raw)
In-Reply-To: <20251106161316.528349-1-n.frey@proxmox.com>
Use the previously introduced {read,verify}-threads parameters in the
API; the default values match those of the schema definition.
Signed-off-by: Nicolas Frey <n.frey@proxmox.com>
---
src/api2/admin/datastore.rs | 18 +++++++++++++++---
src/api2/backup/environment.rs | 2 +-
src/backup/verify.rs | 19 ++++++++++++++++---
src/server/verify_job.rs | 7 ++++++-
4 files changed, 38 insertions(+), 8 deletions(-)
diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index 6e269ef9..fde4c247 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -45,7 +45,8 @@ use pbs_api_types::{
BACKUP_TYPE_SCHEMA, CATALOG_NAME, CLIENT_LOG_BLOB_NAME, DATASTORE_SCHEMA,
IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT,
PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ,
- PRIV_DATASTORE_VERIFY, PRIV_SYS_MODIFY, UPID, UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
+ PRIV_DATASTORE_VERIFY, PRIV_SYS_MODIFY, READ_THREADS_SCHEMA, UPID, UPID_SCHEMA,
+ VERIFICATION_OUTDATED_AFTER_SCHEMA, VERIFY_THREADS_SCHEMA,
};
use pbs_client::pxar::{create_tar, create_zip};
use pbs_config::CachedUserInfo;
@@ -675,6 +676,14 @@ pub async fn status(
schema: NS_MAX_DEPTH_SCHEMA,
optional: true,
},
+ "read-threads": {
+ schema: READ_THREADS_SCHEMA,
+ optional: true,
+ },
+ "verify-threads": {
+ schema: VERIFY_THREADS_SCHEMA,
+ optional: true,
+ },
},
},
returns: {
@@ -688,7 +697,7 @@ pub async fn status(
)]
/// Verify backups.
///
-/// This function can verify a single backup snapshot, all backup from a backup group,
+/// This function can verify a single backup snapshot, all backups from a backup group,
/// or all backups in the datastore.
#[allow(clippy::too_many_arguments)]
pub fn verify(
@@ -700,6 +709,8 @@ pub fn verify(
ignore_verified: Option<bool>,
outdated_after: Option<i64>,
max_depth: Option<usize>,
+ read_threads: Option<usize>,
+ verify_threads: Option<usize>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
@@ -779,7 +790,8 @@ pub fn verify(
auth_id.to_string(),
to_stdout,
move |worker| {
- let verify_worker = VerifyWorker::new(worker.clone(), datastore)?;
+ let verify_worker =
+ VerifyWorker::new(worker.clone(), datastore, read_threads, verify_threads)?;
let failed_dirs = if let Some(backup_dir) = backup_dir {
let mut res = Vec::new();
if !verify_worker.verify_backup_dir(
diff --git a/src/api2/backup/environment.rs b/src/api2/backup/environment.rs
index 0faf6c8e..06696c78 100644
--- a/src/api2/backup/environment.rs
+++ b/src/api2/backup/environment.rs
@@ -795,7 +795,7 @@ impl BackupEnvironment {
move |worker| {
worker.log_message("Automatically verifying newly added snapshot");
- let verify_worker = VerifyWorker::new(worker.clone(), datastore)?;
+ let verify_worker = VerifyWorker::new(worker.clone(), datastore, None, None)?;
if !verify_worker.verify_backup_dir_with_lock(
&backup_dir,
worker.upid().clone(),
diff --git a/src/backup/verify.rs b/src/backup/verify.rs
index 910a3ed5..f3cbe4d6 100644
--- a/src/backup/verify.rs
+++ b/src/backup/verify.rs
@@ -32,6 +32,8 @@ pub struct VerifyWorker {
verified_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
corrupt_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
backend: DatastoreBackend,
+ read_threads: usize,
+ verify_threads: usize,
}
struct IndexVerifyState {
@@ -44,10 +46,13 @@ struct IndexVerifyState {
}
impl VerifyWorker {
- /// Creates a new VerifyWorker for a given task worker and datastore.
+ /// Creates a new VerifyWorker for a given task worker and datastore. \
+ /// Default values for read_threads: 1, verify_threads: 4
pub fn new(
worker: Arc<dyn WorkerTaskContext>,
datastore: Arc<DataStore>,
+ read_threads: Option<usize>,
+ verify_threads: Option<usize>,
) -> Result<Self, Error> {
let backend = datastore.backend()?;
Ok(Self {
@@ -58,6 +63,8 @@ impl VerifyWorker {
// start with 64 chunks since we assume there are few corrupt ones
corrupt_chunks: Arc::new(Mutex::new(HashSet::with_capacity(64))),
backend,
+ read_threads: read_threads.unwrap_or(1),
+ verify_threads: verify_threads.unwrap_or(4),
})
}
@@ -101,7 +108,7 @@ impl VerifyWorker {
verified_chunks: Arc::clone(&self.verified_chunks),
});
- let decoder_pool = ParallelHandler::new("verify chunk decoder", 4, {
+ let decoder_pool = ParallelHandler::new("verify chunk decoder", self.verify_threads, {
let verify_state = Arc::clone(&verify_state);
move |(chunk, digest, size): (DataBlob, [u8; 32], u64)| {
let chunk_crypt_mode = match chunk.crypt_mode() {
@@ -163,7 +170,13 @@ impl VerifyWorker {
.datastore
.get_chunks_in_order(&*index, skip_chunk, check_abort)?;
- let reader_pool = ParallelHandler::new("read chunks", 4, {
+ log::info!(
+ " using {} read and {} verify thread(s)",
+ self.read_threads,
+ self.verify_threads,
+ );
+
+ let reader_pool = ParallelHandler::new("read chunks", self.read_threads, {
let decoder_pool = decoder_pool.channel();
let verify_state = Arc::clone(&verify_state);
let backend = self.backend.clone();
diff --git a/src/server/verify_job.rs b/src/server/verify_job.rs
index c8792174..e0b03155 100644
--- a/src/server/verify_job.rs
+++ b/src/server/verify_job.rs
@@ -41,7 +41,12 @@ pub fn do_verification_job(
None => Default::default(),
};
- let verify_worker = VerifyWorker::new(worker.clone(), datastore)?;
+ let verify_worker = VerifyWorker::new(
+ worker.clone(),
+ datastore,
+ verification_job.read_threads,
+ verification_job.verify_threads,
+ )?;
let result = verify_worker.verify_all_backups(
worker.upid(),
ns,
--
2.47.3
_______________________________________________
pbs-devel mailing list
pbs-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
next prev parent reply other threads:[~2025-11-06 16:12 UTC|newest]
Thread overview: 9+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-11-06 16:13 [pbs-devel] [PATCH proxmox{, -backup} v2 0/7] parallelize chunk reads in verification Nicolas Frey
2025-11-06 16:13 ` [pbs-devel] [PATCH proxmox v2 1/3] pbs-api-types: add schema for {worker, read, verify}-threads Nicolas Frey
2025-11-06 16:13 ` [pbs-devel] [PATCH proxmox v2 2/3] pbs-api-types: jobs: add {read, verify}-threads to VerificationJobConfig Nicolas Frey
2025-11-06 17:44 ` Thomas Lamprecht
2025-11-06 16:13 ` [pbs-devel] [PATCH proxmox v2 3/3] pbs-api-types: use worker-threads schema for TapeBackupJobSetup Nicolas Frey
2025-11-06 16:13 ` [pbs-devel] [PATCH proxmox-backup v2 1/4] api: verify: move chunk loading into parallel handler Nicolas Frey
2025-11-06 16:13 ` Nicolas Frey [this message]
2025-11-06 16:13 ` [pbs-devel] [PATCH proxmox-backup v2 3/4] api: verify: add {read, verify}-threads to update endpoint Nicolas Frey
2025-11-06 16:13 ` [pbs-devel] [PATCH proxmox-backup v2 4/4] ui: verify: add option to set number of threads for job Nicolas Frey
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20251106161316.528349-6-n.frey@proxmox.com \
--to=n.frey@proxmox.com \
--cc=pbs-devel@lists.proxmox.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox