From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [pbs-devel] [RFC proxmox-backup 04/39] verify: refactor verify related functions to be methods of worker
Date: Mon, 19 May 2025 13:46:05 +0200	[thread overview]
Message-ID: <20250519114640.303640-5-c.ebner@proxmox.com> (raw)
In-Reply-To: <20250519114640.303640-1-c.ebner@proxmox.com>

Instead of passing the VerifyWorker state by reference to the various
verification-related functions, implement them as methods or
associated functions of VerifyWorker. This not only makes their
relationship to the worker clearer, it also reduces the number of
function call parameters and improves readability.

No functional changes intended.
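
For illustration, a minimal self-contained sketch of the pattern this
refactoring applies; the types and names below are simplified
placeholders, not the actual PBS structures:

    // Hypothetical stand-ins for the real worker state.
    struct DataStore;
    struct VerifyWorker {
        datastore: DataStore,
    }

    // Before: a free function that takes the shared state as an extra
    // parameter on every call.
    fn verify_item_free(worker: &VerifyWorker, name: &str) -> Result<(), String> {
        let _store = &worker.datastore; // state accessed through the parameter
        println!("verify {name}");
        Ok(())
    }

    impl VerifyWorker {
        // After: the same logic as a method, so the state travels with
        // `self` and one parameter disappears at each call site.
        fn verify_item(&self, name: &str) -> Result<(), String> {
            let _store = &self.datastore;
            println!("verify {name}");
            Ok(())
        }
    }

    fn main() -> Result<(), String> {
        let worker = VerifyWorker { datastore: DataStore };
        verify_item_free(&worker, "catalog.pcat1.didx")?; // old call style
        worker.verify_item("catalog.pcat1.didx")           // new call style
    }

In the patch itself, functions that only operate on their arguments
(verify_blob, rename_corrupted_chunk) become associated functions
without self, while the ones that used the worker state become &self
methods.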

Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
 src/api2/admin/datastore.rs    |  28 +-
 src/api2/backup/environment.rs |   7 +-
 src/backup/verify.rs           | 830 ++++++++++++++++-----------------
 src/server/verify_job.rs       |  12 +-
 4 files changed, 423 insertions(+), 454 deletions(-)

diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index 392494488..7dc881ade 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -70,10 +70,7 @@ use proxmox_rest_server::{formatter, WorkerTask};
 
 use crate::api2::backup::optional_ns_param;
 use crate::api2::node::rrd::create_value_from_rrd;
-use crate::backup::{
-    check_ns_privs_full, verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter,
-    ListAccessibleBackupGroups, NS_PRIVS_OK,
-};
+use crate::backup::{check_ns_privs_full, ListAccessibleBackupGroups, VerifyWorker, NS_PRIVS_OK};
 
 use crate::server::jobstate::{compute_schedule_status, Job, JobState};
 
@@ -896,14 +893,15 @@ pub fn verify(
         auth_id.to_string(),
         to_stdout,
         move |worker| {
-            let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
+            let verify_worker = VerifyWorker::new(worker.clone(), datastore);
             let failed_dirs = if let Some(backup_dir) = backup_dir {
                 let mut res = Vec::new();
-                if !verify_backup_dir(
-                    &verify_worker,
+                if !verify_worker.verify_backup_dir(
                     &backup_dir,
                     worker.upid().clone(),
-                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
+                    Some(&move |manifest| {
+                        VerifyWorker::verify_filter(ignore_verified, outdated_after, manifest)
+                    }),
                 )? {
                     res.push(print_ns_and_snapshot(
                         backup_dir.backup_ns(),
@@ -912,12 +910,13 @@ pub fn verify(
                 }
                 res
             } else if let Some(backup_group) = backup_group {
-                verify_backup_group(
-                    &verify_worker,
+                verify_worker.verify_backup_group(
                     &backup_group,
                     &mut StoreProgress::new(1),
                     worker.upid(),
-                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
+                    Some(&move |manifest| {
+                        VerifyWorker::verify_filter(ignore_verified, outdated_after, manifest)
+                    }),
                 )?
             } else {
                 let owner = if owner_check_required {
@@ -926,13 +925,14 @@ pub fn verify(
                     None
                 };
 
-                verify_all_backups(
-                    &verify_worker,
+                verify_worker.verify_all_backups(
                     worker.upid(),
                     ns,
                     max_depth,
                     owner,
-                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
+                    Some(&move |manifest| {
+                        VerifyWorker::verify_filter(ignore_verified, outdated_after, manifest)
+                    }),
                 )?
             };
             if !failed_dirs.is_empty() {
diff --git a/src/api2/backup/environment.rs b/src/api2/backup/environment.rs
index 3d541b461..6cd29f512 100644
--- a/src/api2/backup/environment.rs
+++ b/src/api2/backup/environment.rs
@@ -18,7 +18,7 @@ use pbs_datastore::fixed_index::FixedIndexWriter;
 use pbs_datastore::{DataBlob, DataStore};
 use proxmox_rest_server::{formatter::*, WorkerTask};
 
-use crate::backup::verify_backup_dir_with_lock;
+use crate::backup::VerifyWorker;
 
 use hyper::{Body, Response};
 
@@ -671,9 +671,8 @@ impl BackupEnvironment {
             move |worker| {
                 worker.log_message("Automatically verifying newly added snapshot");
 
-                let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
-                if !verify_backup_dir_with_lock(
-                    &verify_worker,
+                let verify_worker = VerifyWorker::new(worker.clone(), datastore);
+                if !verify_worker.verify_backup_dir_with_lock(
                     &backup_dir,
                     worker.upid().clone(),
                     None,
diff --git a/src/backup/verify.rs b/src/backup/verify.rs
index 3d2cba8ac..0b954ae23 100644
--- a/src/backup/verify.rs
+++ b/src/backup/verify.rs
@@ -44,517 +44,491 @@ impl VerifyWorker {
             corrupt_chunks: Arc::new(Mutex::new(HashSet::with_capacity(64))),
         }
     }
-}
-
-fn verify_blob(backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
-    let blob = backup_dir.load_blob(&info.filename)?;
 
-    let raw_size = blob.raw_size();
-    if raw_size != info.size {
-        bail!("wrong size ({} != {})", info.size, raw_size);
-    }
-
-    let csum = openssl::sha::sha256(blob.raw_data());
-    if csum != info.csum {
-        bail!("wrong index checksum");
-    }
+    fn verify_blob(backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
+        let blob = backup_dir.load_blob(&info.filename)?;
 
-    match blob.crypt_mode()? {
-        CryptMode::Encrypt => Ok(()),
-        CryptMode::None => {
-            // digest already verified above
-            blob.decode(None, None)?;
-            Ok(())
+        let raw_size = blob.raw_size();
+        if raw_size != info.size {
+            bail!("wrong size ({} != {})", info.size, raw_size);
         }
-        CryptMode::SignOnly => bail!("Invalid CryptMode for blob"),
-    }
-}
-
-fn rename_corrupted_chunk(datastore: Arc<DataStore>, digest: &[u8; 32]) {
-    let (path, digest_str) = datastore.chunk_path(digest);
 
-    let mut counter = 0;
-    let mut new_path = path.clone();
-    loop {
-        new_path.set_file_name(format!("{}.{}.bad", digest_str, counter));
-        if new_path.exists() && counter < 9 {
-            counter += 1;
-        } else {
-            break;
+        let csum = openssl::sha::sha256(blob.raw_data());
+        if csum != info.csum {
+            bail!("wrong index checksum");
         }
-    }
 
-    match std::fs::rename(&path, &new_path) {
-        Ok(_) => {
-            info!("corrupted chunk renamed to {:?}", &new_path);
-        }
-        Err(err) => {
-            match err.kind() {
-                std::io::ErrorKind::NotFound => { /* ignored */ }
-                _ => info!("could not rename corrupted chunk {:?} - {err}", &path),
+        match blob.crypt_mode()? {
+            CryptMode::Encrypt => Ok(()),
+            CryptMode::None => {
+                // digest already verified above
+                blob.decode(None, None)?;
+                Ok(())
             }
+            CryptMode::SignOnly => bail!("Invalid CryptMode for blob"),
         }
-    };
-}
+    }
 
-fn verify_index_chunks(
-    verify_worker: &VerifyWorker,
-    index: Box<dyn IndexFile + Send>,
-    crypt_mode: CryptMode,
-) -> Result<(), Error> {
-    let errors = Arc::new(AtomicUsize::new(0));
+    fn rename_corrupted_chunk(datastore: Arc<DataStore>, digest: &[u8; 32]) {
+        let (path, digest_str) = datastore.chunk_path(digest);
 
-    let start_time = Instant::now();
+        let mut counter = 0;
+        let mut new_path = path.clone();
+        loop {
+            new_path.set_file_name(format!("{}.{}.bad", digest_str, counter));
+            if new_path.exists() && counter < 9 {
+                counter += 1;
+            } else {
+                break;
+            }
+        }
 
-    let mut read_bytes = 0;
-    let mut decoded_bytes = 0;
+        match std::fs::rename(&path, &new_path) {
+            Ok(_) => {
+                info!("corrupted chunk renamed to {:?}", &new_path);
+            }
+            Err(err) => {
+                match err.kind() {
+                    std::io::ErrorKind::NotFound => { /* ignored */ }
+                    _ => info!("could not rename corrupted chunk {:?} - {err}", &path),
+                }
+            }
+        };
+    }
 
-    let datastore2 = Arc::clone(&verify_worker.datastore);
-    let corrupt_chunks2 = Arc::clone(&verify_worker.corrupt_chunks);
-    let verified_chunks2 = Arc::clone(&verify_worker.verified_chunks);
-    let errors2 = Arc::clone(&errors);
+    fn verify_index_chunks(
+        &self,
+        index: Box<dyn IndexFile + Send>,
+        crypt_mode: CryptMode,
+    ) -> Result<(), Error> {
+        let errors = Arc::new(AtomicUsize::new(0));
+
+        let start_time = Instant::now();
+
+        let mut read_bytes = 0;
+        let mut decoded_bytes = 0;
+
+        let datastore2 = Arc::clone(&self.datastore);
+        let corrupt_chunks2 = Arc::clone(&self.corrupt_chunks);
+        let verified_chunks2 = Arc::clone(&self.verified_chunks);
+        let errors2 = Arc::clone(&errors);
+
+        let decoder_pool = ParallelHandler::new(
+            "verify chunk decoder",
+            4,
+            move |(chunk, digest, size): (DataBlob, [u8; 32], u64)| {
+                let chunk_crypt_mode = match chunk.crypt_mode() {
+                    Err(err) => {
+                        corrupt_chunks2.lock().unwrap().insert(digest);
+                        info!("can't verify chunk, unknown CryptMode - {err}");
+                        errors2.fetch_add(1, Ordering::SeqCst);
+                        return Ok(());
+                    }
+                    Ok(mode) => mode,
+                };
+
+                if chunk_crypt_mode != crypt_mode {
+                    info!(
+                    "chunk CryptMode {chunk_crypt_mode:?} does not match index CryptMode {crypt_mode:?}"
+                );
+                    errors2.fetch_add(1, Ordering::SeqCst);
+                }
 
-    let decoder_pool = ParallelHandler::new(
-        "verify chunk decoder",
-        4,
-        move |(chunk, digest, size): (DataBlob, [u8; 32], u64)| {
-            let chunk_crypt_mode = match chunk.crypt_mode() {
-                Err(err) => {
+                if let Err(err) = chunk.verify_unencrypted(size as usize, &digest) {
                     corrupt_chunks2.lock().unwrap().insert(digest);
-                    info!("can't verify chunk, unknown CryptMode - {err}");
+                    info!("{err}");
                     errors2.fetch_add(1, Ordering::SeqCst);
-                    return Ok(());
+                    Self::rename_corrupted_chunk(datastore2.clone(), &digest);
+                } else {
+                    verified_chunks2.lock().unwrap().insert(digest);
                 }
-                Ok(mode) => mode,
-            };
 
-            if chunk_crypt_mode != crypt_mode {
-                info!(
-                    "chunk CryptMode {chunk_crypt_mode:?} does not match index CryptMode {crypt_mode:?}"
-                );
-                errors2.fetch_add(1, Ordering::SeqCst);
-            }
+                Ok(())
+            },
+        );
 
-            if let Err(err) = chunk.verify_unencrypted(size as usize, &digest) {
-                corrupt_chunks2.lock().unwrap().insert(digest);
-                info!("{err}");
-                errors2.fetch_add(1, Ordering::SeqCst);
-                rename_corrupted_chunk(datastore2.clone(), &digest);
+        let skip_chunk = |digest: &[u8; 32]| -> bool {
+            if self.verified_chunks.lock().unwrap().contains(digest) {
+                true
+            } else if self.corrupt_chunks.lock().unwrap().contains(digest) {
+                let digest_str = hex::encode(digest);
+                info!("chunk {digest_str} was marked as corrupt");
+                errors.fetch_add(1, Ordering::SeqCst);
+                true
             } else {
-                verified_chunks2.lock().unwrap().insert(digest);
+                false
             }
+        };
 
+        let check_abort = |pos: usize| -> Result<(), Error> {
+            if pos & 1023 == 0 {
+                self.worker.check_abort()?;
+                self.worker.fail_on_shutdown()?;
+            }
             Ok(())
-        },
-    );
-
-    let skip_chunk = |digest: &[u8; 32]| -> bool {
-        if verify_worker
-            .verified_chunks
-            .lock()
-            .unwrap()
-            .contains(digest)
-        {
-            true
-        } else if verify_worker
-            .corrupt_chunks
-            .lock()
-            .unwrap()
-            .contains(digest)
-        {
-            let digest_str = hex::encode(digest);
-            info!("chunk {digest_str} was marked as corrupt");
-            errors.fetch_add(1, Ordering::SeqCst);
-            true
-        } else {
-            false
-        }
-    };
-
-    let check_abort = |pos: usize| -> Result<(), Error> {
-        if pos & 1023 == 0 {
-            verify_worker.worker.check_abort()?;
-            verify_worker.worker.fail_on_shutdown()?;
-        }
-        Ok(())
-    };
+        };
 
-    let chunk_list =
-        verify_worker
+        let chunk_list = self
             .datastore
             .get_chunks_in_order(&*index, skip_chunk, check_abort)?;
 
-    for (pos, _) in chunk_list {
-        verify_worker.worker.check_abort()?;
-        verify_worker.worker.fail_on_shutdown()?;
+        for (pos, _) in chunk_list {
+            self.worker.check_abort()?;
+            self.worker.fail_on_shutdown()?;
 
-        let info = index.chunk_info(pos).unwrap();
+            let info = index.chunk_info(pos).unwrap();
 
-        // we must always recheck this here, the parallel worker below alter it!
-        if skip_chunk(&info.digest) {
-            continue; // already verified or marked corrupt
-        }
-
-        match verify_worker.datastore.load_chunk(&info.digest) {
-            Err(err) => {
-                verify_worker
-                    .corrupt_chunks
-                    .lock()
-                    .unwrap()
-                    .insert(info.digest);
-                error!("can't verify chunk, load failed - {err}");
-                errors.fetch_add(1, Ordering::SeqCst);
-                rename_corrupted_chunk(verify_worker.datastore.clone(), &info.digest);
+            // we must always recheck this here, the parallel worker below alter it!
+            if skip_chunk(&info.digest) {
+                continue; // already verified or marked corrupt
             }
-            Ok(chunk) => {
-                let size = info.size();
-                read_bytes += chunk.raw_size();
-                decoder_pool.send((chunk, info.digest, size))?;
-                decoded_bytes += size;
+
+            match self.datastore.load_chunk(&info.digest) {
+                Err(err) => {
+                    self.corrupt_chunks.lock().unwrap().insert(info.digest);
+                    error!("can't verify chunk, load failed - {err}");
+                    errors.fetch_add(1, Ordering::SeqCst);
+                    Self::rename_corrupted_chunk(self.datastore.clone(), &info.digest);
+                }
+                Ok(chunk) => {
+                    let size = info.size();
+                    read_bytes += chunk.raw_size();
+                    decoder_pool.send((chunk, info.digest, size))?;
+                    decoded_bytes += size;
+                }
             }
         }
-    }
 
-    decoder_pool.complete()?;
+        decoder_pool.complete()?;
+
+        let elapsed = start_time.elapsed().as_secs_f64();
 
-    let elapsed = start_time.elapsed().as_secs_f64();
+        let read_bytes_mib = (read_bytes as f64) / (1024.0 * 1024.0);
+        let decoded_bytes_mib = (decoded_bytes as f64) / (1024.0 * 1024.0);
 
-    let read_bytes_mib = (read_bytes as f64) / (1024.0 * 1024.0);
-    let decoded_bytes_mib = (decoded_bytes as f64) / (1024.0 * 1024.0);
+        let read_speed = read_bytes_mib / elapsed;
+        let decode_speed = decoded_bytes_mib / elapsed;
 
-    let read_speed = read_bytes_mib / elapsed;
-    let decode_speed = decoded_bytes_mib / elapsed;
+        let error_count = errors.load(Ordering::SeqCst);
 
-    let error_count = errors.load(Ordering::SeqCst);
+        info!(
+            "  verified {read_bytes_mib:.2}/{decoded_bytes_mib:.2} MiB in {elapsed:.2} seconds, speed {read_speed:.2}/{decode_speed:.2} MiB/s ({error_count} errors)"
+        );
 
-    info!(
-        "  verified {read_bytes_mib:.2}/{decoded_bytes_mib:.2} MiB in {elapsed:.2} seconds, speed {read_speed:.2}/{decode_speed:.2} MiB/s ({error_count} errors)"
-    );
+        if errors.load(Ordering::SeqCst) > 0 {
+            bail!("chunks could not be verified");
+        }
 
-    if errors.load(Ordering::SeqCst) > 0 {
-        bail!("chunks could not be verified");
+        Ok(())
     }
 
-    Ok(())
-}
+    fn verify_fixed_index(&self, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
+        let mut path = backup_dir.relative_path();
+        path.push(&info.filename);
 
-fn verify_fixed_index(
-    verify_worker: &VerifyWorker,
-    backup_dir: &BackupDir,
-    info: &FileInfo,
-) -> Result<(), Error> {
-    let mut path = backup_dir.relative_path();
-    path.push(&info.filename);
+        let index = self.datastore.open_fixed_reader(&path)?;
 
-    let index = verify_worker.datastore.open_fixed_reader(&path)?;
+        let (csum, size) = index.compute_csum();
+        if size != info.size {
+            bail!("wrong size ({} != {})", info.size, size);
+        }
 
-    let (csum, size) = index.compute_csum();
-    if size != info.size {
-        bail!("wrong size ({} != {})", info.size, size);
-    }
+        if csum != info.csum {
+            bail!("wrong index checksum");
+        }
 
-    if csum != info.csum {
-        bail!("wrong index checksum");
+        self.verify_index_chunks(Box::new(index), info.chunk_crypt_mode())
     }
 
-    verify_index_chunks(verify_worker, Box::new(index), info.chunk_crypt_mode())
-}
-
-fn verify_dynamic_index(
-    verify_worker: &VerifyWorker,
-    backup_dir: &BackupDir,
-    info: &FileInfo,
-) -> Result<(), Error> {
-    let mut path = backup_dir.relative_path();
-    path.push(&info.filename);
+    fn verify_dynamic_index(&self, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
+        let mut path = backup_dir.relative_path();
+        path.push(&info.filename);
 
-    let index = verify_worker.datastore.open_dynamic_reader(&path)?;
+        let index = self.datastore.open_dynamic_reader(&path)?;
 
-    let (csum, size) = index.compute_csum();
-    if size != info.size {
-        bail!("wrong size ({} != {})", info.size, size);
-    }
-
-    if csum != info.csum {
-        bail!("wrong index checksum");
-    }
+        let (csum, size) = index.compute_csum();
+        if size != info.size {
+            bail!("wrong size ({} != {})", info.size, size);
+        }
 
-    verify_index_chunks(verify_worker, Box::new(index), info.chunk_crypt_mode())
-}
+        if csum != info.csum {
+            bail!("wrong index checksum");
+        }
 
-/// Verify a single backup snapshot
-///
-/// This checks all archives inside a backup snapshot.
-/// Errors are logged to the worker log.
-///
-/// Returns
-/// - Ok(true) if verify is successful
-/// - Ok(false) if there were verification errors
-/// - Err(_) if task was aborted
-pub fn verify_backup_dir(
-    verify_worker: &VerifyWorker,
-    backup_dir: &BackupDir,
-    upid: UPID,
-    filter: Option<&dyn Fn(&BackupManifest) -> bool>,
-) -> Result<bool, Error> {
-    if !backup_dir.full_path().exists() {
-        info!(
-            "SKIPPED: verify {}:{} - snapshot does not exist (anymore).",
-            verify_worker.datastore.name(),
-            backup_dir.dir(),
-        );
-        return Ok(true);
+        self.verify_index_chunks(Box::new(index), info.chunk_crypt_mode())
     }
 
-    let snap_lock = backup_dir.lock_shared();
-
-    match snap_lock {
-        Ok(snap_lock) => {
-            verify_backup_dir_with_lock(verify_worker, backup_dir, upid, filter, snap_lock)
-        }
-        Err(err) => {
+    /// Verify a single backup snapshot
+    ///
+    /// This checks all archives inside a backup snapshot.
+    /// Errors are logged to the worker log.
+    ///
+    /// Returns
+    /// - Ok(true) if verify is successful
+    /// - Ok(false) if there were verification errors
+    /// - Err(_) if task was aborted
+    pub fn verify_backup_dir(
+        &self,
+        backup_dir: &BackupDir,
+        upid: UPID,
+        filter: Option<&dyn Fn(&BackupManifest) -> bool>,
+    ) -> Result<bool, Error> {
+        if !backup_dir.full_path().exists() {
             info!(
-                "SKIPPED: verify {}:{} - could not acquire snapshot lock: {}",
-                verify_worker.datastore.name(),
+                "SKIPPED: verify {}:{} - snapshot does not exist (anymore).",
+                self.datastore.name(),
                 backup_dir.dir(),
-                err,
             );
-            Ok(true)
+            return Ok(true);
         }
-    }
-}
 
-/// See verify_backup_dir
-pub fn verify_backup_dir_with_lock(
-    verify_worker: &VerifyWorker,
-    backup_dir: &BackupDir,
-    upid: UPID,
-    filter: Option<&dyn Fn(&BackupManifest) -> bool>,
-    _snap_lock: BackupLockGuard,
-) -> Result<bool, Error> {
-    let datastore_name = verify_worker.datastore.name();
-    let backup_dir_name = backup_dir.dir();
-
-    let manifest = match backup_dir.load_manifest() {
-        Ok((manifest, _)) => manifest,
-        Err(err) => {
-            info!("verify {datastore_name}:{backup_dir_name} - manifest load error: {err}");
-            return Ok(false);
-        }
-    };
+        let snap_lock = backup_dir.lock_shared();
 
-    if let Some(filter) = filter {
-        if !filter(&manifest) {
-            info!("SKIPPED: verify {datastore_name}:{backup_dir_name} (recently verified)");
-            return Ok(true);
+        match snap_lock {
+            Ok(snap_lock) => self.verify_backup_dir_with_lock(backup_dir, upid, filter, snap_lock),
+            Err(err) => {
+                info!(
+                    "SKIPPED: verify {}:{} - could not acquire snapshot lock: {}",
+                    self.datastore.name(),
+                    backup_dir.dir(),
+                    err,
+                );
+                Ok(true)
+            }
         }
     }
 
-    info!("verify {datastore_name}:{backup_dir_name}");
-
-    let mut error_count = 0;
+    /// See verify_backup_dir
+    pub fn verify_backup_dir_with_lock(
+        &self,
+        backup_dir: &BackupDir,
+        upid: UPID,
+        filter: Option<&dyn Fn(&BackupManifest) -> bool>,
+        _snap_lock: BackupLockGuard,
+    ) -> Result<bool, Error> {
+        let datastore_name = self.datastore.name();
+        let backup_dir_name = backup_dir.dir();
+
+        let manifest = match backup_dir.load_manifest() {
+            Ok((manifest, _)) => manifest,
+            Err(err) => {
+                info!("verify {datastore_name}:{backup_dir_name} - manifest load error: {err}");
+                return Ok(false);
+            }
+        };
 
-    let mut verify_result = VerifyState::Ok;
-    for info in manifest.files() {
-        let result = proxmox_lang::try_block!({
-            info!("  check {}", info.filename);
-            match ArchiveType::from_path(&info.filename)? {
-                ArchiveType::FixedIndex => verify_fixed_index(verify_worker, backup_dir, info),
-                ArchiveType::DynamicIndex => verify_dynamic_index(verify_worker, backup_dir, info),
-                ArchiveType::Blob => verify_blob(backup_dir, info),
+        if let Some(filter) = filter {
+            if !filter(&manifest) {
+                info!("SKIPPED: verify {datastore_name}:{backup_dir_name} (recently verified)");
+                return Ok(true);
             }
-        });
+        }
 
-        verify_worker.worker.check_abort()?;
-        verify_worker.worker.fail_on_shutdown()?;
+        info!("verify {datastore_name}:{backup_dir_name}");
 
-        if let Err(err) = result {
-            info!(
-                "verify {datastore_name}:{backup_dir_name}/{file_name} failed: {err}",
-                file_name = info.filename,
-            );
-            error_count += 1;
-            verify_result = VerifyState::Failed;
-        }
-    }
+        let mut error_count = 0;
 
-    let verify_state = SnapshotVerifyState {
-        state: verify_result,
-        upid,
-    };
-
-    if let Err(err) = {
-        let verify_state = serde_json::to_value(verify_state)?;
-        backup_dir.update_manifest(|manifest| {
-            manifest.unprotected["verify_state"] = verify_state;
-        })
-    } {
-        info!("verify {datastore_name}:{backup_dir_name} - manifest update error: {err}");
-        return Ok(false);
-    }
+        let mut verify_result = VerifyState::Ok;
+        for info in manifest.files() {
+            let result = proxmox_lang::try_block!({
+                info!("  check {}", info.filename);
+                match ArchiveType::from_path(&info.filename)? {
+                    ArchiveType::FixedIndex => self.verify_fixed_index(backup_dir, info),
+                    ArchiveType::DynamicIndex => self.verify_dynamic_index(backup_dir, info),
+                    ArchiveType::Blob => Self::verify_blob(backup_dir, info),
+                }
+            });
 
-    Ok(error_count == 0)
-}
+            self.worker.check_abort()?;
+            self.worker.fail_on_shutdown()?;
 
-/// Verify all backups inside a backup group
-///
-/// Errors are logged to the worker log.
-///
-/// Returns
-/// - Ok((count, failed_dirs)) where failed_dirs had verification errors
-/// - Err(_) if task was aborted
-pub fn verify_backup_group(
-    verify_worker: &VerifyWorker,
-    group: &BackupGroup,
-    progress: &mut StoreProgress,
-    upid: &UPID,
-    filter: Option<&dyn Fn(&BackupManifest) -> bool>,
-) -> Result<Vec<String>, Error> {
-    let mut errors = Vec::new();
-    let mut list = match group.list_backups() {
-        Ok(list) => list,
-        Err(err) => {
-            info!(
-                "verify {}, group {} - unable to list backups: {}",
-                print_store_and_ns(verify_worker.datastore.name(), group.backup_ns()),
-                group.group(),
-                err,
-            );
-            return Ok(errors);
-        }
-    };
-
-    let snapshot_count = list.len();
-    info!(
-        "verify group {}:{} ({} snapshots)",
-        verify_worker.datastore.name(),
-        group.group(),
-        snapshot_count
-    );
-
-    progress.group_snapshots = snapshot_count as u64;
-
-    BackupInfo::sort_list(&mut list, false); // newest first
-    for (pos, info) in list.into_iter().enumerate() {
-        if !verify_backup_dir(verify_worker, &info.backup_dir, upid.clone(), filter)? {
-            errors.push(print_ns_and_snapshot(
-                info.backup_dir.backup_ns(),
-                info.backup_dir.as_ref(),
-            ));
+            if let Err(err) = result {
+                info!(
+                    "verify {datastore_name}:{backup_dir_name}/{file_name} failed: {err}",
+                    file_name = info.filename,
+                );
+                error_count += 1;
+                verify_result = VerifyState::Failed;
+            }
         }
-        progress.done_snapshots = pos as u64 + 1;
-        info!("percentage done: {progress}");
-    }
-    Ok(errors)
-}
 
-/// Verify all (owned) backups inside a datastore
-///
-/// Errors are logged to the worker log.
-///
-/// Returns
-/// - Ok(failed_dirs) where failed_dirs had verification errors
-/// - Err(_) if task was aborted
-pub fn verify_all_backups(
-    verify_worker: &VerifyWorker,
-    upid: &UPID,
-    ns: BackupNamespace,
-    max_depth: Option<usize>,
-    owner: Option<&Authid>,
-    filter: Option<&dyn Fn(&BackupManifest) -> bool>,
-) -> Result<Vec<String>, Error> {
-    let mut errors = Vec::new();
-
-    info!("verify datastore {}", verify_worker.datastore.name());
-
-    let owner_filtered = if let Some(owner) = &owner {
-        info!("limiting to backups owned by {owner}");
-        true
-    } else {
-        false
-    };
-
-    // FIXME: This should probably simply enable recursion (or the call have a recursion parameter)
-    let store = &verify_worker.datastore;
-    let max_depth = max_depth.unwrap_or(pbs_api_types::MAX_NAMESPACE_DEPTH);
-
-    let mut list = match ListAccessibleBackupGroups::new_with_privs(
-        store,
-        ns.clone(),
-        max_depth,
-        Some(PRIV_DATASTORE_VERIFY),
-        Some(PRIV_DATASTORE_BACKUP),
-        owner,
-    ) {
-        Ok(list) => list
-            .filter_map(|group| match group {
-                Ok(group) => Some(group),
-                Err(err) if owner_filtered => {
-                    // intentionally not in task log, the user might not see this group!
-                    println!("error on iterating groups in ns '{ns}' - {err}");
-                    None
-                }
-                Err(err) => {
-                    // we don't filter by owner, but we want to log the error
-                    info!("error on iterating groups in ns '{ns}' - {err}");
-                    errors.push(err.to_string());
-                    None
-                }
-            })
-            .filter(|group| {
-                !(group.backup_type() == BackupType::Host && group.backup_id() == "benchmark")
+        let verify_state = SnapshotVerifyState {
+            state: verify_result,
+            upid,
+        };
+
+        if let Err(err) = {
+            let verify_state = serde_json::to_value(verify_state)?;
+            backup_dir.update_manifest(|manifest| {
+                manifest.unprotected["verify_state"] = verify_state;
             })
-            .collect::<Vec<BackupGroup>>(),
-        Err(err) => {
-            info!("unable to list backups: {err}");
-            return Ok(errors);
+        } {
+            info!("verify {datastore_name}:{backup_dir_name} - manifest update error: {err}");
+            return Ok(false);
         }
-    };
 
-    list.sort_unstable_by(|a, b| a.group().cmp(b.group()));
+        Ok(error_count == 0)
+    }
 
-    let group_count = list.len();
-    info!("found {group_count} groups");
+    /// Verify all backups inside a backup group
+    ///
+    /// Errors are logged to the worker log.
+    ///
+    /// Returns
+    /// - Ok((count, failed_dirs)) where failed_dirs had verification errors
+    /// - Err(_) if task was aborted
+    pub fn verify_backup_group(
+        &self,
+        group: &BackupGroup,
+        progress: &mut StoreProgress,
+        upid: &UPID,
+        filter: Option<&dyn Fn(&BackupManifest) -> bool>,
+    ) -> Result<Vec<String>, Error> {
+        let mut errors = Vec::new();
+        let mut list = match group.list_backups() {
+            Ok(list) => list,
+            Err(err) => {
+                info!(
+                    "verify {}, group {} - unable to list backups: {}",
+                    print_store_and_ns(self.datastore.name(), group.backup_ns()),
+                    group.group(),
+                    err,
+                );
+                return Ok(errors);
+            }
+        };
 
-    let mut progress = StoreProgress::new(group_count as u64);
+        let snapshot_count = list.len();
+        info!(
+            "verify group {}:{} ({} snapshots)",
+            self.datastore.name(),
+            group.group(),
+            snapshot_count
+        );
 
-    for (pos, group) in list.into_iter().enumerate() {
-        progress.done_groups = pos as u64;
-        progress.done_snapshots = 0;
-        progress.group_snapshots = 0;
+        progress.group_snapshots = snapshot_count as u64;
 
-        let mut group_errors =
-            verify_backup_group(verify_worker, &group, &mut progress, upid, filter)?;
-        errors.append(&mut group_errors);
+        BackupInfo::sort_list(&mut list, false); // newest first
+        for (pos, info) in list.into_iter().enumerate() {
+            if !self.verify_backup_dir(&info.backup_dir, upid.clone(), filter)? {
+                errors.push(print_ns_and_snapshot(
+                    info.backup_dir.backup_ns(),
+                    info.backup_dir.as_ref(),
+                ));
+            }
+            progress.done_snapshots = pos as u64 + 1;
+            info!("percentage done: {progress}");
+        }
+        Ok(errors)
     }
 
-    Ok(errors)
-}
+    /// Verify all (owned) backups inside a datastore
+    ///
+    /// Errors are logged to the worker log.
+    ///
+    /// Returns
+    /// - Ok(failed_dirs) where failed_dirs had verification errors
+    /// - Err(_) if task was aborted
+    pub fn verify_all_backups(
+        &self,
+        upid: &UPID,
+        ns: BackupNamespace,
+        max_depth: Option<usize>,
+        owner: Option<&Authid>,
+        filter: Option<&dyn Fn(&BackupManifest) -> bool>,
+    ) -> Result<Vec<String>, Error> {
+        let mut errors = Vec::new();
+
+        info!("verify datastore {}", self.datastore.name());
+
+        let owner_filtered = if let Some(owner) = &owner {
+            info!("limiting to backups owned by {owner}");
+            true
+        } else {
+            false
+        };
+
+        // FIXME: This should probably simply enable recursion (or the call have a recursion parameter)
+        let store = &self.datastore;
+        let max_depth = max_depth.unwrap_or(pbs_api_types::MAX_NAMESPACE_DEPTH);
+
+        let mut list = match ListAccessibleBackupGroups::new_with_privs(
+            store,
+            ns.clone(),
+            max_depth,
+            Some(PRIV_DATASTORE_VERIFY),
+            Some(PRIV_DATASTORE_BACKUP),
+            owner,
+        ) {
+            Ok(list) => list
+                .filter_map(|group| match group {
+                    Ok(group) => Some(group),
+                    Err(err) if owner_filtered => {
+                        // intentionally not in task log, the user might not see this group!
+                        println!("error on iterating groups in ns '{ns}' - {err}");
+                        None
+                    }
+                    Err(err) => {
+                        // we don't filter by owner, but we want to log the error
+                        info!("error on iterating groups in ns '{ns}' - {err}");
+                        errors.push(err.to_string());
+                        None
+                    }
+                })
+                .filter(|group| {
+                    !(group.backup_type() == BackupType::Host && group.backup_id() == "benchmark")
+                })
+                .collect::<Vec<BackupGroup>>(),
+            Err(err) => {
+                info!("unable to list backups: {err}");
+                return Ok(errors);
+            }
+        };
+
+        list.sort_unstable_by(|a, b| a.group().cmp(b.group()));
+
+        let group_count = list.len();
+        info!("found {group_count} groups");
 
-/// Filter out any snapshot from being (re-)verified where this fn returns false.
-pub fn verify_filter(
-    ignore_verified_snapshots: bool,
-    outdated_after: Option<i64>,
-    manifest: &BackupManifest,
-) -> bool {
-    if !ignore_verified_snapshots {
-        return true;
+        let mut progress = StoreProgress::new(group_count as u64);
+
+        for (pos, group) in list.into_iter().enumerate() {
+            progress.done_groups = pos as u64;
+            progress.done_snapshots = 0;
+            progress.group_snapshots = 0;
+
+            let mut group_errors = self.verify_backup_group(&group, &mut progress, upid, filter)?;
+            errors.append(&mut group_errors);
+        }
+
+        Ok(errors)
     }
 
-    match manifest.verify_state() {
-        Err(err) => {
-            warn!("error reading manifest: {err:#}");
-            true
+    /// Filter out any snapshot from being (re-)verified where this fn returns false.
+    pub fn verify_filter(
+        ignore_verified_snapshots: bool,
+        outdated_after: Option<i64>,
+        manifest: &BackupManifest,
+    ) -> bool {
+        if !ignore_verified_snapshots {
+            return true;
         }
-        Ok(None) => true, // no last verification, always include
-        Ok(Some(last_verify)) => {
-            match outdated_after {
-                None => false, // never re-verify if ignored and no max age
-                Some(max_age) => {
-                    let now = proxmox_time::epoch_i64();
-                    let days_since_last_verify = (now - last_verify.upid.starttime) / 86400;
-
-                    days_since_last_verify > max_age
+
+        match manifest.verify_state() {
+            Err(err) => {
+                warn!("error reading manifest: {err:#}");
+                true
+            }
+            Ok(None) => true, // no last verification, always include
+            Ok(Some(last_verify)) => {
+                match outdated_after {
+                    None => false, // never re-verify if ignored and no max age
+                    Some(max_age) => {
+                        let now = proxmox_time::epoch_i64();
+                        let days_since_last_verify = (now - last_verify.upid.starttime) / 86400;
+
+                        days_since_last_verify > max_age
+                    }
                 }
             }
         }
diff --git a/src/server/verify_job.rs b/src/server/verify_job.rs
index a15a257da..95a7b2a9b 100644
--- a/src/server/verify_job.rs
+++ b/src/server/verify_job.rs
@@ -5,10 +5,7 @@ use pbs_api_types::{Authid, Operation, VerificationJobConfig};
 use pbs_datastore::DataStore;
 use proxmox_rest_server::WorkerTask;
 
-use crate::{
-    backup::{verify_all_backups, verify_filter},
-    server::jobstate::Job,
-};
+use crate::{backup::VerifyWorker, server::jobstate::Job};
 
 /// Runs a verification job.
 pub fn do_verification_job(
@@ -44,15 +41,14 @@ pub fn do_verification_job(
                 None => Default::default(),
             };
 
-            let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
-            let result = verify_all_backups(
-                &verify_worker,
+            let verify_worker = VerifyWorker::new(worker.clone(), datastore);
+            let result = verify_worker.verify_all_backups(
                 worker.upid(),
                 ns,
                 verification_job.max_depth,
                 None,
                 Some(&move |manifest| {
-                    verify_filter(ignore_verified_snapshots, outdated_after, manifest)
+                    VerifyWorker::verify_filter(ignore_verified_snapshots, outdated_after, manifest)
                 }),
             );
             let job_result = match result {
-- 
2.39.5






Thread overview: 41+ messages
2025-05-19 11:46 [pbs-devel] [RFC proxmox proxmox-backup 00/39] S3 storage backend for datastores Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox 1/2] pbs-api-types: add types for S3 client configs and secrets Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox 2/2] pbs-api-types: extend datastore config by backend config enum Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 03/39] fmt: fix minor formatting issues Christian Ebner
2025-05-19 11:46 ` Christian Ebner [this message]
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 05/39] s3 client: add crate for AWS S3 compatible object store client Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 06/39] s3 client: implement AWS signature v4 request authentication Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 07/39] s3 client: add dedicated type for s3 object keys Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 08/39] s3 client: add helper for last modified timestamp parsing Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 09/39] s3 client: add helper to parse http date headers Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 10/39] s3 client: implement methods to operate on s3 objects in bucket Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 11/39] config: introduce s3 object store client configuration Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 12/39] api: config: implement endpoints to manipulate and list s3 configs Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 13/39] api: datastore: check S3 backend bucket access on datastore create Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 14/39] datastore: allow to get the backend for a datastore Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 15/39] api: backup: store datastore backend in runtime environment Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 16/39] api: backup: conditionally upload chunks to S3 object store backend Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 17/39] api: backup: conditionally upload blobs " Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 18/39] api: backup: conditionally upload indices " Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 19/39] api: backup: conditionally upload manifest " Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 20/39] api: reader: fetch chunks based on datastore backend Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 21/39] datastore: local chunk reader: read chunks based on backend Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 22/39] verify worker: add datastore backed to verify worker Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 23/39] verify: implement chunk verification for stores with s3 backend Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 24/39] api: remove snapshot from S3 backend on snapshot delete Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 25/39] datastore: prune groups/snapshots from S3 object store backend Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 26/39] datastore: implement garbage collection for s3 backend Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 27/39] ui: add S3 client edit window for configuration create/edit Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 28/39] ui: add S3 client view for configuration Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 29/39] ui: expose the S3 client view in the navigation tree Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 30/39] ui: add s3 bucket selector and allow to set s3 backend Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 31/39] api/bin: add endpoint and command to test s3 backend for datastore Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 32/39] tools: lru cache: add removed callback for evicted nodes Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 33/39] tools: async lru cache: implement insert, remove and contains methods Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 34/39] datastore: add local datastore cache for network attached storages Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 35/39] api: backup: use local datastore cache on S3 backend chunk upload Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 36/39] api: reader: use local datastore cache on S3 backend chunk fetching Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 37/39] api: backup: add no-cache flag to bypass local datastore cache Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 38/39] datastore: get and set owner for S3 store backend Christian Ebner
2025-05-19 11:46 ` [pbs-devel] [RFC proxmox-backup 39/39] datastore: create namespace marker in S3 backend Christian Ebner
2025-05-29 14:33 ` [pbs-devel] superseded: [RFC proxmox proxmox-backup 00/39] S3 storage backend for datastores Christian Ebner
