From mboxrd@z Thu Jan  1 00:00:00 1970
From: Hannes Laimer <h.laimer@proxmox.com>
To: pbs-devel@lists.proxmox.com
Date: Tue, 3 Sep 2024 14:34:01 +0200
Message-Id: <20240903123401.91513-11-h.laimer@proxmox.com>
X-Mailer: git-send-email 2.39.2
In-Reply-To: <20240903123401.91513-1-h.laimer@proxmox.com>
References: <20240903123401.91513-1-h.laimer@proxmox.com>
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
Subject: [pbs-devel] [PATCH proxmox-backup RFC 10/10] backup/server/tape: add generics and separate functions into impl blocks
List-Id: Proxmox Backup Server development discussion <pbs-devel.lists.proxmox.com>

Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
---
 src/backup/hierarchy.rs                     | 26 +++++-----
 src/backup/verify.rs                        | 53 +++++++++++----------
 src/server/gc_job.rs                        |  8 ++--
 src/server/prune_job.rs                     |  5 +-
 src/server/pull.rs                          | 23 ++++-----
 src/tape/file_formats/snapshot_archive.rs   |  5 +-
 src/tape/pool_writer/mod.rs                 | 11 +++--
 src/tape/pool_writer/new_chunks_iterator.rs |  7 +--
 8 files changed, 74 insertions(+), 64 deletions(-)
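Note for reviewers, placed after the diffstat so `git am` ignores it: the
`T` bounds used throughout this patch refer to the typestate markers that
earlier patches in this series add to pbs_datastore::chunk_store. The
sketch below only illustrates the general scheme; the trait and marker
names (CanRead, CanWrite, Read, Write) match the imports used in the
hunks, but the method names, the PhantomData layout and the "CanWrite
implies CanRead" supertrait relation are assumptions made for
illustration, not code taken from this series.

    use std::marker::PhantomData;
    use std::sync::Arc;

    // Access-level marker traits; write access is assumed to imply
    // read access here.
    pub trait CanRead {}
    pub trait CanWrite: CanRead {}

    // Zero-sized type-states a store can be opened in.
    pub struct Read;
    pub struct Write;
    impl CanRead for Read {}
    impl CanRead for Write {}
    impl CanWrite for Write {}

    // The datastore carries its access level in the type instead of
    // checking it at runtime.
    pub struct DataStore<T> {
        _access: PhantomData<T>,
    }

    // Read operations are only available on readable stores ...
    impl<T: CanRead> DataStore<T> {
        pub fn load_chunk(&self, _digest: &[u8; 32]) { /* ... */ }
    }

    // ... and mutating operations only on writable ones.
    impl<T: CanWrite> DataStore<T> {
        pub fn insert_chunk(&self) { /* ... */ }
    }

    // A job that mutates the store demands write access in its
    // signature, like do_garbage_collection_job below:
    fn gc_example<T: CanWrite + Send + Sync + 'static>(_store: Arc<DataStore<T>>) {}

With that shape, handing a read-only DataStore<Read> to prune_datastore
or do_garbage_collection_job fails to compile, which is what threading
the generics through backup/, server/ and tape/ buys us.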
diff --git a/src/backup/hierarchy.rs b/src/backup/hierarchy.rs
index 640a7762..29f05c9e 100644
--- a/src/backup/hierarchy.rs
+++ b/src/backup/hierarchy.rs
@@ -7,7 +7,9 @@ use pbs_api_types::{
     PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_READ,
 };
 use pbs_config::CachedUserInfo;
-use pbs_datastore::{backup_info::BackupGroup, DataStore, ListGroups, ListNamespacesRecursive};
+use pbs_datastore::{
+    backup_info::BackupGroup, chunk_store::CanRead, DataStore, ListGroups, ListNamespacesRecursive,
+};
 
 /// Asserts that `privs` are fulfilled on datastore + (optional) namespace.
 pub fn check_ns_privs(
@@ -68,8 +70,8 @@ pub fn check_ns_privs_full(
     );
 }
 
-pub fn can_access_any_namespace(
-    store: Arc<DataStore>,
+pub fn can_access_any_namespace<T: CanRead>(
+    store: Arc<DataStore<T>>,
     auth_id: &Authid,
     user_info: &CachedUserInfo,
 ) -> bool {
@@ -95,8 +97,8 @@ pub fn can_access_any_namespace(
 ///
 /// Is basically just a filter-iter for pbs_datastore::ListNamespacesRecursive including access and
 /// optional owner checks.
-pub struct ListAccessibleBackupGroups<'a> {
-    store: &'a Arc<DataStore>,
+pub struct ListAccessibleBackupGroups<'a, T> {
+    store: &'a Arc<DataStore<T>>,
     auth_id: Option<&'a Authid>,
     user_info: Arc<CachedUserInfo>,
     /// The priv on NS level that allows auth_id trump the owner check
@@ -104,15 +106,15 @@ pub struct ListAccessibleBackupGroups<'a> {
     /// The priv that auth_id is required to have on NS level additionally to being owner
     owner_and_priv: u64,
     /// Contains the intertnal state, group iter and a bool flag for override_owner_priv
-    state: Option<(ListGroups, bool)>,
-    ns_iter: ListNamespacesRecursive,
+    state: Option<(ListGroups<T>, bool)>,
+    ns_iter: ListNamespacesRecursive<T>,
 }
 
-impl<'a> ListAccessibleBackupGroups<'a> {
+impl<'a, T: CanRead> ListAccessibleBackupGroups<'a, T> {
     // TODO: builder pattern
     pub fn new_owned(
-        store: &'a Arc<DataStore>,
+        store: &'a Arc<DataStore<T>>,
         ns: BackupNamespace,
         max_depth: usize,
         auth_id: Option<&'a Authid>,
@@ -122,7 +124,7 @@ impl<'a> ListAccessibleBackupGroups<'a> {
     }
 
     pub fn new_with_privs(
-        store: &'a Arc<DataStore>,
+        store: &'a Arc<DataStore<T>>,
         ns: BackupNamespace,
         max_depth: usize,
         override_owner_priv: Option<u64>,
@@ -145,8 +147,8 @@ impl<'a> ListAccessibleBackupGroups<'a> {
 pub static NS_PRIVS_OK: u64 =
     PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP | PRIV_DATASTORE_AUDIT;
 
-impl<'a> Iterator for ListAccessibleBackupGroups<'a> {
-    type Item = Result<BackupGroup, Error>;
+impl<'a, T: CanRead> Iterator for ListAccessibleBackupGroups<'a, T> {
+    type Item = Result<BackupGroup<T>, Error>;
 
     fn next(&mut self) -> Option<Self::Item> {
         loop {
diff --git a/src/backup/verify.rs b/src/backup/verify.rs
index 6ef7e8eb..1ede08ea 100644
--- a/src/backup/verify.rs
+++ b/src/backup/verify.rs
@@ -5,6 +5,7 @@ use std::time::Instant;
 
 use anyhow::{bail, format_err, Error};
 use nix::dir::Dir;
+use pbs_datastore::chunk_store::{CanRead, CanWrite};
 use tracing::{error, info};
 
 use proxmox_sys::fs::lock_dir_noblock_shared;
@@ -25,16 +26,16 @@ use crate::backup::hierarchy::ListAccessibleBackupGroups;
 
 /// A VerifyWorker encapsulates a task worker, datastore and information about which chunks have
 /// already been verified or detected as corrupt.
-pub struct VerifyWorker {
+pub struct VerifyWorker<T> {
     worker: Arc<dyn WorkerTaskContext>,
-    datastore: Arc<DataStore>,
+    datastore: Arc<DataStore<T>>,
     verified_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
     corrupt_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
 }
 
-impl VerifyWorker {
+impl<T: CanRead> VerifyWorker<T> {
     /// Creates a new VerifyWorker for a given task worker and datastore.
-    pub fn new(worker: Arc<dyn WorkerTaskContext>, datastore: Arc<DataStore>) -> Self {
+    pub fn new(worker: Arc<dyn WorkerTaskContext>, datastore: Arc<DataStore<T>>) -> Self {
         Self {
             worker,
             datastore,
@@ -46,7 +47,7 @@ impl VerifyWorker {
     }
 }
 
-fn verify_blob(backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
+fn verify_blob<T: CanRead>(backup_dir: &BackupDir<T>, info: &FileInfo) -> Result<(), Error> {
     let blob = backup_dir.load_blob(&info.filename)?;
 
     let raw_size = blob.raw_size();
@@ -70,7 +71,7 @@ fn verify_blob(backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
     }
 }
 
-fn rename_corrupted_chunk(datastore: Arc<DataStore>, digest: &[u8; 32]) {
+fn rename_corrupted_chunk<T: CanWrite>(datastore: Arc<DataStore<T>>, digest: &[u8; 32]) {
     let (path, digest_str) = datastore.chunk_path(digest);
 
     let mut counter = 0;
@@ -97,8 +98,8 @@ fn rename_corrupted_chunk(datastore: Arc<DataStore>, digest: &[u8; 32]) {
     };
 }
 
-fn verify_index_chunks(
-    verify_worker: &VerifyWorker,
+fn verify_index_chunks<T: CanRead + CanWrite + Send + Sync + 'static>(
+    verify_worker: &VerifyWorker<T>,
     index: Box<dyn IndexFile + Send>,
     crypt_mode: CryptMode,
 ) -> Result<(), Error> {
@@ -238,9 +239,9 @@ fn verify_index_chunks(
     Ok(())
 }
 
-fn verify_fixed_index(
-    verify_worker: &VerifyWorker,
-    backup_dir: &BackupDir,
+fn verify_fixed_index<T: CanRead + CanWrite + Send + Sync + 'static>(
+    verify_worker: &VerifyWorker<T>,
+    backup_dir: &BackupDir<T>,
     info: &FileInfo,
 ) -> Result<(), Error> {
     let mut path = backup_dir.relative_path();
@@ -260,9 +261,9 @@ fn verify_fixed_index(
     verify_index_chunks(verify_worker, Box::new(index), info.chunk_crypt_mode())
 }
 
-fn verify_dynamic_index(
-    verify_worker: &VerifyWorker,
-    backup_dir: &BackupDir,
+fn verify_dynamic_index<T: CanRead + CanWrite + Send + Sync + 'static>(
+    verify_worker: &VerifyWorker<T>,
+    backup_dir: &BackupDir<T>,
     info: &FileInfo,
 ) -> Result<(), Error> {
     let mut path = backup_dir.relative_path();
@@ -291,9 +292,9 @@ fn verify_dynamic_index(
 /// - Ok(true) if verify is successful
 /// - Ok(false) if there were verification errors
 /// - Err(_) if task was aborted
-pub fn verify_backup_dir(
-    verify_worker: &VerifyWorker,
-    backup_dir: &BackupDir,
+pub fn verify_backup_dir<T: CanRead + CanWrite + Send + Sync + 'static>(
+    verify_worker: &VerifyWorker<T>,
+    backup_dir: &BackupDir<T>,
     upid: UPID,
     filter: Option<&dyn Fn(&BackupManifest) -> bool>,
 ) -> Result<bool, Error> {
@@ -328,9 +329,9 @@ pub fn verify_backup_dir(
 }
 
 /// See verify_backup_dir
-pub fn verify_backup_dir_with_lock(
-    verify_worker: &VerifyWorker,
-    backup_dir: &BackupDir,
+pub fn verify_backup_dir_with_lock<T: CanRead + CanWrite + Send + Sync + 'static>(
+    verify_worker: &VerifyWorker<T>,
+    backup_dir: &BackupDir<T>,
     upid: UPID,
     filter: Option<&dyn Fn(&BackupManifest) -> bool>,
     _snap_lock: Dir,
@@ -415,9 +416,9 @@ pub fn verify_backup_dir_with_lock(
 /// Returns
 /// - Ok((count, failed_dirs)) where failed_dirs had verification errors
 /// - Err(_) if task was aborted
-pub fn verify_backup_group(
-    verify_worker: &VerifyWorker,
-    group: &BackupGroup,
+pub fn verify_backup_group<T: CanRead + CanWrite + Send + Sync + 'static>(
+    verify_worker: &VerifyWorker<T>,
+    group: &BackupGroup<T>,
     progress: &mut StoreProgress,
     upid: &UPID,
     filter: Option<&dyn Fn(&BackupManifest) -> bool>,
@@ -467,8 +468,8 @@ pub fn verify_backup_group(
 /// Returns
 /// - Ok(failed_dirs) where failed_dirs had verification errors
 /// - Err(_) if task was aborted
-pub fn verify_all_backups(
-    verify_worker: &VerifyWorker,
+pub fn verify_all_backups<T: CanRead + CanWrite + Send + Sync + 'static>(
+    verify_worker: &VerifyWorker<T>,
     upid: &UPID,
     ns: BackupNamespace,
     max_depth: Option<usize>,
@@ -516,7 +517,7 @@ pub fn verify_all_backups(
             .filter(|group| {
                 !(group.backup_type() == BackupType::Host && group.backup_id() == "benchmark")
             })
-            .collect::<Vec<BackupGroup>>(),
+            .collect::<Vec<BackupGroup<T>>>(),
         Err(err) => {
             info!("unable to list backups: {err}");
             return Ok(errors);
diff --git a/src/server/gc_job.rs b/src/server/gc_job.rs
index 64835028..4892430c 100644
--- a/src/server/gc_job.rs
+++ b/src/server/gc_job.rs
@@ -4,15 +4,17 @@ use std::sync::Arc;
 use tracing::info;
 
 use pbs_api_types::Authid;
-use pbs_datastore::DataStore;
+use pbs_datastore::{chunk_store::CanWrite, DataStore};
 use proxmox_rest_server::WorkerTask;
 
 use crate::server::{jobstate::Job, send_gc_status};
 
 /// Runs a garbage collection job.
-pub fn do_garbage_collection_job(
+pub fn do_garbage_collection_job<
+    T: CanWrite + Send + Sync + std::panic::RefUnwindSafe + 'static,
+>(
     mut job: Job,
-    datastore: Arc<DataStore>,
+    datastore: Arc<DataStore<T>>,
     auth_id: &Authid,
     schedule: Option<String>,
     to_stdout: bool,
diff --git a/src/server/prune_job.rs b/src/server/prune_job.rs
index 546c0bbd..9024a61f 100644
--- a/src/server/prune_job.rs
+++ b/src/server/prune_job.rs
@@ -1,6 +1,7 @@
 use std::sync::Arc;
 
 use anyhow::Error;
+use pbs_datastore::chunk_store::CanWrite;
 use tracing::{info, warn};
 
 use pbs_api_types::{
@@ -14,10 +15,10 @@ use proxmox_rest_server::WorkerTask;
 use crate::backup::ListAccessibleBackupGroups;
 use crate::server::jobstate::Job;
 
-pub fn prune_datastore(
+pub fn prune_datastore<T: CanWrite>(
     auth_id: Authid,
     prune_options: PruneJobOptions,
-    datastore: Arc<DataStore>,
+    datastore: Arc<DataStore<T>>,
     dry_run: bool,
 ) -> Result<(), Error> {
     let store = &datastore.name();
diff --git a/src/server/pull.rs b/src/server/pull.rs
index 41ab5e0e..a567c510 100644
--- a/src/server/pull.rs
+++ b/src/server/pull.rs
@@ -9,6 +9,7 @@ use std::time::{Duration, SystemTime};
 
 use anyhow::{bail, format_err, Error};
 use http::StatusCode;
+use pbs_datastore::chunk_store::{CanWrite, Read as StoreRead, Write as StoreWrite};
 use proxmox_human_byte::HumanByte;
 use proxmox_router::HttpError;
 use serde_json::json;
@@ -45,11 +46,11 @@ struct RemoteReader {
 struct LocalReader {
     _dir_lock: Arc<Mutex<proxmox_sys::fs::DirLockGuard>>,
     path: PathBuf,
-    datastore: Arc<DataStore>,
+    datastore: Arc<DataStore<StoreRead>>,
 }
 
 pub(crate) struct PullTarget {
-    store: Arc<DataStore>,
+    store: Arc<DataStore<StoreWrite>>,
     ns: BackupNamespace,
 }
 
@@ -60,7 +61,7 @@ pub(crate) struct RemoteSource {
 }
 
 pub(crate) struct LocalSource {
-    store: Arc<DataStore>,
+    store: Arc<DataStore<StoreRead>>,
     ns: BackupNamespace,
 }
 
@@ -571,9 +572,9 @@ impl PullParameters {
     }
 }
 
-async fn pull_index_chunks<I: IndexFile>(
+async fn pull_index_chunks<T: CanWrite + Send + Sync + 'static, I: IndexFile>(
     chunk_reader: Arc<dyn AsyncReadChunk>,
-    target: Arc<DataStore>,
+    target: Arc<DataStore<T>>,
     index: I,
     downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
 ) -> Result<PullStats, Error> {
@@ -696,9 +697,9 @@ fn verify_archive(info: &FileInfo, csum: &[u8; 32], size: u64) -> Result<(), Err
 /// -- Verify tmp file checksum
 /// - if archive is an index, pull referenced chunks
 /// - Rename tmp file into real path
-async fn pull_single_archive<'a>(
+async fn pull_single_archive<'a, T: CanWrite + Send + Sync + 'static>(
     reader: Arc<dyn PullReader + 'a>,
-    snapshot: &'a pbs_datastore::BackupDir,
+    snapshot: &'a pbs_datastore::BackupDir<T>,
     archive_info: &'a FileInfo,
     downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
 ) -> Result<PullStats, Error> {
@@ -779,9 +780,9 @@ async fn pull_single_archive<'a>(
 /// -- if file already exists, verify contents
 /// -- if not, pull it from the remote
 /// - Download log if not already existing
-async fn pull_snapshot<'a>(
+async fn pull_snapshot<'a, T: CanWrite + Send + Sync + 'static>(
     reader: Arc<dyn PullReader + 'a>,
-    snapshot: &'a pbs_datastore::BackupDir,
+    snapshot: &'a pbs_datastore::BackupDir<T>,
     downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
 ) -> Result<PullStats, Error> {
     let mut pull_stats = PullStats::default();
@@ -890,9 +891,9 @@ async fn pull_snapshot<'a>(
 ///
 /// The `reader` is configured to read from the source backup directory, while the
 /// `snapshot` is pointing to the local datastore and target namespace.
-async fn pull_snapshot_from<'a>(
+async fn pull_snapshot_from<'a, T: CanWrite + Send + Sync + 'static>(
     reader: Arc<dyn PullReader + 'a>,
-    snapshot: &'a pbs_datastore::BackupDir,
+    snapshot: &'a pbs_datastore::BackupDir<T>,
     downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
 ) -> Result<PullStats, Error> {
     let (_path, is_new, _snap_lock) = snapshot
diff --git a/src/tape/file_formats/snapshot_archive.rs b/src/tape/file_formats/snapshot_archive.rs
index f5a588f4..526c69ef 100644
--- a/src/tape/file_formats/snapshot_archive.rs
+++ b/src/tape/file_formats/snapshot_archive.rs
@@ -2,6 +2,7 @@ use std::io::{Read, Write};
 use std::pin::Pin;
 use std::task::{Context, Poll};
 
+use pbs_datastore::chunk_store::CanRead;
 use proxmox_sys::error::SysError;
 use proxmox_uuid::Uuid;
 
@@ -21,9 +22,9 @@ use crate::tape::file_formats::{
 /// `LEOM` was detected before all data was written. The stream is
 /// marked inclomplete in that case and does not contain all data (The
 /// backup task must rewrite the whole file on the next media).
-pub fn tape_write_snapshot_archive<'a>(
+pub fn tape_write_snapshot_archive<'a, T: CanRead>(
     writer: &mut (dyn TapeWrite + 'a),
-    snapshot_reader: &SnapshotReader,
+    snapshot_reader: &SnapshotReader<T>,
 ) -> Result<Option<Uuid>, std::io::Error> {
     let backup_dir = snapshot_reader.snapshot();
     let snapshot =
diff --git a/src/tape/pool_writer/mod.rs b/src/tape/pool_writer/mod.rs
index 9731e1cc..54a8715e 100644
--- a/src/tape/pool_writer/mod.rs
+++ b/src/tape/pool_writer/mod.rs
@@ -3,6 +3,7 @@ pub use catalog_set::*;
 
 mod new_chunks_iterator;
 pub use new_chunks_iterator::*;
+use pbs_datastore::chunk_store::CanRead;
 
 use std::collections::HashSet;
 use std::fs::File;
@@ -445,9 +446,9 @@ impl PoolWriter {
     /// archive is marked incomplete, and we do not use it. The caller
     /// should mark the media as full and try again using another
     /// media.
-    pub fn append_snapshot_archive(
+    pub fn append_snapshot_archive<T: CanRead>(
         &mut self,
-        snapshot_reader: &SnapshotReader,
+        snapshot_reader: &SnapshotReader<T>,
     ) -> Result<(bool, usize), Error> {
         let status = match self.status {
             Some(ref mut status) => status,
@@ -536,10 +537,10 @@ impl PoolWriter {
         Ok((leom, bytes_written))
     }
 
-    pub fn spawn_chunk_reader_thread(
+    pub fn spawn_chunk_reader_thread<T: CanRead + Send + Sync + 'static>(
         &self,
-        datastore: Arc<DataStore>,
-        snapshot_reader: Arc<Mutex<SnapshotReader>>,
+        datastore: Arc<DataStore<T>>,
+        snapshot_reader: Arc<Mutex<SnapshotReader<T>>>,
     ) -> Result<(std::thread::JoinHandle<()>, NewChunksIterator), Error> {
         NewChunksIterator::spawn(datastore, snapshot_reader, Arc::clone(&self.catalog_set))
     }
diff --git a/src/tape/pool_writer/new_chunks_iterator.rs b/src/tape/pool_writer/new_chunks_iterator.rs
index 1454b33d..b83ddf3e 100644
--- a/src/tape/pool_writer/new_chunks_iterator.rs
+++ b/src/tape/pool_writer/new_chunks_iterator.rs
@@ -3,6 +3,7 @@ use std::sync::{Arc, Mutex};
 
 use anyhow::{format_err, Error};
 
+use pbs_datastore::chunk_store::CanRead;
 use pbs_datastore::{DataBlob, DataStore, SnapshotReader};
 
 use crate::tape::CatalogSet;
@@ -20,9 +21,9 @@ impl NewChunksIterator {
     /// Creates the iterator, spawning a new thread
     ///
     /// Make sure to join() the returned thread handle.
-    pub fn spawn(
-        datastore: Arc<DataStore>,
-        snapshot_reader: Arc<Mutex<SnapshotReader>>,
+    pub fn spawn<T: CanRead + Send + Sync + 'static>(
+        datastore: Arc<DataStore<T>>,
+        snapshot_reader: Arc<Mutex<SnapshotReader<T>>>,
         catalog_set: Arc<Mutex<CatalogSet>>,
     ) -> Result<(std::thread::JoinHandle<()>, Self), Error> {
         let (tx, rx) = std::sync::mpsc::sync_channel(3);
-- 
2.39.2

_______________________________________________
pbs-devel mailing list
pbs-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel