all lists on lists.proxmox.com
 help / color / mirror / Atom feed
From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [PATCH proxmox-backup v2 3/4] datastore: refactor datastore lookup parameters into dedicated type
Date: Wed, 11 Mar 2026 10:59:05 +0100	[thread overview]
Message-ID: <20260311095906.202410-6-c.ebner@proxmox.com> (raw)
In-Reply-To: <20260311095906.202410-1-c.ebner@proxmox.com>

This will allow easily extending the lookup with a callback method to
allow lookup of the node's proxy config whenever that is required by
the backend implementation.

Move this to a central helper so individual
DataStore::lookup_datastore() call sites do not each need to set
parameters that are common across lookups (including ones added in the
future).

Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
 pbs-datastore/src/datastore.rs       | 36 ++++++++++++++++++----------
 pbs-datastore/src/lib.rs             |  2 +-
 pbs-datastore/src/snapshot_reader.rs |  6 +++--
 src/api2/admin/datastore.rs          | 26 ++++++++++----------
 src/api2/admin/namespace.rs          |  9 ++++---
 src/api2/backup/mod.rs               |  3 ++-
 src/api2/reader/mod.rs               |  3 ++-
 src/api2/status/mod.rs               |  6 +++--
 src/api2/tape/backup.rs              |  6 +++--
 src/api2/tape/restore.rs             |  6 +++--
 src/bin/proxmox-backup-proxy.rs      |  8 ++++---
 src/server/prune_job.rs              |  3 ++-
 src/server/pull.rs                   |  7 ++++--
 src/server/push.rs                   |  3 ++-
 src/server/verify_job.rs             |  3 ++-
 src/tools/mod.rs                     | 10 +++++++-
 16 files changed, 90 insertions(+), 47 deletions(-)

diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index b77567e51..6fa533e2f 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -195,6 +195,17 @@ impl DataStoreImpl {
     }
 }
 
+pub struct DataStoreLookup<'a> {
+    name: &'a str,
+    operation: Operation,
+}
+
+impl<'a> DataStoreLookup<'a> {
+    pub fn with(name: &'a str, operation: Operation) -> Self {
+        Self { name, operation }
+    }
+}
+
 pub struct DataStore {
     inner: Arc<DataStoreImpl>,
     operation: Option<Operation>,
@@ -464,18 +475,18 @@ impl DataStore {
         Ok(())
     }
 
-    pub fn lookup_datastore(name: &str, operation: Operation) -> Result<Arc<DataStore>, Error> {
+    pub fn lookup_datastore(lookup: DataStoreLookup) -> Result<Arc<DataStore>, Error> {
         // Avoid TOCTOU between checking maintenance mode and updating active operation counter, as
         // we use it to decide whether it is okay to delete the datastore.
         let _config_lock = pbs_config::datastore::lock_config()?;
 
         // Get the current datastore.cfg generation number and cached config
         let (section_config, gen_num) = datastore_section_config_cached(true)?;
-        let config: DataStoreConfig = section_config.lookup("datastore", name)?;
+        let config: DataStoreConfig = section_config.lookup("datastore", lookup.name)?;
 
         if let Some(maintenance_mode) = config.get_maintenance_mode() {
-            if let Err(error) = maintenance_mode.check(operation) {
-                bail!("datastore '{name}' is unavailable: {error}");
+            if let Err(error) = maintenance_mode.check(lookup.operation) {
+                bail!("datastore '{}' is unavailable: {error}", lookup.name);
             }
         }
 
@@ -486,16 +497,16 @@ impl DataStore {
             bail!("datastore '{}' is not mounted", config.name);
         }
 
-        let entry = datastore_cache.get(name);
+        let entry = datastore_cache.get(lookup.name);
 
         // reuse chunk store so that we keep using the same process locker instance!
         let chunk_store = if let Some(datastore) = &entry {
             // Re-use DataStoreImpl
             if datastore.config_generation == gen_num && gen_num.is_some() {
-                update_active_operations(name, operation, 1)?;
+                update_active_operations(lookup.name, lookup.operation, 1)?;
                 return Ok(Arc::new(Self {
                     inner: Arc::clone(datastore),
-                    operation: Some(operation),
+                    operation: Some(lookup.operation),
                 }));
             }
             Arc::clone(&datastore.chunk_store)
@@ -505,7 +516,7 @@ impl DataStore {
                     .parse_property_string(config.tuning.as_deref().unwrap_or(""))?,
             )?;
             Arc::new(ChunkStore::open(
-                name,
+                lookup.name,
                 config.absolute_path(),
                 tuning.sync_level.unwrap_or_default(),
             )?)
@@ -514,13 +525,13 @@ impl DataStore {
         let datastore = DataStore::with_store_and_config(chunk_store, config, gen_num)?;
 
         let datastore = Arc::new(datastore);
-        datastore_cache.insert(name.to_string(), datastore.clone());
+        datastore_cache.insert(lookup.name.to_string(), datastore.clone());
 
-        update_active_operations(name, operation, 1)?;
+        update_active_operations(lookup.name, lookup.operation, 1)?;
 
         Ok(Arc::new(Self {
             inner: datastore,
-            operation: Some(operation),
+            operation: Some(lookup.operation),
         }))
     }
 
@@ -546,7 +557,8 @@ impl DataStore {
         {
             // the datastore drop handler does the checking if tasks are running and clears the
             // cache entry, so we just have to trigger it here
-            let _ = DataStore::lookup_datastore(name, Operation::Lookup);
+            let lookup = DataStoreLookup::with(name, Operation::Lookup);
+            let _ = DataStore::lookup_datastore(lookup);
         }
 
         Ok(())
diff --git a/pbs-datastore/src/lib.rs b/pbs-datastore/src/lib.rs
index 1f7c54ae8..8770a09ca 100644
--- a/pbs-datastore/src/lib.rs
+++ b/pbs-datastore/src/lib.rs
@@ -217,7 +217,7 @@ pub use store_progress::StoreProgress;
 mod datastore;
 pub use datastore::{
     check_backup_owner, ensure_datastore_is_mounted, get_datastore_mount_status, DataStore,
-    DatastoreBackend, S3_DATASTORE_IN_USE_MARKER,
+    DataStoreLookup, DatastoreBackend, S3_DATASTORE_IN_USE_MARKER,
 };
 
 mod hierarchy;
diff --git a/pbs-datastore/src/snapshot_reader.rs b/pbs-datastore/src/snapshot_reader.rs
index 231b1f493..d522a02d7 100644
--- a/pbs-datastore/src/snapshot_reader.rs
+++ b/pbs-datastore/src/snapshot_reader.rs
@@ -16,6 +16,7 @@ use pbs_api_types::{
 };
 
 use crate::backup_info::BackupDir;
+use crate::datastore::DataStoreLookup;
 use crate::dynamic_index::DynamicIndexReader;
 use crate::fixed_index::FixedIndexReader;
 use crate::index::IndexFile;
@@ -162,10 +163,11 @@ impl<F: Fn(&[u8; 32]) -> bool> Iterator for SnapshotChunkIterator<'_, F> {
                                 ),
                             };
 
-                        let datastore = DataStore::lookup_datastore(
+                        let lookup = DataStoreLookup::with(
                             self.snapshot_reader.datastore_name(),
                             Operation::Read,
-                        )?;
+                        );
+                        let datastore = DataStore::lookup_datastore(lookup)?;
                         let order =
                             datastore.get_chunks_in_order(&*index, &self.skip_fn, |_| Ok(()))?;
 
diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index a307e1488..f5bd3f0f4 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -71,7 +71,9 @@ use crate::api2::backup::optional_ns_param;
 use crate::api2::node::rrd::create_value_from_rrd;
 use crate::backup::{check_ns_privs_full, ListAccessibleBackupGroups, VerifyWorker, NS_PRIVS_OK};
 use crate::server::jobstate::{compute_schedule_status, Job, JobState};
-use crate::tools::{backup_info_to_snapshot_list_item, get_all_snapshot_files, read_backup_index};
+use crate::tools::{
+    backup_info_to_snapshot_list_item, get_all_snapshot_files, lookup_with, read_backup_index,
+};
 
 // helper to unify common sequence of checks:
 // 1. check privs on NS (full or limited access)
@@ -88,7 +90,7 @@ fn check_privs_and_load_store(
 ) -> Result<Arc<DataStore>, Error> {
     let limited = check_ns_privs_full(store, ns, auth_id, full_access_privs, partial_access_privs)?;
 
-    let datastore = DataStore::lookup_datastore(store, operation)?;
+    let datastore = DataStore::lookup_datastore(lookup_with(store, operation))?;
 
     if limited {
         let owner = datastore.get_owner(ns, backup_group)?;
@@ -134,7 +136,7 @@ pub fn list_groups(
         PRIV_DATASTORE_BACKUP,
     )?;
 
-    let datastore = DataStore::lookup_datastore(&store, Operation::Read)?;
+    let datastore = DataStore::lookup_datastore(lookup_with(&store, Operation::Read))?;
 
     datastore
         .iter_backup_groups(ns.clone())? // FIXME: Namespaces and recursion parameters!
@@ -467,7 +469,7 @@ unsafe fn list_snapshots_blocking(
         PRIV_DATASTORE_BACKUP,
     )?;
 
-    let datastore = DataStore::lookup_datastore(&store, Operation::Read)?;
+    let datastore = DataStore::lookup_datastore(lookup_with(&store, Operation::Read))?;
 
     // FIXME: filter also owner before collecting, for doing that nicely the owner should move into
     // backup group and provide an error free (Err -> None) accessor
@@ -601,7 +603,7 @@ pub async fn status(
         }
     };
 
-    let datastore = DataStore::lookup_datastore(&store, Operation::Read)?;
+    let datastore = DataStore::lookup_datastore(lookup_with(&store, Operation::Read))?;
 
     let (counts, gc_status) = if verbose {
         let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
@@ -724,7 +726,7 @@ pub fn verify(
         PRIV_DATASTORE_BACKUP,
     )?;
 
-    let datastore = DataStore::lookup_datastore(&store, Operation::Read)?;
+    let datastore = DataStore::lookup_datastore(lookup_with(&store, Operation::Read))?;
     let ignore_verified = ignore_verified.unwrap_or(true);
 
     let worker_id;
@@ -1076,7 +1078,7 @@ pub fn prune_datastore(
         true,
     )?;
 
-    let datastore = DataStore::lookup_datastore(&store, Operation::Write)?;
+    let datastore = DataStore::lookup_datastore(lookup_with(&store, Operation::Write))?;
     let ns = prune_options.ns.clone().unwrap_or_default();
     let worker_id = format!("{store}:{ns}");
 
@@ -1114,7 +1116,7 @@ pub fn start_garbage_collection(
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-    let datastore = DataStore::lookup_datastore(&store, Operation::Write)?;
+    let datastore = DataStore::lookup_datastore(lookup_with(&store, Operation::Write))?;
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
     let job = Job::new("garbage_collection", &store)
@@ -1161,7 +1163,7 @@ pub fn garbage_collection_status(
         ..Default::default()
     };
 
-    let datastore = DataStore::lookup_datastore(&store, Operation::Read)?;
+    let datastore = DataStore::lookup_datastore(lookup_with(&store, Operation::Read))?;
     let status_in_memory = datastore.last_gc_status();
     let state_file = JobState::load("garbage_collection", &store)
         .map_err(|err| log::error!("could not open GC statefile for {store}: {err}"))
@@ -1873,7 +1875,7 @@ pub fn get_rrd_stats(
     cf: RrdMode,
     _param: Value,
 ) -> Result<Value, Error> {
-    let datastore = DataStore::lookup_datastore(&store, Operation::Read)?;
+    let datastore = DataStore::lookup_datastore(lookup_with(&store, Operation::Read))?;
     let disk_manager = crate::tools::disks::DiskManage::new();
 
     let mut rrd_fields = vec![
@@ -2249,7 +2251,7 @@ pub async fn set_backup_owner(
             PRIV_DATASTORE_BACKUP,
         )?;
 
-        let datastore = DataStore::lookup_datastore(&store, Operation::Write)?;
+        let datastore = DataStore::lookup_datastore(lookup_with(&store, Operation::Write))?;
 
         let backup_group = datastore.backup_group(ns, backup_group);
         let owner = backup_group.get_owner()?;
@@ -2734,7 +2736,7 @@ pub fn s3_refresh(store: String, rpcenv: &mut dyn RpcEnvironment) -> Result<Valu
 /// Performs an s3 refresh for given datastore. Expects the store to already be in maintenance mode
 /// s3-refresh.
 pub(crate) fn do_s3_refresh(store: &str, worker: &dyn WorkerTaskContext) -> Result<(), Error> {
-    let datastore = DataStore::lookup_datastore(store, Operation::Lookup)?;
+    let datastore = DataStore::lookup_datastore(lookup_with(store, Operation::Lookup))?;
     run_maintenance_locked(store, MaintenanceType::S3Refresh, worker, || {
         proxmox_async::runtime::block_on(datastore.s3_refresh())
     })
diff --git a/src/api2/admin/namespace.rs b/src/api2/admin/namespace.rs
index 30e24d8db..c885ab540 100644
--- a/src/api2/admin/namespace.rs
+++ b/src/api2/admin/namespace.rs
@@ -54,7 +54,8 @@ pub fn create_namespace(
 
     check_ns_modification_privs(&store, &ns, &auth_id)?;
 
-    let datastore = DataStore::lookup_datastore(&store, Operation::Write)?;
+    let lookup = crate::tools::lookup_with(&store, Operation::Write);
+    let datastore = DataStore::lookup_datastore(lookup)?;
 
     datastore.create_namespace(&parent, name)
 }
@@ -97,7 +98,8 @@ pub fn list_namespaces(
     // get result up-front to avoid cloning NS, it's relatively cheap anyway (no IO normally)
     let parent_access = check_ns_privs(&store, &parent, &auth_id, NS_PRIVS_OK);
 
-    let datastore = DataStore::lookup_datastore(&store, Operation::Read)?;
+    let lookup = crate::tools::lookup_with(&store, Operation::Read);
+    let datastore = DataStore::lookup_datastore(lookup)?;
 
     let iter = match datastore.recursive_iter_backup_ns_ok(parent, max_depth) {
         Ok(iter) => iter,
@@ -162,7 +164,8 @@ pub fn delete_namespace(
 
     check_ns_modification_privs(&store, &ns, &auth_id)?;
 
-    let datastore = DataStore::lookup_datastore(&store, Operation::Write)?;
+    let lookup = crate::tools::lookup_with(&store, Operation::Write);
+    let datastore = DataStore::lookup_datastore(lookup)?;
 
     let (removed_all, stats) = datastore.remove_namespace_recursive(&ns, delete_groups)?;
     if !removed_all {
diff --git a/src/api2/backup/mod.rs b/src/api2/backup/mod.rs
index 946510e85..6708f3da3 100644
--- a/src/api2/backup/mod.rs
+++ b/src/api2/backup/mod.rs
@@ -99,7 +99,8 @@ fn upgrade_to_backup_protocol(
             )
             .map_err(|err| http_err!(FORBIDDEN, "{err}"))?;
 
-        let datastore = DataStore::lookup_datastore(&store, Operation::Write)?;
+        let lookup = crate::tools::lookup_with(&store, Operation::Write);
+        let datastore = DataStore::lookup_datastore(lookup)?;
 
         let protocols = parts
             .headers
diff --git a/src/api2/reader/mod.rs b/src/api2/reader/mod.rs
index 9262eb6cb..a814ba5f7 100644
--- a/src/api2/reader/mod.rs
+++ b/src/api2/reader/mod.rs
@@ -96,7 +96,8 @@ fn upgrade_to_backup_reader_protocol(
             bail!("no permissions on /{}", acl_path.join("/"));
         }
 
-        let datastore = DataStore::lookup_datastore(&store, Operation::Read)?;
+        let lookup = crate::tools::lookup_with(&store, Operation::Read);
+        let datastore = DataStore::lookup_datastore(lookup)?;
 
         let backup_dir = pbs_api_types::BackupDir::deserialize(&param)?;
 
diff --git a/src/api2/status/mod.rs b/src/api2/status/mod.rs
index 885fdb0cc..43bb95d19 100644
--- a/src/api2/status/mod.rs
+++ b/src/api2/status/mod.rs
@@ -69,7 +69,8 @@ pub async fn datastore_status(
         };
 
         if !allowed {
-            if let Ok(datastore) = DataStore::lookup_datastore(store, Operation::Lookup) {
+            let lookup = crate::tools::lookup_with(store, Operation::Lookup);
+            if let Ok(datastore) = DataStore::lookup_datastore(lookup) {
                 if can_access_any_namespace(datastore, &auth_id, &user_info) {
                     list.push(DataStoreStatusListItem::empty(store, None, mount_status));
                 }
@@ -77,7 +78,8 @@ pub async fn datastore_status(
             continue;
         }
 
-        let datastore = match DataStore::lookup_datastore(store, Operation::Read) {
+        let lookup = crate::tools::lookup_with(store, Operation::Read);
+        let datastore = match DataStore::lookup_datastore(lookup) {
             Ok(datastore) => datastore,
             Err(err) => {
                 list.push(DataStoreStatusListItem::empty(
diff --git a/src/api2/tape/backup.rs b/src/api2/tape/backup.rs
index 47e8d0209..c254c6d8b 100644
--- a/src/api2/tape/backup.rs
+++ b/src/api2/tape/backup.rs
@@ -152,7 +152,8 @@ pub fn do_tape_backup_job(
 
     let worker_type = job.jobtype().to_string();
 
-    let datastore = DataStore::lookup_datastore(&setup.store, Operation::Read)?;
+    let lookup = crate::tools::lookup_with(&setup.store, Operation::Read);
+    let datastore = DataStore::lookup_datastore(lookup)?;
 
     let (config, _digest) = pbs_config::media_pool::config()?;
     let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;
@@ -310,7 +311,8 @@ pub fn backup(
 
     check_backup_permission(&auth_id, &setup.store, &setup.pool, &setup.drive)?;
 
-    let datastore = DataStore::lookup_datastore(&setup.store, Operation::Read)?;
+    let lookup = crate::tools::lookup_with(&setup.store, Operation::Read);
+    let datastore = DataStore::lookup_datastore(lookup)?;
 
     let (config, _digest) = pbs_config::media_pool::config()?;
     let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;
diff --git a/src/api2/tape/restore.rs b/src/api2/tape/restore.rs
index 92529a76d..4356cf748 100644
--- a/src/api2/tape/restore.rs
+++ b/src/api2/tape/restore.rs
@@ -144,10 +144,12 @@ impl TryFrom<String> for DataStoreMap {
             if let Some(index) = store.find('=') {
                 let mut target = store.split_off(index);
                 target.remove(0); // remove '='
-                let datastore = DataStore::lookup_datastore(&target, Operation::Write)?;
+                let lookup = crate::tools::lookup_with(&target, Operation::Write);
+                let datastore = DataStore::lookup_datastore(lookup)?;
                 map.insert(store, datastore);
             } else if default.is_none() {
-                default = Some(DataStore::lookup_datastore(&store, Operation::Write)?);
+                let lookup = crate::tools::lookup_with(&store, Operation::Write);
+                default = Some(DataStore::lookup_datastore(lookup)?);
             } else {
                 bail!("multiple default stores given");
             }
diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs
index 3be8e8dcf..3014d3092 100644
--- a/src/bin/proxmox-backup-proxy.rs
+++ b/src/bin/proxmox-backup-proxy.rs
@@ -47,7 +47,7 @@ use pbs_api_types::{
 use proxmox_backup::auth_helpers::*;
 use proxmox_backup::config;
 use proxmox_backup::server::{self, metric_collection};
-use proxmox_backup::tools::PROXMOX_BACKUP_TCP_KEEPALIVE_TIME;
+use proxmox_backup::tools::{lookup_with, PROXMOX_BACKUP_TCP_KEEPALIVE_TIME};
 
 use proxmox_backup::api2::tape::backup::do_tape_backup_job;
 use proxmox_backup::server::do_prune_job;
@@ -530,7 +530,8 @@ async fn schedule_datastore_garbage_collection() {
 
         {
             // limit datastore scope due to Op::Lookup
-            let datastore = match DataStore::lookup_datastore(&store, Operation::Lookup) {
+            let lookup = lookup_with(&store, Operation::Lookup);
+            let datastore = match DataStore::lookup_datastore(lookup) {
                 Ok(datastore) => datastore,
                 Err(err) => {
                     eprintln!("lookup_datastore failed - {err}");
@@ -573,7 +574,8 @@ async fn schedule_datastore_garbage_collection() {
             Err(_) => continue, // could not get lock
         };
 
-        let datastore = match DataStore::lookup_datastore(&store, Operation::Write) {
+        let lookup = lookup_with(&store, Operation::Write);
+        let datastore = match DataStore::lookup_datastore(lookup) {
             Ok(datastore) => datastore,
             Err(err) => {
                 log::warn!("skipping scheduled GC on {store}, could look it up - {err}");
diff --git a/src/server/prune_job.rs b/src/server/prune_job.rs
index bb86a323e..ca5c67541 100644
--- a/src/server/prune_job.rs
+++ b/src/server/prune_job.rs
@@ -133,7 +133,8 @@ pub fn do_prune_job(
     auth_id: &Authid,
     schedule: Option<String>,
 ) -> Result<String, Error> {
-    let datastore = DataStore::lookup_datastore(&store, Operation::Write)?;
+    let lookup = crate::tools::lookup_with(&store, Operation::Write);
+    let datastore = DataStore::lookup_datastore(lookup)?;
 
     let worker_type = job.jobtype().to_string();
     let auth_id = auth_id.clone();
diff --git a/src/server/pull.rs b/src/server/pull.rs
index 412a59e66..dece52f34 100644
--- a/src/server/pull.rs
+++ b/src/server/pull.rs
@@ -112,12 +112,15 @@ impl PullParameters {
                 client,
             })
         } else {
+            let lookup = crate::tools::lookup_with(remote_store, Operation::Read);
+            let store = DataStore::lookup_datastore(lookup)?;
             Arc::new(LocalSource {
-                store: DataStore::lookup_datastore(remote_store, Operation::Read)?,
+                store,
                 ns: remote_ns,
             })
         };
-        let store = DataStore::lookup_datastore(store, Operation::Write)?;
+        let lookup = crate::tools::lookup_with(store, Operation::Write);
+        let store = DataStore::lookup_datastore(lookup)?;
         let backend = store.backend()?;
         let target = PullTarget { store, ns, backend };
 
diff --git a/src/server/push.rs b/src/server/push.rs
index 92bbbb9fc..2d335f559 100644
--- a/src/server/push.rs
+++ b/src/server/push.rs
@@ -109,7 +109,8 @@ impl PushParameters {
         let remove_vanished = remove_vanished.unwrap_or(false);
         let encrypted_only = encrypted_only.unwrap_or(false);
         let verified_only = verified_only.unwrap_or(false);
-        let store = DataStore::lookup_datastore(store, Operation::Read)?;
+        let lookup = crate::tools::lookup_with(store, Operation::Read);
+        let store = DataStore::lookup_datastore(lookup)?;
 
         if !store.namespace_exists(&ns) {
             bail!(
diff --git a/src/server/verify_job.rs b/src/server/verify_job.rs
index 2ec8c5138..ab14c7389 100644
--- a/src/server/verify_job.rs
+++ b/src/server/verify_job.rs
@@ -15,7 +15,8 @@ pub fn do_verification_job(
     schedule: Option<String>,
     to_stdout: bool,
 ) -> Result<String, Error> {
-    let datastore = DataStore::lookup_datastore(&verification_job.store, Operation::Read)?;
+    let lookup = crate::tools::lookup_with(&verification_job.store, Operation::Read);
+    let datastore = DataStore::lookup_datastore(lookup)?;
 
     let outdated_after = verification_job.outdated_after;
     let ignore_verified_snapshots = verification_job.ignore_verified.unwrap_or(true);
diff --git a/src/tools/mod.rs b/src/tools/mod.rs
index 93b4d8ea4..4e9f9928c 100644
--- a/src/tools/mod.rs
+++ b/src/tools/mod.rs
@@ -6,12 +6,14 @@ use anyhow::{bail, Error};
 use std::collections::HashSet;
 
 use pbs_api_types::{
-    Authid, BackupContent, CryptMode, SnapshotListItem, SnapshotVerifyState, MANIFEST_BLOB_NAME,
+    Authid, BackupContent, CryptMode, Operation, SnapshotListItem, SnapshotVerifyState,
+    MANIFEST_BLOB_NAME,
 };
 use proxmox_http::{client::Client, HttpOptions, ProxyConfig};
 
 use pbs_datastore::backup_info::{BackupDir, BackupInfo};
 use pbs_datastore::manifest::BackupManifest;
+use pbs_datastore::DataStoreLookup;
 
 use crate::config::node;
 
@@ -197,3 +199,9 @@ pub(super) fn node_proxy_config() -> Option<proxmox_http::ProxyConfig> {
         None
     }
 }
+
+/// Construct a [`DataStoreLookup`] from the given datastore name and operation.
+#[inline(always)]
+pub fn lookup_with<'a>(name: &'a str, operation: Operation) -> DataStoreLookup<'a> {
+    DataStoreLookup::with(name, operation)
+}
-- 
2.47.3





  parent reply	other threads:[~2026-03-11  9:59 UTC|newest]

Thread overview: 8+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-03-11  9:59 [PATCH proxmox{,-backup} v2 0/6] fix #6716: Add support for http proxy configuration for S3 endpoints Christian Ebner
2026-03-11  9:59 ` [PATCH proxmox v2 1/2] pbs-api-types: make operation non-optional for maintenance-mode check Christian Ebner
2026-03-11  9:59 ` [PATCH proxmox v2 2/2] s3-client: add proxy configuration as optional client option Christian Ebner
2026-03-11  9:59 ` [PATCH proxmox-backup v2 1/4] datastore: make operation non-optional in lookups Christian Ebner
2026-03-11  9:59 ` [PATCH proxmox-backup v2 2/4] tools: factor out node proxy config read helper Christian Ebner
2026-03-11  9:59 ` Christian Ebner [this message]
2026-03-11  9:59 ` [PATCH proxmox-backup v2 4/4] fix #6716: pass node http proxy config to s3 backend Christian Ebner
2026-03-12 11:43 ` [PATCH proxmox{,-backup} v2 0/6] fix #6716: Add support for http proxy configuration for S3 endpoints Christian Ebner

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260311095906.202410-6-c.ebner@proxmox.com \
    --to=c.ebner@proxmox.com \
    --cc=pbs-devel@lists.proxmox.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.
Service provided by Proxmox Server Solutions GmbH | Privacy | Legal