From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [pbs-devel] [PATCH proxmox-backup 4/4] fix #6716: pass node http proxy config to s3 backend
Date: Thu, 22 Jan 2026 16:11:25 +0100
Message-ID: <20260122151125.832787-7-c.ebner@proxmox.com>
In-Reply-To: <20260122151125.832787-1-c.ebner@proxmox.com>

Reading the http proxy directly from the node config is not an option,
as that would cross crate boundaries. Passing the proxy config
unconditionally must be avoided as well, so that the node config is not
read on every datastore lookup, in particular for non-S3 datastores
which never need it anyway.

Instead, pass a callback to the datastore on instantiation, allowing
its backend implementation to fetch the node's proxy config only when
it is actually needed to instantiate the S3 client.
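
For reviewers, a minimal self-contained sketch of the lazy callback
pattern described above (simplified stand-in types, not the exact
signatures introduced by this patch):

    // `ProxyConfig`, `Lookup`, `read_node_proxy_config` and
    // `build_s3_client` are simplified stand-ins for illustration only.
    use std::sync::Arc;

    struct ProxyConfig {
        url: String,
    }

    // Plain function pointer, so the callback stays cheap to store and is Copy.
    type ProxyConfigCallback = fn() -> Option<ProxyConfig>;

    struct Lookup {
        store: String,
        proxy_config_callback: Arc<Option<ProxyConfigCallback>>,
    }

    // Stand-in for reading the proxy setting from the node config.
    fn read_node_proxy_config() -> Option<ProxyConfig> {
        Some(ProxyConfig {
            url: "http://proxy.example:3128".to_string(),
        })
    }

    // Only the S3 code path invokes the callback, so non-S3 lookups
    // never touch the node config.
    fn build_s3_client(lookup: &Lookup) {
        let proxy = match *lookup.proxy_config_callback {
            Some(callback) => callback(),
            None => None,
        };
        println!(
            "creating s3 client for {} with proxy {:?}",
            lookup.store,
            proxy.map(|p| p.url),
        );
    }

    fn main() {
        let lookup = Lookup {
            store: "store1".to_string(),
            proxy_config_callback: Arc::new(Some(read_node_proxy_config)),
        };
        build_s3_client(&lookup);
    }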

Fixes: https://bugzilla.proxmox.com/show_bug.cgi?id=6716
Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
 pbs-datastore/src/datastore.rs              | 51 +++++++++++++++++----
 pbs-datastore/src/snapshot_reader.rs        | 14 ++++--
 src/api2/admin/s3.rs                        |  2 +
 src/api2/config/datastore.rs                | 16 +++++--
 src/api2/config/s3.rs                       | 11 ++++-
 src/tape/pool_writer/new_chunks_iterator.rs | 15 +++---
 src/tools/mod.rs                            |  3 +-
 7 files changed, 88 insertions(+), 24 deletions(-)

diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index 6fa533e2f..efd747367 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -11,6 +11,7 @@ use http_body_util::BodyExt;
 use hyper::body::Bytes;
 use nix::unistd::{unlinkat, UnlinkatFlags};
 use pbs_tools::lru_cache::LruCache;
+use proxmox_http::ProxyConfig;
 use tokio::io::AsyncWriteExt;
 use tracing::{info, warn};
 
@@ -195,20 +196,32 @@ impl DataStoreImpl {
     }
 }
 
+pub type ProxyConfigCallback = fn() -> Option<ProxyConfig>;
+
 pub struct DataStoreLookup<'a> {
     name: &'a str,
     operation: Operation,
+    proxy_config_callback: Arc<Option<ProxyConfigCallback>>,
 }
 
 impl<'a> DataStoreLookup<'a> {
-    pub fn with(name: &'a str, operation: Operation) -> Self {
-        Self { name, operation }
+    pub fn with(
+        name: &'a str,
+        operation: Operation,
+        proxy_config_callback: Arc<Option<ProxyConfigCallback>>,
+    ) -> Self {
+        Self {
+            name,
+            operation,
+            proxy_config_callback,
+        }
     }
 }
 
 pub struct DataStore {
     inner: Arc<DataStoreImpl>,
     operation: Option<Operation>,
+    proxy_config_callback: Arc<Option<ProxyConfigCallback>>,
 }
 
 impl Clone for DataStore {
@@ -224,6 +237,7 @@ impl Clone for DataStore {
         DataStore {
             inner: self.inner.clone(),
             operation: new_operation,
+            proxy_config_callback: Arc::clone(&self.proxy_config_callback),
         }
     }
 }
@@ -409,6 +423,7 @@ impl DataStore {
         Arc::new(Self {
             inner: unsafe { DataStoreImpl::new_test() },
             operation: None,
+            proxy_config_callback: Arc::new(None),
         })
     }
 
@@ -437,6 +452,7 @@ impl DataStore {
                     user: pbs_config::backup_user()?,
                     base_path: S3_CLIENT_RATE_LIMITER_BASE_PATH.into(),
                 };
+                let proxy_config = self.proxy_config_callback.map(|cb| cb()).flatten();
 
                 let options = S3ClientOptions::from_config(
                     config.config,
@@ -444,6 +460,7 @@ impl DataStore {
                     Some(bucket),
                     self.name().to_owned(),
                     Some(rate_limiter_options),
+                    proxy_config,
                 );
                 let s3_client = S3Client::new(options)?;
                 DatastoreBackend::S3(Arc::new(s3_client))
@@ -507,6 +524,7 @@ impl DataStore {
                 return Ok(Arc::new(Self {
                     inner: Arc::clone(datastore),
                     operation: Some(lookup.operation),
+                    proxy_config_callback: Arc::clone(&lookup.proxy_config_callback),
                 }));
             }
             Arc::clone(&datastore.chunk_store)
@@ -532,6 +550,7 @@ impl DataStore {
         Ok(Arc::new(Self {
             inner: datastore,
             operation: Some(lookup.operation),
+            proxy_config_callback: Arc::clone(&lookup.proxy_config_callback),
         }))
     }
 
@@ -557,7 +576,7 @@ impl DataStore {
         {
             // the datastore drop handler does the checking if tasks are running and clears the
             // cache entry, so we just have to trigger it here
-            let lookup = DataStoreLookup::with(name, Operation::Lookup);
+            let lookup = DataStoreLookup::with(name, Operation::Lookup, Arc::new(None));
             let _ = DataStore::lookup_datastore(lookup);
         }
 
@@ -617,7 +636,11 @@ impl DataStore {
             update_active_operations(&name, operation, 1)?;
         }
 
-        Ok(Arc::new(Self { inner, operation }))
+        Ok(Arc::new(Self {
+            inner,
+            operation,
+            proxy_config_callback: Arc::new(None),
+        }))
     }
 
     fn with_store_and_config(
@@ -2352,7 +2375,11 @@ impl DataStore {
     /// Destroy a datastore. This requires that there are no active operations on the datastore.
     ///
     /// This is a synchronous operation and should be run in a worker-thread.
-    pub fn destroy(name: &str, destroy_data: bool) -> Result<(), Error> {
+    pub fn destroy(
+        name: &str,
+        destroy_data: bool,
+        proxy_config_callback: ProxyConfigCallback,
+    ) -> Result<(), Error> {
         let config_lock = pbs_config::datastore::lock_config()?;
 
         let (mut config, _digest) = pbs_config::datastore::config()?;
@@ -2401,9 +2428,10 @@ impl DataStore {
                 }
             }
 
-            if let (_backend, Some(s3_client)) =
-                Self::s3_client_and_backend_from_datastore_config(&datastore_config)?
-            {
+            if let (_backend, Some(s3_client)) = Self::s3_client_and_backend_from_datastore_config(
+                &datastore_config,
+                proxy_config_callback,
+            )? {
                 // Delete all objects within the datastore prefix
                 let prefix = S3PathPrefix::Some(String::default());
                 let delete_objects_error =
@@ -2418,7 +2446,10 @@ impl DataStore {
                 remove(".chunks", &mut ok);
             }
         } else if let (_backend, Some(s3_client)) =
-            Self::s3_client_and_backend_from_datastore_config(&datastore_config)?
+            Self::s3_client_and_backend_from_datastore_config(
+                &datastore_config,
+                proxy_config_callback,
+            )?
         {
             // Only delete in-use marker so datastore can be re-imported
             let object_key = S3ObjectKey::try_from(S3_DATASTORE_IN_USE_MARKER)
@@ -2637,6 +2668,7 @@ impl DataStore {
 
     pub fn s3_client_and_backend_from_datastore_config(
         datastore_config: &DataStoreConfig,
+        proxy_config_callback: ProxyConfigCallback,
     ) -> Result<(DatastoreBackendType, Option<S3Client>), Error> {
         let backend_config: DatastoreBackendConfig =
             datastore_config.backend.as_deref().unwrap_or("").parse()?;
@@ -2671,6 +2703,7 @@ impl DataStore {
             Some(bucket),
             datastore_config.name.to_owned(),
             Some(rate_limiter_options),
+            proxy_config_callback(),
         );
         let s3_client = S3Client::new(options)
             .context("failed to create s3 client")
diff --git a/pbs-datastore/src/snapshot_reader.rs b/pbs-datastore/src/snapshot_reader.rs
index d522a02d7..ddae632cc 100644
--- a/pbs-datastore/src/snapshot_reader.rs
+++ b/pbs-datastore/src/snapshot_reader.rs
@@ -16,7 +16,7 @@ use pbs_api_types::{
 };
 
 use crate::backup_info::BackupDir;
-use crate::datastore::DataStoreLookup;
+use crate::datastore::{DataStoreLookup, ProxyConfigCallback};
 use crate::dynamic_index::DynamicIndexReader;
 use crate::fixed_index::FixedIndexReader;
 use crate::index::IndexFile;
@@ -125,8 +125,9 @@ impl SnapshotReader {
     pub fn chunk_iterator<F: Fn(&[u8; 32]) -> bool>(
         &'_ self,
         skip_fn: F,
+        proxy_config_callback: Arc<Option<ProxyConfigCallback>>,
     ) -> Result<SnapshotChunkIterator<'_, F>, Error> {
-        SnapshotChunkIterator::new(self, skip_fn)
+        SnapshotChunkIterator::new(self, skip_fn, proxy_config_callback)
     }
 }
 
@@ -139,6 +140,7 @@ pub struct SnapshotChunkIterator<'a, F: Fn(&[u8; 32]) -> bool> {
     snapshot_reader: &'a SnapshotReader,
     todo_list: Vec<String>,
     skip_fn: F,
+    proxy_config_callback: Arc<Option<ProxyConfigCallback>>,
     #[allow(clippy::type_complexity)]
     current_index: Option<(Rc<Box<dyn IndexFile + Send>>, usize, Vec<(usize, u64)>)>,
 }
@@ -166,6 +168,7 @@ impl<F: Fn(&[u8; 32]) -> bool> Iterator for SnapshotChunkIterator<'_, F> {
                         let lookup = DataStoreLookup::with(
                             self.snapshot_reader.datastore_name(),
                             Operation::Read,
+                            Arc::clone(&self.proxy_config_callback),
                         );
                         let datastore = DataStore::lookup_datastore(lookup)?;
                         let order =
@@ -192,7 +195,11 @@ impl<F: Fn(&[u8; 32]) -> bool> Iterator for SnapshotChunkIterator<'_, F> {
 }
 
 impl<'a, F: Fn(&[u8; 32]) -> bool> SnapshotChunkIterator<'a, F> {
-    pub fn new(snapshot_reader: &'a SnapshotReader, skip_fn: F) -> Result<Self, Error> {
+    pub fn new(
+        snapshot_reader: &'a SnapshotReader,
+        skip_fn: F,
+        proxy_config_callback: Arc<Option<ProxyConfigCallback>>,
+    ) -> Result<Self, Error> {
         let mut todo_list = Vec::new();
 
         for filename in snapshot_reader.file_list() {
@@ -209,6 +216,7 @@ impl<'a, F: Fn(&[u8; 32]) -> bool> SnapshotChunkIterator<'a, F> {
             todo_list,
             current_index: None,
             skip_fn,
+            proxy_config_callback,
         })
     }
 }
diff --git a/src/api2/admin/s3.rs b/src/api2/admin/s3.rs
index 73388281b..ea3b15979 100644
--- a/src/api2/admin/s3.rs
+++ b/src/api2/admin/s3.rs
@@ -47,6 +47,7 @@ pub async fn check(
     let config: S3ClientConf = config
         .lookup(S3_CFG_TYPE_ID, &s3_client_id)
         .context("config lookup failed")?;
+    let http_proxy = crate::tools::node_proxy_config();
 
     let store_prefix = store_prefix.unwrap_or_default();
     let options = S3ClientOptions::from_config(
@@ -55,6 +56,7 @@ pub async fn check(
         Some(bucket),
         store_prefix,
         None,
+        http_proxy,
     );
 
     let test_object_key =
diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
index f845fe2d0..45a251851 100644
--- a/src/api2/config/datastore.rs
+++ b/src/api2/config/datastore.rs
@@ -128,7 +128,10 @@ pub(crate) fn do_create_datastore(
     )?;
 
     let (backend_type, backend_s3_client) =
-        match DataStore::s3_client_and_backend_from_datastore_config(&datastore)? {
+        match DataStore::s3_client_and_backend_from_datastore_config(
+            &datastore,
+            crate::tools::node_proxy_config,
+        )? {
             (backend_type, Some(s3_client)) => {
                 if !overwrite_in_use {
                     let object_key = S3ObjectKey::try_from(S3_DATASTORE_IN_USE_MARKER)
@@ -342,7 +345,10 @@ pub fn create_datastore(
 
     let store_name = config.name.to_string();
 
-    let (backend, s3_client) = DataStore::s3_client_and_backend_from_datastore_config(&config)?;
+    let (backend, s3_client) = DataStore::s3_client_and_backend_from_datastore_config(
+        &config,
+        crate::tools::node_proxy_config,
+    )?;
     if let Some(s3_client) = s3_client {
         proxmox_async::runtime::block_on(s3_client.head_bucket())
             .context("failed to access bucket")
@@ -770,7 +776,11 @@ pub async fn delete_datastore(
         auth_id.to_string(),
         to_stdout,
         move |_worker| {
-            pbs_datastore::DataStore::destroy(&name, destroy_data)?;
+            pbs_datastore::DataStore::destroy(
+                &name,
+                destroy_data,
+                crate::tools::node_proxy_config,
+            )?;
 
             // ignore errors
             let _ = jobstate::remove_state_file("prune", &name);
diff --git a/src/api2/config/s3.rs b/src/api2/config/s3.rs
index 27b3c4cc2..20508fe33 100644
--- a/src/api2/config/s3.rs
+++ b/src/api2/config/s3.rs
@@ -348,10 +348,17 @@ pub async fn list_buckets(
     let config: S3ClientConf = config
         .lookup(S3_CFG_TYPE_ID, &id)
         .context("config lookup failed")?;
+    let http_proxy = crate::tools::node_proxy_config();
 
     let empty_prefix = String::new();
-    let options =
-        S3ClientOptions::from_config(config.config, config.secret_key, None, empty_prefix, None);
+    let options = S3ClientOptions::from_config(
+        config.config,
+        config.secret_key,
+        None,
+        empty_prefix,
+        None,
+        http_proxy,
+    );
     let client = S3Client::new(options).context("client creation failed")?;
     let list_buckets_response = client
         .list_buckets()
diff --git a/src/tape/pool_writer/new_chunks_iterator.rs b/src/tape/pool_writer/new_chunks_iterator.rs
index 0e29516f8..f8040056a 100644
--- a/src/tape/pool_writer/new_chunks_iterator.rs
+++ b/src/tape/pool_writer/new_chunks_iterator.rs
@@ -39,12 +39,15 @@ impl NewChunksIterator {
             let datastore_name = snapshot_reader.datastore_name().to_string();
 
             let result: Result<(), Error> = proxmox_lang::try_block!({
-                let chunk_iter = snapshot_reader.chunk_iterator(move |digest| {
-                    catalog_set
-                        .lock()
-                        .unwrap()
-                        .contains_chunk(&datastore_name, digest)
-                })?;
+                let chunk_iter = snapshot_reader.chunk_iterator(
+                    move |digest| {
+                        catalog_set
+                            .lock()
+                            .unwrap()
+                            .contains_chunk(&datastore_name, digest)
+                    },
+                    Arc::new(None), // FIXME: required once S3 <-> tape is implemented
+                )?;
 
                 let reader_pool =
                     ParallelHandler::new("tape backup chunk reader pool", read_threads, {
diff --git a/src/tools/mod.rs b/src/tools/mod.rs
index 4e9f9928c..5f3505417 100644
--- a/src/tools/mod.rs
+++ b/src/tools/mod.rs
@@ -4,6 +4,7 @@
 
 use anyhow::{bail, Error};
 use std::collections::HashSet;
+use std::sync::Arc;
 
 use pbs_api_types::{
     Authid, BackupContent, CryptMode, Operation, SnapshotListItem, SnapshotVerifyState,
@@ -203,5 +204,5 @@ pub(super) fn node_proxy_config() -> Option<proxmox_http::ProxyConfig> {
 /// Read the nodes http proxy config from the node config.
 #[inline(always)]
 pub fn lookup_with<'a>(name: &'a str, operation: Operation) -> DataStoreLookup<'a> {
-    DataStoreLookup::with(name, operation)
+    DataStoreLookup::with(name, operation, Arc::new(Some(node_proxy_config)))
 }
-- 
2.47.3