From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Date: Thu, 22 Jan 2026 16:11:25 +0100
Message-ID: <20260122151125.832787-7-c.ebner@proxmox.com>
X-Mailer: git-send-email 2.47.3
In-Reply-To: <20260122151125.832787-1-c.ebner@proxmox.com>
References: <20260122151125.832787-1-c.ebner@proxmox.com>
Subject: [pbs-devel] [PATCH proxmox-backup 4/4] fix #6716: pass node http proxy config to s3 backend

Reading the http proxy directly from the node config is not an option,
as that would cross crate boundaries. Passing it unconditionally must
also be avoided, since the node config would then be read on every
datastore lookup, even for non-s3 datastores which never need it.

Instead, pass a callback to the datastore on instantiation, allowing
its backend implementation to fetch the node's proxy config only when
it is actually needed to instantiate the s3 client.

Fixes: https://bugzilla.proxmox.com/show_bug.cgi?id=6716
Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
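Note: below is a self-contained sketch of the callback pattern this
patch uses. The names ProxyConfig, Backend, Store, uses_s3 and
node_proxy_config here are simplified stand-ins for illustration, not
the actual pbs-datastore/proxmox-http items; it only shows why the
node config is read no earlier than when an s3 client has to be built:

use std::sync::Arc;

// Stand-in for the real proxy configuration type.
#[derive(Clone, Debug)]
struct ProxyConfig {
    host: String,
    port: u16,
}

// A plain fn pointer keeps the datastore layer free of any dependency
// on the node config parsing code.
type ProxyConfigCallback = fn() -> Option<ProxyConfig>;

enum Backend {
    Filesystem,
    S3 { proxy: Option<ProxyConfig> },
}

struct Store {
    uses_s3: bool,
    proxy_config_callback: Arc<Option<ProxyConfigCallback>>,
}

impl Store {
    fn backend(&self) -> Backend {
        if self.uses_s3 {
            // The callback is only invoked here, i.e. the node config
            // is only read when an s3 backend is actually set up.
            let proxy = self.proxy_config_callback.map(|cb| cb()).flatten();
            Backend::S3 { proxy }
        } else {
            Backend::Filesystem
        }
    }
}

// Hypothetical lookup; in the patch this role is played by
// crate::tools::node_proxy_config().
fn node_proxy_config() -> Option<ProxyConfig> {
    Some(ProxyConfig {
        host: "proxy.example".to_string(),
        port: 3128,
    })
}

fn main() {
    let store = Store {
        uses_s3: true,
        proxy_config_callback: Arc::new(Some(node_proxy_config as ProxyConfigCallback)),
    };
    match store.backend() {
        Backend::S3 { proxy } => println!("s3 backend, proxy: {proxy:?}"),
        Backend::Filesystem => println!("filesystem backend, node config never read"),
    }
}

Callers that have no proxy to offer (for now the tape code path and
the drop-handler lookup) simply pass Arc::new(None), so the lookup
stays a no-op for them.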
 pbs-datastore/src/datastore.rs              | 51 +++++++++++++++++----
 pbs-datastore/src/snapshot_reader.rs        | 14 ++++--
 src/api2/admin/s3.rs                        |  2 +
 src/api2/config/datastore.rs                | 16 +++++--
 src/api2/config/s3.rs                       | 11 ++++-
 src/tape/pool_writer/new_chunks_iterator.rs | 15 +++---
 src/tools/mod.rs                            |  3 +-
 7 files changed, 88 insertions(+), 24 deletions(-)

diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index 6fa533e2f..efd747367 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -11,6 +11,7 @@ use http_body_util::BodyExt;
 use hyper::body::Bytes;
 use nix::unistd::{unlinkat, UnlinkatFlags};
 use pbs_tools::lru_cache::LruCache;
+use proxmox_http::ProxyConfig;
 use tokio::io::AsyncWriteExt;
 use tracing::{info, warn};
 
@@ -195,20 +196,32 @@ impl DataStoreImpl {
     }
 }
 
+pub type ProxyConfigCallback = fn() -> Option<ProxyConfig>;
+
 pub struct DataStoreLookup<'a> {
     name: &'a str,
     operation: Operation,
+    proxy_config_callback: Arc<Option<ProxyConfigCallback>>,
 }
 
 impl<'a> DataStoreLookup<'a> {
-    pub fn with(name: &'a str, operation: Operation) -> Self {
-        Self { name, operation }
+    pub fn with(
+        name: &'a str,
+        operation: Operation,
+        proxy_config_callback: Arc<Option<ProxyConfigCallback>>,
+    ) -> Self {
+        Self {
+            name,
+            operation,
+            proxy_config_callback,
+        }
     }
 }
 
 pub struct DataStore {
     inner: Arc<DataStoreImpl>,
     operation: Option<Operation>,
+    proxy_config_callback: Arc<Option<ProxyConfigCallback>>,
 }
 
 impl Clone for DataStore {
@@ -224,6 +237,7 @@ impl Clone for DataStore {
         DataStore {
             inner: self.inner.clone(),
             operation: new_operation,
+            proxy_config_callback: Arc::clone(&self.proxy_config_callback),
         }
     }
 }
@@ -409,6 +423,7 @@ impl DataStore {
         Arc::new(Self {
             inner: unsafe { DataStoreImpl::new_test() },
             operation: None,
+            proxy_config_callback: Arc::new(None),
         })
     }
 
@@ -437,6 +452,7 @@ impl DataStore {
             user: pbs_config::backup_user()?,
             base_path: S3_CLIENT_RATE_LIMITER_BASE_PATH.into(),
         };
+        let proxy_config = self.proxy_config_callback.map(|cb| cb()).flatten();
 
         let options = S3ClientOptions::from_config(
             config.config,
@@ -444,6 +460,7 @@ impl DataStore {
             Some(bucket),
             self.name().to_owned(),
             Some(rate_limiter_options),
+            proxy_config,
         );
         let s3_client = S3Client::new(options)?;
         DatastoreBackend::S3(Arc::new(s3_client))
@@ -507,6 +524,7 @@ impl DataStore {
                 return Ok(Arc::new(Self {
                     inner: Arc::clone(datastore),
                     operation: Some(lookup.operation),
+                    proxy_config_callback: Arc::clone(&lookup.proxy_config_callback),
                 }));
             }
             Arc::clone(&datastore.chunk_store)
@@ -532,6 +550,7 @@ impl DataStore {
         Ok(Arc::new(Self {
             inner: datastore,
             operation: Some(lookup.operation),
+            proxy_config_callback: Arc::clone(&lookup.proxy_config_callback),
         }))
     }
 
@@ -557,7 +576,7 @@ impl DataStore {
     {
         // the datastore drop handler does the checking if tasks are running and clears the
         // cache entry, so we just have to trigger it here
-        let lookup = DataStoreLookup::with(name, Operation::Lookup);
+        let lookup = DataStoreLookup::with(name, Operation::Lookup, Arc::new(None));
         let _ = DataStore::lookup_datastore(lookup);
     }
 
@@ -617,7 +636,11 @@ impl DataStore {
             update_active_operations(&name, operation, 1)?;
         }
 
-        Ok(Arc::new(Self { inner, operation }))
+        Ok(Arc::new(Self {
+            inner,
+            operation,
+            proxy_config_callback: Arc::new(None),
+        }))
     }
 
     fn with_store_and_config(
@@ -2352,7 +2375,11 @@ impl DataStore {
     /// Destroy a datastore. This requires that there are no active operations on the datastore.
     ///
     /// This is a synchronous operation and should be run in a worker-thread.
-    pub fn destroy(name: &str, destroy_data: bool) -> Result<(), Error> {
+    pub fn destroy(
+        name: &str,
+        destroy_data: bool,
+        proxy_config_callback: ProxyConfigCallback,
+    ) -> Result<(), Error> {
         let config_lock = pbs_config::datastore::lock_config()?;
 
         let (mut config, _digest) = pbs_config::datastore::config()?;
@@ -2401,9 +2428,10 @@ impl DataStore {
             }
         }
 
-        if let (_backend, Some(s3_client)) =
-            Self::s3_client_and_backend_from_datastore_config(&datastore_config)?
-        {
+        if let (_backend, Some(s3_client)) = Self::s3_client_and_backend_from_datastore_config(
+            &datastore_config,
+            proxy_config_callback,
+        )? {
             // Delete all objects within the datastore prefix
             let prefix = S3PathPrefix::Some(String::default());
             let delete_objects_error =
@@ -2418,7 +2446,10 @@ impl DataStore {
                 remove(".chunks", &mut ok);
             }
         } else if let (_backend, Some(s3_client)) =
-            Self::s3_client_and_backend_from_datastore_config(&datastore_config)?
+            Self::s3_client_and_backend_from_datastore_config(
+                &datastore_config,
+                proxy_config_callback,
+            )?
         {
             // Only delete in-use marker so datastore can be re-imported
             let object_key = S3ObjectKey::try_from(S3_DATASTORE_IN_USE_MARKER)
@@ -2637,6 +2668,7 @@ impl DataStore {
 
     pub fn s3_client_and_backend_from_datastore_config(
         datastore_config: &DataStoreConfig,
+        proxy_config_callback: ProxyConfigCallback,
     ) -> Result<(DatastoreBackendType, Option<S3Client>), Error> {
         let backend_config: DatastoreBackendConfig =
             datastore_config.backend.as_deref().unwrap_or("").parse()?;
@@ -2671,6 +2703,7 @@ impl DataStore {
             Some(bucket),
             datastore_config.name.to_owned(),
             Some(rate_limiter_options),
+            proxy_config_callback(),
         );
         let s3_client = S3Client::new(options)
             .context("failed to create s3 client")
diff --git a/pbs-datastore/src/snapshot_reader.rs b/pbs-datastore/src/snapshot_reader.rs
index d522a02d7..ddae632cc 100644
--- a/pbs-datastore/src/snapshot_reader.rs
+++ b/pbs-datastore/src/snapshot_reader.rs
@@ -16,7 +16,7 @@ use pbs_api_types::{
 };
 
 use crate::backup_info::BackupDir;
-use crate::datastore::DataStoreLookup;
+use crate::datastore::{DataStoreLookup, ProxyConfigCallback};
 use crate::dynamic_index::DynamicIndexReader;
 use crate::fixed_index::FixedIndexReader;
 use crate::index::IndexFile;
@@ -125,8 +125,9 @@ impl SnapshotReader {
     pub fn chunk_iterator<F: Fn(&[u8; 32]) -> bool>(
         &'_ self,
         skip_fn: F,
+        proxy_config_callback: Arc<Option<ProxyConfigCallback>>,
     ) -> Result<SnapshotChunkIterator<'_, F>, Error> {
-        SnapshotChunkIterator::new(self, skip_fn)
+        SnapshotChunkIterator::new(self, skip_fn, proxy_config_callback)
     }
 }
 
@@ -139,6 +140,7 @@ pub struct SnapshotChunkIterator<'a, F: Fn(&[u8; 32]) -> bool> {
     snapshot_reader: &'a SnapshotReader,
     todo_list: Vec<String>,
     skip_fn: F,
+    proxy_config_callback: Arc<Option<ProxyConfigCallback>>,
     #[allow(clippy::type_complexity)]
     current_index: Option<(Rc<Box<dyn IndexFile>>, usize, Vec<(usize, u64)>)>,
 }
@@ -166,6 +168,7 @@ impl<F: Fn(&[u8; 32]) -> bool> Iterator for SnapshotChunkIterator<'_, F> {
                 let lookup = DataStoreLookup::with(
                     self.snapshot_reader.datastore_name(),
                     Operation::Read,
+                    Arc::clone(&self.proxy_config_callback),
                 );
                 let datastore = DataStore::lookup_datastore(lookup)?;
                 let order =
@@ -192,7 +195,11 @@ impl<F: Fn(&[u8; 32]) -> bool> Iterator for SnapshotChunkIterator<'_, F> {
 }
 
 impl<'a, F: Fn(&[u8; 32]) -> bool> SnapshotChunkIterator<'a, F> {
-    pub fn new(snapshot_reader: &'a SnapshotReader, skip_fn: F) -> Result<Self, Error> {
+    pub fn new(
+        snapshot_reader: &'a SnapshotReader,
+        skip_fn: F,
+        proxy_config_callback: Arc<Option<ProxyConfigCallback>>,
+    ) -> Result<Self, Error> {
         let mut todo_list = Vec::new();
 
         for filename in snapshot_reader.file_list() {
@@ -209,6 +216,7 @@ impl<'a, F: Fn(&[u8; 32]) -> bool> SnapshotChunkIterator<'a, F> {
             todo_list,
             current_index: None,
             skip_fn,
+            proxy_config_callback,
         })
     }
 }
diff --git a/src/api2/admin/s3.rs b/src/api2/admin/s3.rs
index 73388281b..ea3b15979 100644
--- a/src/api2/admin/s3.rs
+++ b/src/api2/admin/s3.rs
@@ -47,6 +47,7 @@ pub async fn check(
     let config: S3ClientConf = config
         .lookup(S3_CFG_TYPE_ID, &s3_client_id)
         .context("config lookup failed")?;
+    let http_proxy = crate::tools::node_proxy_config();
 
     let store_prefix = store_prefix.unwrap_or_default();
     let options = S3ClientOptions::from_config(
@@ -55,6 +56,7 @@ pub async fn check(
         Some(bucket),
         store_prefix,
         None,
+        http_proxy,
     );
 
     let test_object_key =
diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
index f845fe2d0..45a251851 100644
--- a/src/api2/config/datastore.rs
+++ b/src/api2/config/datastore.rs
@@ -128,7 +128,10 @@ pub(crate) fn do_create_datastore(
     )?;
 
     let (backend_type, backend_s3_client) =
-        match DataStore::s3_client_and_backend_from_datastore_config(&datastore)? {
+        match DataStore::s3_client_and_backend_from_datastore_config(
+            &datastore,
+            crate::tools::node_proxy_config,
+        )? {
             (backend_type, Some(s3_client)) => {
                 if !overwrite_in_use {
                     let object_key = S3ObjectKey::try_from(S3_DATASTORE_IN_USE_MARKER)
@@ -342,7 +345,10 @@ pub fn create_datastore(
 
     let store_name = config.name.to_string();
 
-    let (backend, s3_client) = DataStore::s3_client_and_backend_from_datastore_config(&config)?;
+    let (backend, s3_client) = DataStore::s3_client_and_backend_from_datastore_config(
+        &config,
+        crate::tools::node_proxy_config,
+    )?;
     if let Some(s3_client) = s3_client {
         proxmox_async::runtime::block_on(s3_client.head_bucket())
             .context("failed to access bucket")
@@ -770,7 +776,11 @@ pub async fn delete_datastore(
         auth_id.to_string(),
         to_stdout,
         move |_worker| {
-            pbs_datastore::DataStore::destroy(&name, destroy_data)?;
+            pbs_datastore::DataStore::destroy(
+                &name,
+                destroy_data,
+                crate::tools::node_proxy_config,
+            )?;
 
             // ignore errors
             let _ = jobstate::remove_state_file("prune", &name);
diff --git a/src/api2/config/s3.rs b/src/api2/config/s3.rs
index 27b3c4cc2..20508fe33 100644
--- a/src/api2/config/s3.rs
+++ b/src/api2/config/s3.rs
@@ -348,10 +348,17 @@ pub async fn list_buckets(
     let config: S3ClientConf = config
         .lookup(S3_CFG_TYPE_ID, &id)
         .context("config lookup failed")?;
+    let http_proxy = crate::tools::node_proxy_config();
 
     let empty_prefix = String::new();
-    let options =
-        S3ClientOptions::from_config(config.config, config.secret_key, None, empty_prefix, None);
+    let options = S3ClientOptions::from_config(
+        config.config,
+        config.secret_key,
+        None,
+        empty_prefix,
+        None,
+        http_proxy,
+    );
     let client = S3Client::new(options).context("client creation failed")?;
     let list_buckets_response = client
         .list_buckets()
diff --git a/src/tape/pool_writer/new_chunks_iterator.rs b/src/tape/pool_writer/new_chunks_iterator.rs
index 0e29516f8..f8040056a 100644
--- a/src/tape/pool_writer/new_chunks_iterator.rs
+++ b/src/tape/pool_writer/new_chunks_iterator.rs
@@ -39,12 +39,15 @@ impl NewChunksIterator {
         let datastore_name = snapshot_reader.datastore_name().to_string();
 
         let result: Result<(), Error> = proxmox_lang::try_block!({
-            let chunk_iter = snapshot_reader.chunk_iterator(move |digest| {
-                catalog_set
-                    .lock()
-                    .unwrap()
-                    .contains_chunk(&datastore_name, digest)
-            })?;
+            let chunk_iter = snapshot_reader.chunk_iterator(
+                move |digest| {
+                    catalog_set
+                        .lock()
+                        .unwrap()
+                        .contains_chunk(&datastore_name, digest)
+                },
+                Arc::new(None), // FIXME: required once S3 <-> tape is implemented
+            )?;
 
             let reader_pool =
                 ParallelHandler::new("tape backup chunk reader pool", read_threads, {
diff --git a/src/tools/mod.rs b/src/tools/mod.rs
index 4e9f9928c..5f3505417 100644
--- a/src/tools/mod.rs
+++ b/src/tools/mod.rs
@@ -4,6 +4,7 @@
 use anyhow::{bail, Error};
 
 use std::collections::HashSet;
+use std::sync::Arc;
 
 use pbs_api_types::{
     Authid, BackupContent, CryptMode, Operation, SnapshotListItem, SnapshotVerifyState,
@@ -203,5 +204,5 @@ pub(super) fn node_proxy_config() -> Option<ProxyConfig> {
 /// Read the nodes http proxy config from the node config.
 #[inline(always)]
 pub fn lookup_with<'a>(name: &'a str, operation: Operation) -> DataStoreLookup<'a> {
-    DataStoreLookup::with(name, operation)
+    DataStoreLookup::with(name, operation, Arc::new(Some(node_proxy_config)))
 }
-- 
2.47.3


_______________________________________________
pbs-devel mailing list
pbs-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel