From: Lukas Wagner <l.wagner@proxmox.com>
To: pdm-devel@lists.proxmox.com
Subject: [PATCH datacenter-manager 2/5] parallel fetcher: make sure to inherit log context
Date: Wed, 4 Feb 2026 16:27:20 +0100
Message-ID: <20260204152723.482258-3-l.wagner@proxmox.com>
In-Reply-To: <20260204152723.482258-1-l.wagner@proxmox.com>
References: <20260204152723.482258-1-l.wagner@proxmox.com>
List-Id: Proxmox Datacenter Manager development discussion

If ParallelFetcher is used in a worker task, we need to make sure to inherit the log context from the current task; otherwise, log messages printed in the handler function do not end up in the task log.
Signed-off-by: Lukas Wagner <l.wagner@proxmox.com>
---
 server/src/parallel_fetcher.rs | 33 +++++++++++++++++++++++++--------
 1 file changed, 25 insertions(+), 8 deletions(-)

diff --git a/server/src/parallel_fetcher.rs b/server/src/parallel_fetcher.rs
index 7fb0d162..f07a2de3 100644
--- a/server/src/parallel_fetcher.rs
+++ b/server/src/parallel_fetcher.rs
@@ -8,6 +8,7 @@ use anyhow::Error;
 use tokio::sync::{OwnedSemaphorePermit, Semaphore};
 use tokio::task::JoinSet;
 
+use proxmox_log::LogContext;
 use pve_api_types::ClusterNodeIndexResponse;
 
 use pdm_api_types::remotes::{Remote, RemoteType};
@@ -82,14 +83,19 @@ impl ParallelFetcher {
 
             let semaphore = Arc::clone(&total_connections_semaphore);
             let f = func.clone();
-
-            remote_join_set.spawn(Self::fetch_remote(
+            let future = Self::fetch_remote(
                 remote,
                 self.context.clone(),
                 semaphore,
                 f,
                 self.max_connections_per_remote,
-            ));
+            );
+
+            if let Some(log_context) = LogContext::current() {
+                remote_join_set.spawn(log_context.scope(future));
+            } else {
+                remote_join_set.spawn(future);
+            }
         }
 
         let mut results = FetchResults::default();
@@ -160,14 +166,20 @@ impl ParallelFetcher {
                 let node_name = node.node.clone();
                 let context_clone = context.clone();
 
-                nodes_join_set.spawn(Self::fetch_node(
+                let future = Self::fetch_node(
                     func_clone,
                     context_clone,
                     remote_clone,
                     node_name,
                     permit,
                     Some(per_remote_connections_permit),
-                ));
+                );
+
+                if let Some(log_context) = LogContext::current() {
+                    nodes_join_set.spawn(log_context.scope(future));
+                } else {
+                    nodes_join_set.spawn(future);
+                }
             }
 
             while let Some(join_result) = nodes_join_set.join_next().await {
@@ -251,15 +263,20 @@ impl ParallelFetcher {
             let remote_id = remote.id.clone();
             let context = self.context.clone();
             let func = func.clone();
-
-            node_join_set.spawn(async move {
+            let future = async move {
                 let permit = total_connections_semaphore.acquire_owned().await.unwrap();
 
                 (
                     remote_id,
                     Self::fetch_node(func, context, remote, "localhost".into(), permit, None).await,
                 )
-            });
+            };
+
+            if let Some(log_context) = LogContext::current() {
+                node_join_set.spawn(log_context.scope(future));
+            } else {
+                node_join_set.spawn(future);
+            }
         }
 
         while let Some(a) = node_join_set.join_next().await {
-- 
2.47.3
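For readers unfamiliar with the proxmox_log task-local machinery, the pattern applied three times above reduces to the minimal sketch below. Only LogContext::current() and LogContext::scope() are taken from the patch; the function name, the JoinSet payload type and the tracing::info! call are illustrative assumptions, not code from the repository:

    use proxmox_log::LogContext;
    use tokio::task::JoinSet;

    fn spawn_task_logged(join_set: &mut JoinSet<()>) {
        let future = async {
            // Without the scope() wrapper below, this message would not
            // show up in the worker task's log.
            tracing::info!("fetching data from remote");
        };

        // Tokio tasks do not inherit task-local values, so the log context
        // of the current task has to be re-entered explicitly.
        if let Some(log_context) = LogContext::current() {
            join_set.spawn(log_context.scope(future));
        } else {
            // Not running in a worker task: no context to inherit.
            join_set.spawn(future);
        }
    }

Since LogContext::current() returns None whenever no worker task context is set, the fall-back branch spawns the future unchanged instead of treating the missing context as an error.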