From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [PATCH proxmox-backup v6 07/15] sync: pull: revert avoiding reinstantiation for encountered chunks map
Date: Fri, 17 Apr 2026 11:26:13 +0200
Message-ID: <20260417092621.455374-8-c.ebner@proxmox.com>
X-Mailer: git-send-email 2.47.3
In-Reply-To: <20260417092621.455374-1-c.ebner@proxmox.com>
References: <20260417092621.455374-1-c.ebner@proxmox.com>
List-Id: Proxmox Backup Server development discussion

While keeping a store-wide instance to avoid reinstantiation for each
group is desirable when processing groups sequentially, a single shared
instance cannot work when syncing multiple groups in parallel.

This is in preparation for parallel group syncs and reverts commit
ecdec5bc ("sync: pull: avoid reinstantiation for encountered chunks
map").
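To illustrate the conflict this avoids, a minimal sketch follows. This is
not the patch code: EncounteredChunks is reduced to a plain HashSet and
all names are simplified. With one shared, store-wide map, the per-group
clear() issued by one sync task would wipe entries another task still
relies on; giving each group task its own instance removes the shared
state entirely.

    use std::collections::HashSet;
    use std::sync::{Arc, Mutex};
    use std::thread;

    type ChunkDigest = [u8; 32];

    fn sync_group(encountered: Arc<Mutex<HashSet<ChunkDigest>>>, chunks: &[ChunkDigest]) {
        for chunk in chunks {
            // insert() returns true only for digests not seen before,
            // i.e. chunks that still have to be fetched from the source
            if encountered.lock().unwrap().insert(*chunk) {
                // fetch chunk from the remote ...
            }
        }
    }

    fn main() {
        let handles: Vec<_> = (0u8..2)
            .map(|group| {
                thread::spawn(move || {
                    // per-group instance, as after this patch: clearing or
                    // dropping one group's map cannot affect the other group
                    let encountered = Arc::new(Mutex::new(HashSet::with_capacity(1024 * 64)));
                    sync_group(encountered, &[[group; 32]]);
                })
            })
            .collect();
        for handle in handles {
            handle.join().unwrap();
        }
    }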
Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
changes since version 5:
- no changes

 src/server/pull.rs | 26 +++++---------------------
 1 file changed, 5 insertions(+), 21 deletions(-)

diff --git a/src/server/pull.rs b/src/server/pull.rs
index ca17eb243..45fe9f8b1 100644
--- a/src/server/pull.rs
+++ b/src/server/pull.rs
@@ -620,7 +620,6 @@ async fn pull_group(
     source_namespace: &BackupNamespace,
     group: &BackupGroup,
     progress: &mut StoreProgress,
-    encountered_chunks: Arc<Mutex<EncounteredChunks>>,
 ) -> Result<SyncStats, Error> {
     let mut already_synced_skip_info = SkipInfo::new(SkipReason::AlreadySynced);
     let mut transfer_last_skip_info = SkipInfo::new(SkipReason::TransferLast);
@@ -721,6 +720,9 @@ async fn pull_group(
         transfer_last_skip_info.reset();
     }
 
+    // start with 65536 chunks (up to 256 GiB)
+    let encountered_chunks = Arc::new(Mutex::new(EncounteredChunks::with_capacity(1024 * 64)));
+
     let backup_group = params
         .target
         .store
@@ -984,9 +986,6 @@ pub(crate) async fn pull_store(mut params: PullParameters) -> Result<SyncStats,
     let mut sync_stats = SyncStats::default();
 
-    // start with 65536 chunks (up to 256 GiB)
-    let encountered_chunks = Arc::new(Mutex::new(EncounteredChunks::with_capacity(1024 * 64)));
-
     for namespace in namespaces {
-        match pull_ns(&params, &namespace, encountered_chunks.clone()).await {
+        match pull_ns(&params, &namespace).await {
             Ok((ns_progress, ns_stats, ns_errors)) => {
                 errors |= ns_errors;
@@ -1066,7 +1065,6 @@ pub(crate) async fn pull_store(mut params: PullParameters) -> Result<SyncStats,
 async fn pull_ns(
     params: &PullParameters,
     namespace: &BackupNamespace,
-    encountered_chunks: Arc<Mutex<EncounteredChunks>>,
 ) -> Result<(StoreProgress, SyncStats, bool), Error> {
     let list: Vec<BackupGroup> = params.source.list_groups(namespace, &params.owner).await?;
 
@@ -1125,16 +1123,7 @@ async fn pull_ns(
             );
             errors = true; // do not stop here, instead continue
         } else {
-            encountered_chunks.lock().unwrap().clear();
-            match pull_group(
-                params,
-                namespace,
-                &group,
-                &mut progress,
-                encountered_chunks.clone(),
-            )
-            .await
-            {
+            match pull_group(params, namespace, &group, &mut progress).await {
                 Ok(stats) => sync_stats.add(stats),
                 Err(err) => {
                     info!("sync group {} failed - {err:#}", &group);
@@ -1255,9 +1244,4 @@ impl EncounteredChunks {
             }
         }
     }
-
-    /// Clear all entries
-    fn clear(&mut self) {
-        self.chunk_set.clear();
-    }
 }
-- 
2.47.3
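A note on the pre-allocated capacity restored above, assuming the usual
4 MiB average chunk size (the 256 GiB figure is from the hunk's own
comment; the check below is illustrative only):

    fn main() {
        const AVG_CHUNK_SIZE: u64 = 4 * 1024 * 1024; // 4 MiB average chunk
        const CAPACITY: u64 = 1024 * 64; // 65536 pre-allocated digest slots
        // 65536 chunks x 4 MiB = 256 GiB of data before the set must regrow
        assert_eq!(CAPACITY * AVG_CHUNK_SIZE, 256 * 1024 * 1024 * 1024);
    }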