From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from firstgate.proxmox.com (firstgate.proxmox.com [IPv6:2a01:7e0:0:424::9]) by lore.proxmox.com (Postfix) with ESMTPS id DAD201FF13B for ; Wed, 22 Apr 2026 15:19:15 +0200 (CEST) Received: from firstgate.proxmox.com (localhost [127.0.0.1]) by firstgate.proxmox.com (Proxmox) with ESMTP id 46A8C1F8F9; Wed, 22 Apr 2026 15:19:14 +0200 (CEST) From: Christian Ebner To: pbs-devel@lists.proxmox.com Subject: [PATCH proxmox-backup v8 09/10] sync: move in-progress snapshot filter to helper and use log line sender Date: Wed, 22 Apr 2026 15:18:19 +0200 Message-ID: <20260422131820.769620-10-c.ebner@proxmox.com> X-Mailer: git-send-email 2.47.3 In-Reply-To: <20260422131820.769620-1-c.ebner@proxmox.com> References: <20260422131820.769620-1-c.ebner@proxmox.com> MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Bm-Milter-Handled: 55990f41-d878-4baa-be0a-ee34c49e34d2 X-Bm-Transport-Timestamp: 1776863831475 X-SPAM-LEVEL: Spam detection results: 0 AWL 0.071 Adjusted score from AWL reputation of From: address BAYES_00 -1.9 Bayes spam probability is 0 to 1% DMARC_MISSING 0.1 Missing DMARC policy KAM_DMARC_STATUS 0.01 Test Rule for DKIM or SPF Failure with Strict Alignment SPF_HELO_NONE 0.001 SPF: HELO does not publish an SPF Record SPF_PASS -0.001 SPF: sender matches SPF record Message-ID-Hash: SK6CYLI6MHCXOLFTNLMRI4YKS5VJ2A7X X-Message-ID-Hash: SK6CYLI6MHCXOLFTNLMRI4YKS5VJ2A7X X-MailFrom: c.ebner@proxmox.com X-Mailman-Rule-Misses: dmarc-mitigation; no-senders; approved; loop; banned-address; emergency; member-moderation; nonmember-moderation; administrivia; implicit-dest; max-recipients; max-size; news-moderation; no-subject; digests; suspicious-header X-Mailman-Version: 3.3.10 Precedence: list List-Id: Proxmox Backup Server development discussion List-Help: List-Owner: List-Post: List-Subscribe: List-Unsubscribe: Currently, in-progress snapshots are being filtered out from the list of source snapshots by pre-filtering and 
logging skipped snapshots after gathering the list. For parallel sync jobs, logging now has to go through the BufferedLogger by sending the logs via the LogLineSender. This, however, requires awaiting inside an async context, which cannot happen within the filter_map() closure. Therefore, factor out the filtering to a dedicated helper in order to avoid polluting the SyncSource trait with a completely unrelated parameter, and refactor the filtering within that helper so the logging can happen in an async context. Signed-off-by: Christian Ebner <c.ebner@proxmox.com> --- src/server/pull.rs | 3 ++- src/server/push.rs | 3 ++- src/server/sync.rs | 39 ++++++++++++++++++++++++++------------- 3 files changed, 30 insertions(+), 15 deletions(-) diff --git a/src/server/pull.rs b/src/server/pull.rs index 97def85a5..47c568376 100644 --- a/src/server/pull.rs +++ b/src/server/pull.rs @@ -31,7 +31,7 @@ use pbs_tools::buffered_logger::{BufferedLogger, LogLineSender}; use pbs_tools::sha::sha256; use super::sync::{ - check_namespace_depth_limit, exclude_not_verified_or_encrypted, + check_namespace_depth_limit, exclude_not_verified_or_encrypted, filter_out_in_progress, ignore_not_verified_or_encrypted, LocalSource, RemoteSource, RemovedVanishedStats, SkipInfo, SkipReason, SyncSource, SyncSourceReader, SyncStats, }; @@ -732,6 +732,7 @@ async fn pull_group( .source .list_backup_snapshots(source_namespace, group) .await?; + raw_list = filter_out_in_progress(raw_list, Arc::clone(&log_sender)).await?; raw_list.sort_unstable_by_key(|a| a.backup.time); let target_ns = source_namespace.map_prefix(&params.source.get_ns(), &params.target.ns)?; diff --git a/src/server/push.rs b/src/server/push.rs index 2ff46211c..1fbb82ebe 100644 --- a/src/server/push.rs +++ b/src/server/push.rs @@ -34,7 +34,7 @@ use pbs_tools::buffered_logger::{BufferedLogger, LogLineSender}; use proxmox_human_byte::HumanByte; use super::sync::{ - check_namespace_depth_limit, exclude_not_verified_or_encrypted, + check_namespace_depth_limit, 
exclude_not_verified_or_encrypted, filter_out_in_progress, ignore_not_verified_or_encrypted, LocalSource, RemovedVanishedStats, SkipInfo, SkipReason, SyncSource, SyncStats, }; @@ -777,6 +777,7 @@ pub(crate) async fn push_group( .source .list_backup_snapshots(namespace, group) .await?; + snapshots = filter_out_in_progress(snapshots, Arc::clone(&log_sender)).await?; snapshots.sort_unstable_by_key(|a| a.backup.time); if snapshots.is_empty() { diff --git a/src/server/sync.rs b/src/server/sync.rs index 17ed4839f..4827dc3f2 100644 --- a/src/server/sync.rs +++ b/src/server/sync.rs @@ -13,7 +13,7 @@ use futures::{future::FutureExt, select}; use hyper::http::StatusCode; use pbs_config::BackupLockGuard; use serde_json::json; -use tracing::{info, warn}; +use tracing::{info, warn, Level}; use proxmox_human_byte::HumanByte; use proxmox_rest_server::WorkerTask; @@ -28,6 +28,7 @@ use pbs_client::{BackupReader, BackupRepository, HttpClient, RemoteChunkReader}; use pbs_datastore::data_blob::DataBlob; use pbs_datastore::read_chunk::AsyncReadChunk; use pbs_datastore::{BackupManifest, DataStore, ListNamespacesRecursive, LocalChunkReader}; +use pbs_tools::buffered_logger::LogLineSender; use crate::backup::ListAccessibleBackupGroups; use crate::server::jobstate::Job; @@ -375,18 +376,7 @@ impl SyncSource for RemoteSource { let mut result = self.client.get(&path, Some(args)).await?; let snapshot_list: Vec<SnapshotListItem> = serde_json::from_value(result["data"].take())?; - Ok(snapshot_list - .into_iter() - .filter_map(|item: SnapshotListItem| { - // in-progress backups can't be synced - if item.size.is_none() { - info!("skipping snapshot {} - in-progress backup", item.backup); - return None; - } - - Some(item) - }) - .collect::<Vec<SnapshotListItem>>()) + Ok(snapshot_list) } fn get_ns(&self) -> BackupNamespace { @@ -736,6 +726,29 @@ pub fn do_sync_job( Ok(upid_str) } +pub(super) async fn filter_out_in_progress( + snapshots: Vec<SnapshotListItem>, + log_sender: Arc<LogLineSender>, +) -> Result<Vec<SnapshotListItem>, Error> { + let mut filtered = Vec::with_capacity(snapshots.len()); 
+ + for item in snapshots { + // in-progress backups can't be synced + if item.size.is_none() { + log_sender + .log( + Level::INFO, + format!("skipping snapshot {} - in-progress backup", item.backup), + ) + .await?; + } else { + filtered.push(item); + } + } + + Ok(filtered) +} + pub(super) fn ignore_not_verified_or_encrypted( manifest: &BackupManifest, snapshot: &BackupDir, -- 2.47.3