From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Date: Mon, 15 Dec 2025 14:24:54 +0100
Message-ID: <20251215132454.628090-1-c.ebner@proxmox.com>
X-Mailer: git-send-email 2.47.3
Subject: [pbs-devel] [PATCH proxmox-backup] GC: log progress output for phase 2 on datastores backed by s3

Currently there is no progress output for phase 2 of garbage collection
on datastores with an s3 backend. Since spamming the task log must be
avoided, and the number of chunks to process can be inferred neither
during phase 1 nor via the S3 API, use a time-based approach instead.

Spawn a new thread for logging and track the processed chunks with an
atomic counter. The progress thread handle and a channel are used to
stop the logging thread once garbage collection has finished or an
abort has been triggered.
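The approach reduces to the following stand-alone sketch (illustrative
only: shortened interval, plain println! instead of the task-log
plumbing, and no interval escalation; the actual patch additionally
scopes the thread into the task's LogContext):

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::{self, RecvTimeoutError};
    use std::sync::Arc;
    use std::thread;
    use std::time::Duration;

    fn main() {
        let chunk_count = Arc::new(AtomicUsize::new(0));
        let counter = Arc::clone(&chunk_count);
        let (abort_tx, abort_rx) = mpsc::channel::<()>();

        // recv_timeout doubles as sleep interval and cancellation point:
        // Err(Timeout) means "log and keep waiting", Ok(()) means "stop".
        let progress_log_handle = thread::spawn(move || {
            let timeout = Duration::from_millis(200); // 60s in the patch
            while let Err(RecvTimeoutError::Timeout) = abort_rx.recv_timeout(timeout) {
                println!("swept chunks: {}", counter.load(Ordering::SeqCst));
            }
        });

        // Stand-in for the sweep loop over the listed S3 objects.
        for _ in 0..1000 {
            chunk_count.fetch_add(1, Ordering::SeqCst);
            thread::sleep(Duration::from_millis(1));
        }

        // Signal the logger to stop, then wait for it to exit.
        abort_tx.send(()).expect("progress logger exited early");
        progress_log_handle
            .join()
            .expect("failed to join progress logger");

        println!(
            "processed {} total chunks",
            chunk_count.load(Ordering::SeqCst)
        );
    }

Waiting via the channel's recv_timeout means a single send() (or the
sender being dropped) wakes the thread immediately, so finishing or
aborting GC never blocks for up to a full logging interval.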
Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
 pbs-datastore/Cargo.toml       |  1 +
 pbs-datastore/src/datastore.rs | 47 +++++++++++++++++++++++++++++++---
 2 files changed, 45 insertions(+), 3 deletions(-)

diff --git a/pbs-datastore/Cargo.toml b/pbs-datastore/Cargo.toml
index 8ce930a94..80e06cb0e 100644
--- a/pbs-datastore/Cargo.toml
+++ b/pbs-datastore/Cargo.toml
@@ -38,6 +38,7 @@ proxmox-http.workspace = true
 proxmox-human-byte.workspace = true
 proxmox-io.workspace = true
 proxmox-lang.workspace=true
+proxmox-log.workspace=true
 proxmox-s3-client = { workspace = true, features = [ "impl" ] }
 proxmox-schema = { workspace = true, features = [ "api-macro" ] }
 proxmox-serde = { workspace = true, features = [ "serde_json" ] }
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index 9c57aaac1..84866a529 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -3,6 +3,8 @@ use std::io::{self, Write};
 use std::os::unix::ffi::OsStrExt;
 use std::os::unix::io::AsRawFd;
 use std::path::{Path, PathBuf};
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::mpsc::RecvTimeoutError;
 use std::sync::{Arc, LazyLock, Mutex};
 use std::time::{Duration, SystemTime};
@@ -15,6 +17,7 @@ use tokio::io::AsyncWriteExt;
 use tracing::{info, warn};
 use proxmox_human_byte::HumanByte;
+use proxmox_log::LogContext;
 use proxmox_s3_client::{
     S3Client, S3ClientConf, S3ClientOptions, S3ObjectKey, S3PathPrefix, S3RateLimiterOptions,
 };
@@ -1682,7 +1685,7 @@ impl DataStore {
         info!("Start GC phase2 (sweep unused chunks)");
 
         if let Some(ref s3_client) = s3_client {
-            let mut chunk_count = 0;
+            let chunk_count = Arc::new(AtomicUsize::new(0));
             let prefix = S3PathPrefix::Some(".chunks/".to_string());
             // Operates in batches of 1000 objects max per request
             let mut list_bucket_result =
@@ -1692,6 +1695,34 @@
             let mut delete_list = Vec::with_capacity(S3_DELETE_BATCH_LIMIT);
             let mut delete_list_age = epoch_i64();
 
+            let (abort_tx, abort_rx) = std::sync::mpsc::channel();
+            let counter = Arc::clone(&chunk_count);
+            let log_context = LogContext::current();
+
+            let progress_log_handle = std::thread::spawn(move || {
+                if let Some(log_context) = log_context {
+                    log_context.sync_scope(|| {
+                        let mut timeout = Duration::from_secs(60);
+                        let mut iterations = 0;
+                        while let Err(RecvTimeoutError::Timeout) = abort_rx.recv_timeout(timeout) {
+                            let count = counter.load(Ordering::SeqCst);
+                            info!("swept chunks: {count}");
+
+                            iterations += 1;
+                            if iterations >= 60 && timeout < Duration::from_secs(60 * 60) {
+                                timeout = timeout.saturating_mul(60);
+                                info!("log output reduced to once per hour");
+                            } else if iterations >= 60 * 60
+                                && timeout < Duration::from_secs(24 * 60 * 60)
+                            {
+                                timeout = timeout.saturating_mul(24);
+                                info!("log output reduced to once per day");
+                            }
+                        }
+                    })
+                }
+            });
+
             let s3_delete_batch = |delete_list: &mut Vec<(S3ObjectKey, BackupLockGuard)>,
                                    s3_client: &Arc<S3Client>|
              -> Result<(), Error> {
@@ -1800,7 +1831,7 @@
                     );
                 }
 
-                chunk_count += 1;
+                chunk_count.fetch_add(1, Ordering::SeqCst);
 
                 // drop guard because of async S3 call below
                 drop(_guard);
@@ -1831,7 +1862,17 @@
                 s3_delete_batch(&mut delete_list, s3_client)?;
             }
 
-            info!("processed {chunk_count} total chunks");
+            abort_tx
+                .send(())
+                .context("failed to abort progress logging")?;
+            progress_log_handle
+                .join()
+                .map_err(|_err| format_err!("failed to join progress log handle"))?;
+
+            info!(
+                "processed {} total chunks",
+                chunk_count.load(Ordering::SeqCst),
+            );
 
             // Phase 2 GC of Filesystem backed storage is phase 3 for S3 backed GC
             info!("Start GC phase3 (sweep unused chunk markers)");
-- 
2.47.3