From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Date: Wed, 23 Oct 2024 11:11:03 +0200
Message-Id: <20241023091103.80792-6-c.ebner@proxmox.com>
In-Reply-To: <20241023091103.80792-1-c.ebner@proxmox.com>
References: <20241023091103.80792-1-c.ebner@proxmox.com>
Subject: [pbs-devel] [PATCH v2 proxmox-backup 5/5] client: progress log: allow to specify backup log interval

Adds the optional parameters `progress-interval` and `progress-size-interval`,
which allow specifying the interval used for backup progress log output. The
interval can be given either as a time based value (a `TimeSpan` passed to
`progress-interval`) or as a size based value (a `HumanByte` compatible value
passed to `progress-size-interval`). The two options conflict with each other;
only one of them can be set per invocation.

Minimum values of 1s and 100MiB are enforced for the respective variant to
protect from excessive output. Values below the respective minimum disable the
progress log output altogether.

Example client invocations:

- no progress logging:
  `proxmox-backup-client backup root.pxar:/ --progress-interval=0`
- time based progress logging with a 1m 30s interval:
  `proxmox-backup-client backup root.pxar:/ --progress-interval="1m 30s"`
- size based progress logging with a 512MiB interval:
  `proxmox-backup-client backup root.pxar:/ --progress-size-interval=512MiB`

Without either parameter, the default remains time based logging with an
interval of 1 minute.
Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
changes since version 1:
- Adapt to separate optional parameters for time and size based intervals
- Disable logging if given values are below corresponding limits
- Define respective minimum values as constants

 pbs-client/src/backup_writer.rs   | 93 +++++++++++++++++++++++++------
 proxmox-backup-client/src/main.rs | 54 ++++++++++++++++--
 2 files changed, 126 insertions(+), 21 deletions(-)

diff --git a/pbs-client/src/backup_writer.rs b/pbs-client/src/backup_writer.rs
index 37ee39e2e..f0b31443d 100644
--- a/pbs-client/src/backup_writer.rs
+++ b/pbs-client/src/backup_writer.rs
@@ -1,5 +1,6 @@
 use std::collections::HashSet;
 use std::future::Future;
+use std::str::FromStr;
 use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
 use std::sync::{Arc, Mutex};
 use std::time::Instant;
@@ -53,6 +54,7 @@ pub struct UploadOptions {
     pub compress: bool,
     pub encrypt: bool,
     pub fixed_size: Option<u64>,
+    pub progress_log_interval: Option<LogInterval>,
 }
 
 struct UploadStats {
@@ -72,6 +74,19 @@ struct ChunkUploadResponse {
     size: usize,
 }
 
+#[derive(Clone)]
+pub enum LogInterval {
+    None,
+    Size(HumanByte),
+    Time(TimeSpan),
+}
+
+impl Default for LogInterval {
+    fn default() -> Self {
+        Self::Time(TimeSpan::from_str("60s").unwrap())
+    }
+}
+
 type UploadQueueSender = mpsc::Sender<(MergedChunkInfo, Option)>;
 type UploadResultReceiver = oneshot::Receiver>;
 
@@ -359,6 +374,7 @@ impl BackupWriter {
                 options.compress,
                 injections,
                 archive,
+                options.progress_log_interval,
             )
             .await?;
 
@@ -653,6 +669,7 @@ impl BackupWriter {
         compress: bool,
         injections: Option>,
         archive: &str,
+        progress_log_interval: Option<LogInterval>,
     ) -> impl Future<Output = Result<UploadStats, Error>> {
         let total_chunks = Arc::new(AtomicUsize::new(0));
         let total_chunks2 = total_chunks.clone();
@@ -671,6 +688,8 @@ impl BackupWriter {
         let injected_len = Arc::new(AtomicUsize::new(0));
         let injected_len2 = injected_len.clone();
         let uploaded_len = Arc::new(AtomicUsize::new(0));
+        let uploaded_len2 = uploaded_len.clone();
+        let previous_byte_fraction = Arc::new(AtomicUsize::new(0));
 
         let append_chunk_path = format!("{}_index", prefix);
         let upload_chunk_path = format!("{}_chunk", prefix);
@@ -684,23 +703,34 @@ impl BackupWriter {
         let index_csum = Arc::new(Mutex::new(Some(openssl::sha::Sha256::new())));
         let index_csum_2 = index_csum.clone();
 
-        let progress_handle = if archive.ends_with(".img")
-            || archive.ends_with(".pxar")
-            || archive.ends_with(".ppxar")
-        {
-            Some(tokio::spawn(async move {
-                loop {
-                    tokio::time::sleep(tokio::time::Duration::from_secs(60)).await;
-                    progress_log(
-                        stream_len3.load(Ordering::SeqCst),
-                        uploaded_len.load(Ordering::SeqCst),
-                        &start_time,
-                    );
+        let mut progress_handle = None;
+        let mut progress_byte_interval = 0;
+        match progress_log_interval {
+            Some(LogInterval::Time(ref time_span)) => {
+                if archive.ends_with(".img")
+                    || archive.ends_with(".pxar")
+                    || archive.ends_with(".ppxar")
+                {
+                    let duration = std::primitive::f64::from(time_span.clone());
+                    progress_handle = Some(tokio::spawn(async move {
+                        loop {
+                            tokio::time::sleep(tokio::time::Duration::from_secs_f64(duration))
+                                .await;
+                            progress_log(
+                                stream_len3.load(Ordering::SeqCst),
+                                uploaded_len.load(Ordering::SeqCst),
+                                &start_time,
+                            )
+                        }
+                    }))
                 }
-            }))
-        } else {
-            None
-        };
+            }
+            Some(LogInterval::Size(ref human_byte)) => {
+                progress_byte_interval = human_byte.as_u64() as usize
+            }
+            Some(LogInterval::None) => {}
+            None => {}
+        }
 
         stream
             .inject_reused_chunks(injections, stream_len.clone())
@@ -717,6 +747,15 @@ impl BackupWriter {
                     for chunk in chunks {
                         let offset =
                             stream_len.fetch_add(chunk.size() as usize, Ordering::SeqCst) as u64;
+
+                        progress_log_by_byte_interval(
+                            progress_byte_interval,
+                            (offset + chunk.size()) as usize,
+                            &previous_byte_fraction,
+                            &uploaded_len2,
+                            &start_time,
+                        );
+
                         reused_len.fetch_add(chunk.size() as usize, Ordering::SeqCst);
                         injected_len.fetch_add(chunk.size() as usize, Ordering::SeqCst);
                         let digest = chunk.digest();
@@ -734,6 +773,14 @@ impl BackupWriter {
                     total_chunks.fetch_add(1, Ordering::SeqCst);
                     let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64;
 
+                    progress_log_by_byte_interval(
+                        progress_byte_interval,
+                        offset as usize + chunk_len,
+                        &previous_byte_fraction,
+                        &uploaded_len2,
+                        &start_time,
+                    );
+
                     let mut chunk_builder = DataChunkBuilder::new(data.as_ref()).compress(compress);
 
                     if let Some(ref crypt_config) = crypt_config {
@@ -922,6 +969,20 @@ impl BackupWriter {
     }
 }
 
+#[inline(always)]
+fn progress_log_by_byte_interval(
+    interval: usize,
+    pos: usize,
+    previous: &Arc<AtomicUsize>,
+    uploaded: &Arc<AtomicUsize>,
+    start_time: &Instant,
+) {
+    if interval > 0 && pos / interval > previous.load(Ordering::SeqCst) {
+        previous.store(pos / interval, Ordering::SeqCst);
+        progress_log(pos, uploaded.load(Ordering::SeqCst), start_time);
+    }
+}
+
 #[inline(always)]
 fn progress_log(size: usize, size_uploaded: usize, start_time: &Instant) {
     let size = HumanByte::from(size);
diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs
index e4034aa99..47afaa446 100644
--- a/proxmox-backup-client/src/main.rs
+++ b/proxmox-backup-client/src/main.rs
@@ -4,6 +4,7 @@ use std::path::{Path, PathBuf};
 use std::pin::Pin;
 use std::sync::{Arc, Mutex};
 use std::task::Context;
+use std::time::Duration;
 
 use anyhow::{bail, format_err, Error};
 use futures::stream::{StreamExt, TryStreamExt};
@@ -15,20 +16,21 @@ use xdg::BaseDirectories;
 use pathpatterns::{MatchEntry, MatchType, PatternFlag};
 
 use proxmox_async::blocking::TokioWriterAdapter;
+use proxmox_human_byte::HumanByte;
 use proxmox_io::StdChannelWriter;
 use proxmox_log::init_cli_logger;
 use proxmox_router::{cli::*, ApiMethod, RpcEnvironment};
 use proxmox_schema::api;
 use proxmox_sys::fs::{file_get_json, image_size, replace_file, CreateOptions};
-use proxmox_time::{epoch_i64, strftime_local};
+use proxmox_time::{epoch_i64, strftime_local, TimeSpan};
 use pxar::accessor::aio::Accessor;
 use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
 
 use pbs_api_types::{
     Authid, BackupDir, BackupGroup, BackupNamespace, BackupPart, BackupType, ClientRateLimitConfig,
     CryptMode, Fingerprint, GroupListItem, PruneJobOptions, PruneListItem, RateLimitConfig,
-    SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
-    BACKUP_TYPE_SCHEMA,
+    SnapshotListItem, StorageStatus, TimeInterval, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
+    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA,
 };
 use pbs_client::catalog_shell::Shell;
 use pbs_client::pxar::{ErrorHandler as PxarErrorHandler, MetadataArchiveReader, PxarPrevRef};
@@ -46,8 +48,8 @@ use pbs_client::tools::{
 use pbs_client::{
     delete_ticket_info, parse_backup_specification, view_task_result, BackupDetectionMode,
     BackupReader, BackupRepository, BackupSpecificationType, BackupStats, BackupWriter,
-    ChunkStream, FixedChunkStream, HttpClient, InjectionData, PxarBackupStream, RemoteChunkReader,
-    UploadOptions, BACKUP_SOURCE_SCHEMA,
+    ChunkStream, FixedChunkStream, HttpClient, InjectionData, LogInterval, PxarBackupStream,
+    RemoteChunkReader, UploadOptions, BACKUP_SOURCE_SCHEMA,
 };
 use pbs_datastore::catalog::{BackupCatalogWriter, CatalogReader, CatalogWriter};
 use pbs_datastore::chunk_store::verify_chunk_size;
@@ -87,6 +89,9 @@ pub use snapshot::*;
 mod task;
 pub use task::*;
 
+const PROGRESS_LOG_TIME_MIN: u64 = 1;
+const PROGRESS_LOG_SIZE_MIN: u64 = 100 * 1024 * 1024;
+
 fn record_repository(repo: &BackupRepository) {
     let base = match BaseDirectories::with_prefix("proxmox-backup") {
         Ok(v) => v,
@@ -734,6 +739,14 @@ fn spawn_catalog_upload(
                optional: true,
                default: false,
            },
+           "progress-interval": {
+               type: TimeInterval,
+               optional: true,
+           },
+           "progress-size-interval": {
+               type: HumanByte,
+               optional: true,
+           },
        }
    }
)]
@@ -746,6 +759,8 @@ async fn create_backup(
     dry_run: bool,
     skip_e2big_xattr: bool,
     limit: ClientRateLimitConfig,
+    progress_interval: Option<TimeInterval>,
+    progress_size_interval: Option<HumanByte>,
     _info: &ApiMethod,
     _rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
@@ -782,6 +797,33 @@ async fn create_backup(
     let empty = Vec::new();
     let exclude_args = param["exclude"].as_array().unwrap_or(&empty);
 
+    let mut progress_log_interval = LogInterval::default();
+    if let Some(time_interval) = progress_interval {
+        if progress_size_interval.is_some() {
+            bail!("'progress-interval' and 'progress-size-interval' are in conflict");
+        }
+
+        if time_interval.as_f64() >= PROGRESS_LOG_TIME_MIN as f64 {
+            progress_log_interval = LogInterval::Time(time_interval.into());
+        } else {
+            progress_log_interval = LogInterval::None;
+            log::info!(
+                "Log interval '{time_interval}' below minimum '{}', progress log disabled.",
+                TimeSpan::from(Duration::from_secs(PROGRESS_LOG_TIME_MIN)),
+            );
+        }
+    } else if let Some(human_byte) = progress_size_interval {
+        if human_byte.as_u64() >= PROGRESS_LOG_SIZE_MIN {
+            progress_log_interval = LogInterval::Size(human_byte);
+        } else {
+            progress_log_interval = LogInterval::None;
+            log::info!(
+                "Log interval '{human_byte}' below minimum '{}', progress log disabled.",
+                HumanByte::from(PROGRESS_LOG_SIZE_MIN),
+            );
+        }
+    }
+
     let mut pattern_list = Vec::with_capacity(exclude_args.len());
     for entry in exclude_args {
         let entry = entry
@@ -1132,6 +1174,7 @@ async fn create_backup(
                     previous_manifest: previous_manifest.clone(),
                     compress: true,
                     encrypt: crypto.mode == CryptMode::Encrypt,
+                    progress_log_interval: Some(progress_log_interval.clone()),
                     ..UploadOptions::default()
                 };
 
@@ -1169,6 +1212,7 @@ async fn create_backup(
                     fixed_size: Some(size),
                     compress: true,
                     encrypt: crypto.mode == CryptMode::Encrypt,
+                    progress_log_interval: Some(progress_log_interval.clone()),
                 };
 
                 let stats =
-- 
2.39.5


_______________________________________________
pbs-devel mailing list
pbs-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
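
Side note for reviewers: the size based variant only emits a log line when the
cumulative stream position crosses into a new interval-sized bucket. The
standalone sketch below mirrors that bucketing check from
`progress_log_by_byte_interval`; the `log_on_interval` function, the `println!`
output and the simulated chunk loop are illustrative stand-ins and not part of
the patch.

use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Instant;

// Print a progress line whenever `pos` crosses into a new `interval`-sized
// bucket, mirroring the bucketing check in `progress_log_by_byte_interval`
// (with a plain println! instead of the client's progress_log output).
fn log_on_interval(interval: usize, pos: usize, previous: &AtomicUsize, start_time: &Instant) {
    if interval > 0 && pos / interval > previous.load(Ordering::SeqCst) {
        previous.store(pos / interval, Ordering::SeqCst);
        println!("processed {} bytes after {:.2?}", pos, start_time.elapsed());
    }
}

fn main() {
    // 100 MiB, matching the patch's PROGRESS_LOG_SIZE_MIN constant.
    let interval = 100 * 1024 * 1024;
    let previous = AtomicUsize::new(0);
    let start = Instant::now();

    // Simulate a stream advancing in 4 MiB steps; a line is printed each
    // time the cumulative position enters a new 100 MiB bucket.
    let mut pos = 0;
    for _ in 0..100 {
        pos += 4 * 1024 * 1024;
        log_on_interval(interval, pos, &previous, &start);
    }
}

With the patch's 100 MiB minimum this yields at most one line per 100 MiB of
processed data, which is the protection against excessive output described in
the commit message.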