From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [pbs-devel] [PATCH v2 proxmox-backup 5/5] client: progress log: allow to specify backup log interval
Date: Wed, 23 Oct 2024 11:11:03 +0200 [thread overview]
Message-ID: <20241023091103.80792-6-c.ebner@proxmox.com> (raw)
In-Reply-To: <20241023091103.80792-1-c.ebner@proxmox.com>
Adds the optional parameters `progress-interval` and
`progress-size-interval` which allow to specify the interval to use
for backup progress log output.
The progress interval can be specified either as a time-based value,
given as a `TimeSpan` to `progress-interval`, or as a size-based value,
giving a `HumanByte` compatible value to `progress-size-interval`.
The options are in conflict with each other, only one can be set on
invocation.
Minimum values of 1s and 100MiB are set for the corresponding variant
to protect from excessive output. Values below the respective minimum
disable the progress log output altogether.
Example client invocations are:
- no progress logging:
`proxmox-backup-client backup root.pxar:/ --progress-interval=0`
- time based progress logging with 1m 30s interval
`proxmox-backup-client backup root.pxar:/ --progress-interval="1m 30s"`
- size based progress logging with 512MiB interval
`proxmox-backup-client backup root.pxar:/ --progress-size-interval=512MiB`
Without providing the optional parameter, the current default is set
to time based logging with an interval of 1 minute.
Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
changes since version 1:
- Adapt to separate optional parameters for time and size based
intervals
- Disable logging if given values are below corresponding limits
- Define respective minimum values as constants
pbs-client/src/backup_writer.rs | 93 +++++++++++++++++++++++++------
proxmox-backup-client/src/main.rs | 54 ++++++++++++++++--
2 files changed, 126 insertions(+), 21 deletions(-)
diff --git a/pbs-client/src/backup_writer.rs b/pbs-client/src/backup_writer.rs
index 37ee39e2e..f0b31443d 100644
--- a/pbs-client/src/backup_writer.rs
+++ b/pbs-client/src/backup_writer.rs
@@ -1,5 +1,6 @@
use std::collections::HashSet;
use std::future::Future;
+use std::str::FromStr;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Instant;
@@ -53,6 +54,7 @@ pub struct UploadOptions {
pub compress: bool,
pub encrypt: bool,
pub fixed_size: Option<u64>,
+ pub progress_log_interval: Option<LogInterval>,
}
struct UploadStats {
@@ -72,6 +74,19 @@ struct ChunkUploadResponse {
size: usize,
}
+#[derive(Clone)]
+pub enum LogInterval {
+ None,
+ Size(HumanByte),
+ Time(TimeSpan),
+}
+
+impl Default for LogInterval {
+ fn default() -> Self {
+ Self::Time(TimeSpan::from_str("60s").unwrap())
+ }
+}
+
type UploadQueueSender = mpsc::Sender<(MergedChunkInfo, Option<ChunkUploadResponse>)>;
type UploadResultReceiver = oneshot::Receiver<Result<(), Error>>;
@@ -359,6 +374,7 @@ impl BackupWriter {
options.compress,
injections,
archive,
+ options.progress_log_interval,
)
.await?;
@@ -653,6 +669,7 @@ impl BackupWriter {
compress: bool,
injections: Option<std::sync::mpsc::Receiver<InjectChunks>>,
archive: &str,
+ progress_log_interval: Option<LogInterval>,
) -> impl Future<Output = Result<UploadStats, Error>> {
let total_chunks = Arc::new(AtomicUsize::new(0));
let total_chunks2 = total_chunks.clone();
@@ -671,6 +688,8 @@ impl BackupWriter {
let injected_len = Arc::new(AtomicUsize::new(0));
let injected_len2 = injected_len.clone();
let uploaded_len = Arc::new(AtomicUsize::new(0));
+ let uploaded_len2 = uploaded_len.clone();
+ let previous_byte_fraction = Arc::new(AtomicUsize::new(0));
let append_chunk_path = format!("{}_index", prefix);
let upload_chunk_path = format!("{}_chunk", prefix);
@@ -684,23 +703,34 @@ impl BackupWriter {
let index_csum = Arc::new(Mutex::new(Some(openssl::sha::Sha256::new())));
let index_csum_2 = index_csum.clone();
- let progress_handle = if archive.ends_with(".img")
- || archive.ends_with(".pxar")
- || archive.ends_with(".ppxar")
- {
- Some(tokio::spawn(async move {
- loop {
- tokio::time::sleep(tokio::time::Duration::from_secs(60)).await;
- progress_log(
- stream_len3.load(Ordering::SeqCst),
- uploaded_len.load(Ordering::SeqCst),
- &start_time,
- );
+ let mut progress_handle = None;
+ let mut progress_byte_interval = 0;
+ match progress_log_interval {
+ Some(LogInterval::Time(ref time_span)) => {
+ if archive.ends_with(".img")
+ || archive.ends_with(".pxar")
+ || archive.ends_with(".ppxar")
+ {
+ let duration = std::primitive::f64::from(time_span.clone());
+ progress_handle = Some(tokio::spawn(async move {
+ loop {
+ tokio::time::sleep(tokio::time::Duration::from_secs_f64(duration))
+ .await;
+ progress_log(
+ stream_len3.load(Ordering::SeqCst),
+ uploaded_len.load(Ordering::SeqCst),
+ &start_time,
+ )
+ }
+ }))
}
- }))
- } else {
- None
- };
+ }
+ Some(LogInterval::Size(ref human_byte)) => {
+ progress_byte_interval = human_byte.as_u64() as usize
+ }
+ Some(LogInterval::None) => {}
+ None => {}
+ }
stream
.inject_reused_chunks(injections, stream_len.clone())
@@ -717,6 +747,15 @@ impl BackupWriter {
for chunk in chunks {
let offset =
stream_len.fetch_add(chunk.size() as usize, Ordering::SeqCst) as u64;
+
+ progress_log_by_byte_interval(
+ progress_byte_interval,
+ (offset + chunk.size()) as usize,
+ &previous_byte_fraction,
+ &uploaded_len2,
+ &start_time,
+ );
+
reused_len.fetch_add(chunk.size() as usize, Ordering::SeqCst);
injected_len.fetch_add(chunk.size() as usize, Ordering::SeqCst);
let digest = chunk.digest();
@@ -734,6 +773,14 @@ impl BackupWriter {
total_chunks.fetch_add(1, Ordering::SeqCst);
let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64;
+ progress_log_by_byte_interval(
+ progress_byte_interval,
+ offset as usize + chunk_len,
+ &previous_byte_fraction,
+ &uploaded_len2,
+ &start_time,
+ );
+
let mut chunk_builder = DataChunkBuilder::new(data.as_ref()).compress(compress);
if let Some(ref crypt_config) = crypt_config {
@@ -922,6 +969,20 @@ impl BackupWriter {
}
}
+#[inline(always)]
+fn progress_log_by_byte_interval(
+ interval: usize,
+ pos: usize,
+ previous: &Arc<AtomicUsize>,
+ uploaded: &Arc<AtomicUsize>,
+ start_time: &Instant,
+) {
+ if interval > 0 && pos / interval > previous.load(Ordering::SeqCst) {
+ previous.store(pos / interval, Ordering::SeqCst);
+ progress_log(pos, uploaded.load(Ordering::SeqCst), start_time);
+ }
+}
+
#[inline(always)]
fn progress_log(size: usize, size_uploaded: usize, start_time: &Instant) {
let size = HumanByte::from(size);
diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs
index e4034aa99..47afaa446 100644
--- a/proxmox-backup-client/src/main.rs
+++ b/proxmox-backup-client/src/main.rs
@@ -4,6 +4,7 @@ use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::Context;
+use std::time::Duration;
use anyhow::{bail, format_err, Error};
use futures::stream::{StreamExt, TryStreamExt};
@@ -15,20 +16,21 @@ use xdg::BaseDirectories;
use pathpatterns::{MatchEntry, MatchType, PatternFlag};
use proxmox_async::blocking::TokioWriterAdapter;
+use proxmox_human_byte::HumanByte;
use proxmox_io::StdChannelWriter;
use proxmox_log::init_cli_logger;
use proxmox_router::{cli::*, ApiMethod, RpcEnvironment};
use proxmox_schema::api;
use proxmox_sys::fs::{file_get_json, image_size, replace_file, CreateOptions};
-use proxmox_time::{epoch_i64, strftime_local};
+use proxmox_time::{epoch_i64, strftime_local, TimeSpan};
use pxar::accessor::aio::Accessor;
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
use pbs_api_types::{
Authid, BackupDir, BackupGroup, BackupNamespace, BackupPart, BackupType, ClientRateLimitConfig,
CryptMode, Fingerprint, GroupListItem, PruneJobOptions, PruneListItem, RateLimitConfig,
- SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
- BACKUP_TYPE_SCHEMA,
+ SnapshotListItem, StorageStatus, TimeInterval, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
+ BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA,
};
use pbs_client::catalog_shell::Shell;
use pbs_client::pxar::{ErrorHandler as PxarErrorHandler, MetadataArchiveReader, PxarPrevRef};
@@ -46,8 +48,8 @@ use pbs_client::tools::{
use pbs_client::{
delete_ticket_info, parse_backup_specification, view_task_result, BackupDetectionMode,
BackupReader, BackupRepository, BackupSpecificationType, BackupStats, BackupWriter,
- ChunkStream, FixedChunkStream, HttpClient, InjectionData, PxarBackupStream, RemoteChunkReader,
- UploadOptions, BACKUP_SOURCE_SCHEMA,
+ ChunkStream, FixedChunkStream, HttpClient, InjectionData, LogInterval, PxarBackupStream,
+ RemoteChunkReader, UploadOptions, BACKUP_SOURCE_SCHEMA,
};
use pbs_datastore::catalog::{BackupCatalogWriter, CatalogReader, CatalogWriter};
use pbs_datastore::chunk_store::verify_chunk_size;
@@ -87,6 +89,9 @@ pub use snapshot::*;
mod task;
pub use task::*;
+const PROGRESS_LOG_TIME_MIN: u64 = 1;
+const PROGRESS_LOG_SIZE_MIN: u64 = 100 * 1024 * 1024;
+
fn record_repository(repo: &BackupRepository) {
let base = match BaseDirectories::with_prefix("proxmox-backup") {
Ok(v) => v,
@@ -734,6 +739,14 @@ fn spawn_catalog_upload(
optional: true,
default: false,
},
+ "progress-interval": {
+ type: TimeInterval,
+ optional: true,
+ },
+ "progress-size-interval": {
+ type: HumanByte,
+ optional: true,
+ },
}
}
)]
@@ -746,6 +759,8 @@ async fn create_backup(
dry_run: bool,
skip_e2big_xattr: bool,
limit: ClientRateLimitConfig,
+ progress_interval: Option<TimeInterval>,
+ progress_size_interval: Option<HumanByte>,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
@@ -782,6 +797,33 @@ async fn create_backup(
let empty = Vec::new();
let exclude_args = param["exclude"].as_array().unwrap_or(&empty);
+ let mut progress_log_interval = LogInterval::default();
+ if let Some(time_interval) = progress_interval {
+ if progress_size_interval.is_some() {
+ bail!("'progress-interval' and 'progress-size-interval' are in conflict");
+ }
+
+ if time_interval.as_f64() >= PROGRESS_LOG_TIME_MIN as f64 {
+ progress_log_interval = LogInterval::Time(time_interval.into());
+ } else {
+ progress_log_interval = LogInterval::None;
+ log::info!(
+ "Log interval '{time_interval}' below minimum '{}', progress log disabled.",
+ TimeSpan::from(Duration::from_secs(PROGRESS_LOG_TIME_MIN)),
+ );
+ }
+ } else if let Some(human_byte) = progress_size_interval {
+ if human_byte.as_u64() >= PROGRESS_LOG_SIZE_MIN {
+ progress_log_interval = LogInterval::Size(human_byte);
+ } else {
+ progress_log_interval = LogInterval::None;
+ log::info!(
+ "Log interval '{human_byte}' below minimum '{}', progress log disabled.",
+ HumanByte::from(PROGRESS_LOG_SIZE_MIN),
+ );
+ }
+ }
+
let mut pattern_list = Vec::with_capacity(exclude_args.len());
for entry in exclude_args {
let entry = entry
@@ -1132,6 +1174,7 @@ async fn create_backup(
previous_manifest: previous_manifest.clone(),
compress: true,
encrypt: crypto.mode == CryptMode::Encrypt,
+ progress_log_interval: Some(progress_log_interval.clone()),
..UploadOptions::default()
};
@@ -1169,6 +1212,7 @@ async fn create_backup(
fixed_size: Some(size),
compress: true,
encrypt: crypto.mode == CryptMode::Encrypt,
+ progress_log_interval: Some(progress_log_interval.clone()),
};
let stats =
--
2.39.5
_______________________________________________
pbs-devel mailing list
pbs-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
prev parent reply other threads:[~2024-10-23 9:10 UTC|newest]
Thread overview: 6+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-10-23 9:10 [pbs-devel] [PATCH v2 proxmox proxmox-backup 0/5] backup client progress " Christian Ebner
2024-10-23 9:10 ` [pbs-devel] [PATCH v2 proxmox 1/5] time: fix typos in `TimeSpan` related docstring Christian Ebner
2024-10-23 9:11 ` [pbs-devel] [PATCH v2 proxmox 2/5] time: also implement `From<&TimeSpan> for f64` Christian Ebner
2024-10-23 9:11 ` [pbs-devel] [PATCH v2 proxmox-backup 3/5] api-types: client: add wrapper api type for TimeSpan Christian Ebner
2024-10-23 9:11 ` [pbs-devel] [PATCH v2 proxmox-backup 4/5] client: progress log: factor out log message generation Christian Ebner
2024-10-23 9:11 ` Christian Ebner [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20241023091103.80792-6-c.ebner@proxmox.com \
--to=c.ebner@proxmox.com \
--cc=pbs-devel@lists.proxmox.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox