From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [pbs-devel] [PATCH v3 proxmox-backup 7/7] server: push: prefix log messages and add additional logging
Date: Tue, 18 Mar 2025 13:24:23 +0100 [thread overview]
Message-ID: <20250318122423.385684-8-c.ebner@proxmox.com> (raw)
In-Reply-To: <20250318122423.385684-1-c.ebner@proxmox.com>
Pushing groups and therefore also snapshots in parallel leads to
unordered log outputs, making it mostly impossible to relate a log
message to a backup snapshot/group.
Therefore, prefix push job log messages with the corresponding group or
snapshot.
Also, be more verbose for push syncs, adding additional log output
for the groups, snapshots and archives being pushed.
Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
changes since version 2:
- no changes
src/server/push.rs | 49 ++++++++++++++++++++++++++++++++++++----------
1 file changed, 39 insertions(+), 10 deletions(-)
diff --git a/src/server/push.rs b/src/server/push.rs
index 4188955f5..17fc411fe 100644
--- a/src/server/push.rs
+++ b/src/server/push.rs
@@ -25,6 +25,8 @@ use pbs_datastore::index::IndexFile;
use pbs_datastore::read_chunk::AsyncReadChunk;
use pbs_datastore::{DataStore, StoreProgress};
+use proxmox_human_byte::HumanByte;
+
use super::sync::{
check_namespace_depth_limit, LocalSource, RemovedVanishedStats, SkipInfo, SkipReason,
SyncSource, SyncStats,
@@ -701,6 +703,7 @@ pub(crate) async fn push_group(
group: &BackupGroup,
store_progress: Arc<Mutex<StoreProgress>>,
) -> Result<SyncStats, Error> {
+ let prefix = format!("Group {group}");
let mut already_synced_skip_info = SkipInfo::new(SkipReason::AlreadySynced);
let mut transfer_last_skip_info = SkipInfo::new(SkipReason::TransferLast);
@@ -746,11 +749,11 @@ pub(crate) async fn push_group(
.collect();
if already_synced_skip_info.count > 0 {
- info!("{already_synced_skip_info}");
+ info!("{prefix}: {already_synced_skip_info}");
already_synced_skip_info.reset();
}
if transfer_last_skip_info.count > 0 {
- info!("{transfer_last_skip_info}");
+ info!("{prefix}: {transfer_last_skip_info}");
transfer_last_skip_info.reset();
}
@@ -760,6 +763,7 @@ pub(crate) async fn push_group(
let mut stats = SyncStats::default();
let mut fetch_previous_manifest = !target_snapshots.is_empty();
for (pos, source_snapshot) in snapshots.into_iter().enumerate() {
+ info!("Snapshot {source_snapshot}: start sync");
let result =
push_snapshot(params, namespace, &source_snapshot, fetch_previous_manifest).await;
fetch_previous_manifest = true;
@@ -768,10 +772,11 @@ pub(crate) async fn push_group(
local_progress.done_snapshots = pos as u64 + 1;
// Update done groups progress by other parallel running pushes
local_progress.done_groups = store_progress.lock().unwrap().done_groups;
- info!("Percentage done: {local_progress}");
// stop on error
let sync_stats = result?;
+ info!("Snapshot {source_snapshot}: sync done");
+ info!("Percentage done: {local_progress}");
stats.add(sync_stats);
}
@@ -782,7 +787,7 @@ pub(crate) async fn push_group(
}
if snapshot.protected {
info!(
- "Kept protected snapshot {name} on remote",
+ "{prefix}: Kept protected snapshot {name} on remote",
name = snapshot.backup
);
continue;
@@ -790,14 +795,14 @@ pub(crate) async fn push_group(
match forget_target_snapshot(params, &target_namespace, &snapshot.backup).await {
Ok(()) => {
info!(
- "Removed vanished snapshot {name} from remote",
+ "{prefix}: Removed vanished snapshot {name} from remote",
name = snapshot.backup
);
}
Err(err) => {
- warn!("Encountered errors: {err:#}");
+ warn!("{prefix}: Encountered errors: {err:#}");
warn!(
- "Failed to remove vanished snapshot {name} from remote!",
+ "{prefix}: Failed to remove vanished snapshot {name} from remote!",
name = snapshot.backup
);
}
@@ -825,6 +830,7 @@ pub(crate) async fn push_snapshot(
snapshot: &BackupDir,
fetch_previous_manifest: bool,
) -> Result<SyncStats, Error> {
+ let prefix = format!("Snapshot {snapshot}");
let mut stats = SyncStats::default();
let target_ns = params.map_to_target(namespace)?;
let backup_dir = params
@@ -840,8 +846,8 @@ pub(crate) async fn push_snapshot(
Ok((manifest, _raw_size)) => manifest,
Err(err) => {
// No manifest in snapshot or failed to read, warn and skip
- log::warn!("Encountered errors: {err:#}");
- log::warn!("Failed to load manifest for '{snapshot}'!");
+ warn!("{prefix}: Encountered errors: {err:#}");
+ warn!("{prefix}: Failed to load manifest!");
return Ok(stats);
}
};
@@ -863,7 +869,7 @@ pub(crate) async fn push_snapshot(
if fetch_previous_manifest {
match backup_writer.download_previous_manifest().await {
Ok(manifest) => previous_manifest = Some(Arc::new(manifest)),
- Err(err) => log::info!("Could not download previous manifest - {err}"),
+ Err(err) => info!("{prefix}: Could not download previous manifest - {err}"),
}
};
@@ -892,12 +898,21 @@ pub(crate) async fn push_snapshot(
path.push(&entry.filename);
if path.try_exists()? {
let archive_name = BackupArchiveName::from_path(&entry.filename)?;
+ info!("{prefix}: sync archive {archive_name}");
+ let prefix = format!("Snapshot {snapshot}: archive {archive_name}");
match archive_name.archive_type() {
ArchiveType::Blob => {
let file = std::fs::File::open(&path)?;
let backup_stats = backup_writer
.upload_blob(file, archive_name.as_ref())
.await?;
+ info!(
+ "{prefix}: uploaded {} ({}/s)",
+ HumanByte::from(backup_stats.size),
+ HumanByte::new_binary(
+ backup_stats.size as f64 / backup_stats.duration.as_secs_f64()
+ ),
+ );
stats.add(SyncStats {
chunk_count: backup_stats.chunk_count as usize,
bytes: backup_stats.size as usize,
@@ -927,6 +942,13 @@ pub(crate) async fn push_snapshot(
known_chunks.clone(),
)
.await?;
+ info!(
+ "{prefix}: uploaded {} ({}/s)",
+ HumanByte::from(sync_stats.bytes),
+ HumanByte::new_binary(
+ sync_stats.bytes as f64 / sync_stats.elapsed.as_secs_f64()
+ ),
+ );
stats.add(sync_stats);
}
ArchiveType::FixedIndex => {
@@ -952,6 +974,13 @@ pub(crate) async fn push_snapshot(
known_chunks.clone(),
)
.await?;
+ info!(
+ "{prefix}: uploaded {} ({}/s)",
+ HumanByte::from(sync_stats.bytes),
+ HumanByte::new_binary(
+ sync_stats.bytes as f64 / sync_stats.elapsed.as_secs_f64()
+ ),
+ );
stats.add(sync_stats);
}
}
--
2.39.5
_______________________________________________
pbs-devel mailing list
pbs-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
next prev parent reply other threads:[~2025-03-18 12:24 UTC|newest]
Thread overview: 9+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-03-18 12:24 [pbs-devel] [PATCH v3 proxmox proxmox-backup 0/7] fix #4182: concurrent group pull/push support for sync jobs Christian Ebner
2025-03-18 12:24 ` [pbs-devel] [PATCH v3 proxmox 1/7] pbs api types: add 'parallel-groups' to sync job config Christian Ebner
2025-03-18 12:24 ` [pbs-devel] [PATCH v3 proxmox-backup 2/7] client: backup writer: fix upload stats size and rate for push sync Christian Ebner
2025-03-18 12:24 ` [pbs-devel] [PATCH v3 proxmox-backup 3/7] api: config/sync: add optional `parallel-groups` property Christian Ebner
2025-03-18 12:24 ` [pbs-devel] [PATCH v3 proxmox-backup 4/7] fix #4182: server: sync: allow pulling groups concurrently Christian Ebner
2025-03-18 12:24 ` [pbs-devel] [PATCH v3 proxmox-backup 5/7] server: pull: prefix log messages and add error context Christian Ebner
2025-03-18 12:24 ` [pbs-devel] [PATCH v3 proxmox-backup 6/7] server: sync: allow pushing groups concurrently Christian Ebner
2025-03-18 12:24 ` Christian Ebner [this message]
2025-04-04 13:51 ` [pbs-devel] superseded: [PATCH v3 proxmox proxmox-backup 0/7] fix #4182: concurrent group pull/push support for sync jobs Christian Ebner
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250318122423.385684-8-c.ebner@proxmox.com \
--to=c.ebner@proxmox.com \
--cc=pbs-devel@lists.proxmox.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox
Service provided by Proxmox Server Solutions GmbH | Privacy | Legal