all lists on lists.proxmox.com
 help / color / mirror / Atom feed
From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [PATCH proxmox-backup v5 10/11] server: push: prefix log messages and add additional logging
Date: Mon,  9 Mar 2026 17:20:49 +0100	[thread overview]
Message-ID: <20260309162050.1047341-12-c.ebner@proxmox.com> (raw)
In-Reply-To: <20260309162050.1047341-1-c.ebner@proxmox.com>

Pushing groups, and therefore also snapshots, in parallel leads to
unordered log output, making it mostly impossible to relate a log
message to a particular backup snapshot or group.

Therefore, prefix push job log messages by the corresponding group or
snapshot.

Also, be more verbose for push syncs by adding additional log output
for the groups, snapshots, and archives being pushed.

Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
 src/server/push.rs | 49 ++++++++++++++++++++++++++++++++++++----------
 1 file changed, 39 insertions(+), 10 deletions(-)

diff --git a/src/server/push.rs b/src/server/push.rs
index e7d56cc2a..fe404b8a2 100644
--- a/src/server/push.rs
+++ b/src/server/push.rs
@@ -28,6 +28,8 @@ use pbs_datastore::index::IndexFile;
 use pbs_datastore::read_chunk::AsyncReadChunk;
 use pbs_datastore::{DataStore, StoreProgress};
 
+use proxmox_human_byte::HumanByte;
+
 use super::sync::{
     check_namespace_depth_limit, exclude_not_verified_or_encrypted,
     ignore_not_verified_or_encrypted, LocalSource, RemovedVanishedStats, SkipInfo, SkipReason,
@@ -722,6 +724,7 @@ pub(crate) async fn push_group(
     group: &BackupGroup,
     shared_group_progress: Arc<SharedGroupProgress>,
 ) -> Result<SyncStats, Error> {
+    let prefix = format!("Group {group}");
     let mut already_synced_skip_info = SkipInfo::new(SkipReason::AlreadySynced);
     let mut transfer_last_skip_info = SkipInfo::new(SkipReason::TransferLast);
 
@@ -780,11 +783,11 @@ pub(crate) async fn push_group(
         .collect();
 
     if already_synced_skip_info.count > 0 {
-        info!("{already_synced_skip_info}");
+        info!("{prefix}: {already_synced_skip_info}");
         already_synced_skip_info.reset();
     }
     if transfer_last_skip_info.count > 0 {
-        info!("{transfer_last_skip_info}");
+        info!("{prefix}: {transfer_last_skip_info}");
         transfer_last_skip_info.reset();
     }
 
@@ -794,6 +797,7 @@ pub(crate) async fn push_group(
     let mut stats = SyncStats::default();
     let mut fetch_previous_manifest = !target_snapshots.is_empty();
     for (pos, source_snapshot) in snapshots.into_iter().enumerate() {
+        info!("Snapshot {source_snapshot}: start sync");
         let result = push_snapshot(
             &params,
             namespace,
@@ -806,10 +810,11 @@ pub(crate) async fn push_group(
         // Update done groups progress by other parallel running pushes
         local_progress.done_groups = shared_group_progress.load_done();
         local_progress.done_snapshots = pos as u64 + 1;
-        info!("Percentage done: group {group}: {local_progress}");
 
         // stop on error
         let sync_stats = result?;
+        info!("Snapshot {source_snapshot}: sync done");
+        info!("Percentage done: group {group}: {local_progress}");
         stats.add(sync_stats);
     }
 
@@ -820,7 +825,7 @@ pub(crate) async fn push_group(
             }
             if snapshot.protected {
                 info!(
-                    "Kept protected snapshot {name} on remote",
+                    "{prefix}: Kept protected snapshot {name} on remote",
                     name = snapshot.backup
                 );
                 continue;
@@ -828,14 +833,14 @@ pub(crate) async fn push_group(
             match forget_target_snapshot(&params, &target_namespace, &snapshot.backup).await {
                 Ok(()) => {
                     info!(
-                        "Removed vanished snapshot {name} from remote",
+                        "{prefix}: Removed vanished snapshot {name} from remote",
                         name = snapshot.backup
                     );
                 }
                 Err(err) => {
-                    warn!("Encountered errors: {err:#}");
+                    warn!("{prefix}: Encountered errors: {err:#}");
                     warn!(
-                        "Failed to remove vanished snapshot {name} from remote!",
+                        "{prefix}: Failed to remove vanished snapshot {name} from remote!",
                         name = snapshot.backup
                     );
                 }
@@ -863,6 +868,7 @@ pub(crate) async fn push_snapshot(
     snapshot: &BackupDir,
     fetch_previous_manifest: bool,
 ) -> Result<SyncStats, Error> {
+    let prefix = format!("Snapshot {snapshot}");
     let mut stats = SyncStats::default();
     let target_ns = params.map_to_target(namespace)?;
     let backup_dir = params
@@ -878,8 +884,8 @@ pub(crate) async fn push_snapshot(
         Ok((manifest, _raw_size)) => manifest,
         Err(err) => {
             // No manifest in snapshot or failed to read, warn and skip
-            log::warn!("Encountered errors: {err:#}");
-            log::warn!("Failed to load manifest for '{snapshot}'!");
+            warn!("{prefix}: Encountered errors: {err:#}");
+            warn!("{prefix}: Failed to load manifest!");
             return Ok(stats);
         }
     };
@@ -913,7 +919,7 @@ pub(crate) async fn push_snapshot(
     if fetch_previous_manifest {
         match backup_writer.download_previous_manifest().await {
             Ok(manifest) => previous_manifest = Some(Arc::new(manifest)),
-            Err(err) => log::info!("Could not download previous manifest - {err}"),
+            Err(err) => info!("{prefix}: Could not download previous manifest - {err}"),
         }
     };
 
@@ -942,12 +948,21 @@ pub(crate) async fn push_snapshot(
         path.push(&entry.filename);
         if path.try_exists()? {
             let archive_name = BackupArchiveName::from_path(&entry.filename)?;
+            info!("{prefix}: sync archive {archive_name}");
+            let prefix = format!("Snapshot {snapshot}: archive {archive_name}");
             match archive_name.archive_type() {
                 ArchiveType::Blob => {
                     let file = std::fs::File::open(&path)?;
                     let backup_stats = backup_writer
                         .upload_blob(file, archive_name.as_ref())
                         .await?;
+                    info!(
+                        "{prefix}: uploaded {} ({}/s)",
+                        HumanByte::from(backup_stats.size),
+                        HumanByte::new_binary(
+                            backup_stats.size as f64 / backup_stats.duration.as_secs_f64()
+                        ),
+                    );
                     stats.add(SyncStats {
                         chunk_count: backup_stats.chunk_count as usize,
                         bytes: backup_stats.size as usize,
@@ -979,6 +994,13 @@ pub(crate) async fn push_snapshot(
                         known_chunks.clone(),
                     )
                     .await?;
+                    info!(
+                        "{prefix}: uploaded {} ({}/s)",
+                        HumanByte::from(sync_stats.bytes),
+                        HumanByte::new_binary(
+                            sync_stats.bytes as f64 / sync_stats.elapsed.as_secs_f64()
+                        ),
+                    );
                     stats.add(sync_stats);
                 }
                 ArchiveType::FixedIndex => {
@@ -1006,6 +1028,13 @@ pub(crate) async fn push_snapshot(
                         known_chunks.clone(),
                     )
                     .await?;
+                    info!(
+                        "{prefix}: uploaded {} ({}/s)",
+                        HumanByte::from(sync_stats.bytes),
+                        HumanByte::new_binary(
+                            sync_stats.bytes as f64 / sync_stats.elapsed.as_secs_f64()
+                        ),
+                    );
                     stats.add(sync_stats);
                 }
             }
-- 
2.47.3





  parent reply	other threads:[~2026-03-09 16:21 UTC|newest]

Thread overview: 13+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-03-09 16:20 [PATCH proxmox{,-backup} v5 00/12] fix #4182: concurrent group pull/push support for sync jobs Christian Ebner
2026-03-09 16:20 ` [PATCH proxmox v5 1/1] pbs api types: add `worker-threads` to sync job config Christian Ebner
2026-03-09 16:20 ` [PATCH proxmox-backup v5 01/11] client: backup writer: fix upload stats size and rate for push sync Christian Ebner
2026-03-09 16:20 ` [PATCH proxmox-backup v5 02/11] api: config/sync: add optional `worker-threads` property Christian Ebner
2026-03-09 16:20 ` [PATCH proxmox-backup v5 03/11] sync: pull: revert avoiding reinstantiation for encountered chunks map Christian Ebner
2026-03-09 16:20 ` [PATCH proxmox-backup v5 04/11] sync: pull: factor out backup group locking and owner check Christian Ebner
2026-03-09 16:20 ` [PATCH proxmox-backup v5 05/11] sync: pull: prepare pull parameters to be shared across parallel tasks Christian Ebner
2026-03-09 16:20 ` [PATCH proxmox-backup v5 06/11] fix #4182: server: sync: allow pulling backup groups in parallel Christian Ebner
2026-03-09 16:20 ` [PATCH proxmox-backup v5 07/11] server: pull: prefix log messages and add error context Christian Ebner
2026-03-09 16:20 ` [PATCH proxmox-backup v5 08/11] sync: push: prepare push parameters to be shared across parallel tasks Christian Ebner
2026-03-09 16:20 ` [PATCH proxmox-backup v5 09/11] server: sync: allow pushing groups concurrently Christian Ebner
2026-03-09 16:20 ` Christian Ebner [this message]
2026-03-09 16:20 ` [PATCH proxmox-backup v5 11/11] ui: expose group worker setting in sync job edit window Christian Ebner

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260309162050.1047341-12-c.ebner@proxmox.com \
    --to=c.ebner@proxmox.com \
    --cc=pbs-devel@lists.proxmox.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.
Service provided by Proxmox Server Solutions GmbH | Privacy | Legal