From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <pbs-devel-bounces@lists.proxmox.com>
Received: from firstgate.proxmox.com (firstgate.proxmox.com [212.224.123.68])
	by lore.proxmox.com (Postfix) with ESMTPS id 7A6B21FF168
	for <inbox@lore.proxmox.com>; Tue, 18 Mar 2025 13:24:59 +0100 (CET)
Received: from firstgate.proxmox.com (localhost [127.0.0.1])
	by firstgate.proxmox.com (Proxmox) with ESMTP id 016671E8C0;
	Tue, 18 Mar 2025 13:24:45 +0100 (CET)
From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Date: Tue, 18 Mar 2025 13:24:23 +0100
Message-Id: <20250318122423.385684-8-c.ebner@proxmox.com>
X-Mailer: git-send-email 2.39.5
In-Reply-To: <20250318122423.385684-1-c.ebner@proxmox.com>
References: <20250318122423.385684-1-c.ebner@proxmox.com>
MIME-Version: 1.0
X-SPAM-LEVEL: Spam detection results:  0
 AWL 0.026 Adjusted score from AWL reputation of From: address
 BAYES_00                 -1.9 Bayes spam probability is 0 to 1%
 DMARC_MISSING             0.1 Missing DMARC policy
 KAM_DMARC_STATUS 0.01 Test Rule for DKIM or SPF Failure with Strict Alignment
 SPF_HELO_NONE           0.001 SPF: HELO does not publish an SPF Record
 SPF_PASS               -0.001 SPF: sender matches SPF record
 T_FILL_THIS_FORM_SHORT   0.01 Fill in a short form with personal information
Subject: [pbs-devel] [PATCH v3 proxmox-backup 7/7] server: push: prefix log
 messages and add additional logging
X-BeenThere: pbs-devel@lists.proxmox.com
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: Proxmox Backup Server development discussion
 <pbs-devel.lists.proxmox.com>
List-Unsubscribe: <https://lists.proxmox.com/cgi-bin/mailman/options/pbs-devel>, 
 <mailto:pbs-devel-request@lists.proxmox.com?subject=unsubscribe>
List-Archive: <http://lists.proxmox.com/pipermail/pbs-devel/>
List-Post: <mailto:pbs-devel@lists.proxmox.com>
List-Help: <mailto:pbs-devel-request@lists.proxmox.com?subject=help>
List-Subscribe: <https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel>, 
 <mailto:pbs-devel-request@lists.proxmox.com?subject=subscribe>
Reply-To: Proxmox Backup Server development discussion
 <pbs-devel@lists.proxmox.com>
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
Errors-To: pbs-devel-bounces@lists.proxmox.com
Sender: "pbs-devel" <pbs-devel-bounces@lists.proxmox.com>

Pushing groups, and therefore also snapshots, in parallel leads to
interleaved log output, making it nearly impossible to relate a log
message to its backup snapshot or group.

Therefore, prefix push job log messages by the corresponding group or
snapshot.

Also, be more verbose for push syncs by adding log output for the
groups, snapshots and archives being pushed.

Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
changes since version 2:
- no changes

 src/server/push.rs | 49 ++++++++++++++++++++++++++++++++++++----------
 1 file changed, 39 insertions(+), 10 deletions(-)

diff --git a/src/server/push.rs b/src/server/push.rs
index 4188955f5..17fc411fe 100644
--- a/src/server/push.rs
+++ b/src/server/push.rs
@@ -25,6 +25,8 @@ use pbs_datastore::index::IndexFile;
 use pbs_datastore::read_chunk::AsyncReadChunk;
 use pbs_datastore::{DataStore, StoreProgress};
 
+use proxmox_human_byte::HumanByte;
+
 use super::sync::{
     check_namespace_depth_limit, LocalSource, RemovedVanishedStats, SkipInfo, SkipReason,
     SyncSource, SyncStats,
@@ -701,6 +703,7 @@ pub(crate) async fn push_group(
     group: &BackupGroup,
     store_progress: Arc<Mutex<StoreProgress>>,
 ) -> Result<SyncStats, Error> {
+    let prefix = format!("Group {group}");
     let mut already_synced_skip_info = SkipInfo::new(SkipReason::AlreadySynced);
     let mut transfer_last_skip_info = SkipInfo::new(SkipReason::TransferLast);
 
@@ -746,11 +749,11 @@ pub(crate) async fn push_group(
         .collect();
 
     if already_synced_skip_info.count > 0 {
-        info!("{already_synced_skip_info}");
+        info!("{prefix}: {already_synced_skip_info}");
         already_synced_skip_info.reset();
     }
     if transfer_last_skip_info.count > 0 {
-        info!("{transfer_last_skip_info}");
+        info!("{prefix}: {transfer_last_skip_info}");
         transfer_last_skip_info.reset();
     }
 
@@ -760,6 +763,7 @@ pub(crate) async fn push_group(
     let mut stats = SyncStats::default();
     let mut fetch_previous_manifest = !target_snapshots.is_empty();
     for (pos, source_snapshot) in snapshots.into_iter().enumerate() {
+        info!("Snapshot {source_snapshot}: start sync");
         let result =
             push_snapshot(params, namespace, &source_snapshot, fetch_previous_manifest).await;
         fetch_previous_manifest = true;
@@ -768,10 +772,11 @@ pub(crate) async fn push_group(
         local_progress.done_snapshots = pos as u64 + 1;
         // Update done groups progress by other parallel running pushes
         local_progress.done_groups = store_progress.lock().unwrap().done_groups;
-        info!("Percentage done: {local_progress}");
 
         // stop on error
         let sync_stats = result?;
+        info!("Snapshot {source_snapshot}: sync done");
+        info!("Percentage done: {local_progress}");
         stats.add(sync_stats);
     }
 
@@ -782,7 +787,7 @@ pub(crate) async fn push_group(
             }
             if snapshot.protected {
                 info!(
-                    "Kept protected snapshot {name} on remote",
+                    "{prefix}: Kept protected snapshot {name} on remote",
                     name = snapshot.backup
                 );
                 continue;
@@ -790,14 +795,14 @@ pub(crate) async fn push_group(
             match forget_target_snapshot(params, &target_namespace, &snapshot.backup).await {
                 Ok(()) => {
                     info!(
-                        "Removed vanished snapshot {name} from remote",
+                        "{prefix}: Removed vanished snapshot {name} from remote",
                         name = snapshot.backup
                     );
                 }
                 Err(err) => {
-                    warn!("Encountered errors: {err:#}");
+                    warn!("{prefix}: Encountered errors: {err:#}");
                     warn!(
-                        "Failed to remove vanished snapshot {name} from remote!",
+                        "{prefix}: Failed to remove vanished snapshot {name} from remote!",
                         name = snapshot.backup
                     );
                 }
@@ -825,6 +830,7 @@ pub(crate) async fn push_snapshot(
     snapshot: &BackupDir,
     fetch_previous_manifest: bool,
 ) -> Result<SyncStats, Error> {
+    let prefix = format!("Snapshot {snapshot}");
     let mut stats = SyncStats::default();
     let target_ns = params.map_to_target(namespace)?;
     let backup_dir = params
@@ -840,8 +846,8 @@ pub(crate) async fn push_snapshot(
         Ok((manifest, _raw_size)) => manifest,
         Err(err) => {
             // No manifest in snapshot or failed to read, warn and skip
-            log::warn!("Encountered errors: {err:#}");
-            log::warn!("Failed to load manifest for '{snapshot}'!");
+            warn!("{prefix}: Encountered errors: {err:#}");
+            warn!("{prefix}: Failed to load manifest!");
             return Ok(stats);
         }
     };
@@ -863,7 +869,7 @@ pub(crate) async fn push_snapshot(
     if fetch_previous_manifest {
         match backup_writer.download_previous_manifest().await {
             Ok(manifest) => previous_manifest = Some(Arc::new(manifest)),
-            Err(err) => log::info!("Could not download previous manifest - {err}"),
+            Err(err) => info!("{prefix}: Could not download previous manifest - {err}"),
         }
     };
 
@@ -892,12 +898,21 @@ pub(crate) async fn push_snapshot(
         path.push(&entry.filename);
         if path.try_exists()? {
             let archive_name = BackupArchiveName::from_path(&entry.filename)?;
+            info!("{prefix}: sync archive {archive_name}");
+            let prefix = format!("Snapshot {snapshot}: archive {archive_name}");
             match archive_name.archive_type() {
                 ArchiveType::Blob => {
                     let file = std::fs::File::open(&path)?;
                     let backup_stats = backup_writer
                         .upload_blob(file, archive_name.as_ref())
                         .await?;
+                    info!(
+                        "{prefix}: uploaded {} ({}/s)",
+                        HumanByte::from(backup_stats.size),
+                        HumanByte::new_binary(
+                            backup_stats.size as f64 / backup_stats.duration.as_secs_f64()
+                        ),
+                    );
                     stats.add(SyncStats {
                         chunk_count: backup_stats.chunk_count as usize,
                         bytes: backup_stats.size as usize,
@@ -927,6 +942,13 @@ pub(crate) async fn push_snapshot(
                         known_chunks.clone(),
                     )
                     .await?;
+                    info!(
+                        "{prefix}: uploaded {} ({}/s)",
+                        HumanByte::from(sync_stats.bytes),
+                        HumanByte::new_binary(
+                            sync_stats.bytes as f64 / sync_stats.elapsed.as_secs_f64()
+                        ),
+                    );
                     stats.add(sync_stats);
                 }
                 ArchiveType::FixedIndex => {
@@ -952,6 +974,13 @@ pub(crate) async fn push_snapshot(
                         known_chunks.clone(),
                     )
                     .await?;
+                    info!(
+                        "{prefix}: uploaded {} ({}/s)",
+                        HumanByte::from(sync_stats.bytes),
+                        HumanByte::new_binary(
+                            sync_stats.bytes as f64 / sync_stats.elapsed.as_secs_f64()
+                        ),
+                    );
                     stats.add(sync_stats);
                 }
             }
-- 
2.39.5



_______________________________________________
pbs-devel mailing list
pbs-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel