From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [PATCH proxmox-backup v5 07/11] server: pull: prefix log messages and add error context
Date: Mon, 9 Mar 2026 17:20:46 +0100
Message-ID: <20260309162050.1047341-9-c.ebner@proxmox.com>
In-Reply-To: <20260309162050.1047341-1-c.ebner@proxmox.com>
Pulling groups, and therefore also snapshots, in parallel leads to
interleaved log output, making it all but impossible to relate a log
message to its backup snapshot or group.

Therefore, prefix pull job log messages with the corresponding group
or snapshot and set the error context accordingly.

Also, reword some messages, inline variables into format strings and
start log lines with capital letters for consistent output.
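For illustration, here is a minimal sketch of the prefixing pattern the
diff below applies (not part of the patch; it assumes anyhow's `Context`
trait and a `tracing`-style `info!` macro, both in use in the PBS
codebase, and a hypothetical `sync_archive` helper):

```rust
use anyhow::{Context, Error};
use tracing::info;

// Hypothetical helper, only to illustrate the prefixing pattern.
fn sync_archive(snapshot_dir: &str, archive_name: &str) -> Result<(), Error> {
    // Build the prefix once per snapshot so interleaved output from
    // parallel tasks can still be related to its snapshot.
    let prefix = format!("Snapshot {snapshot_dir}");
    info!("{prefix}: sync archive {archive_name}");

    std::fs::File::open(archive_name)
        // Attach the same prefix as error context, so errors bubbling
        // up from parallel tasks carry their snapshot as well.
        .context(format!("{prefix}: archive {archive_name}"))?;
    Ok(())
}
```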
Example output for a sequential pull job:
```
...
Snapshot ct/100/2025-01-15T12:29:44Z: start sync
Snapshot ct/100/2025-01-15T12:29:44Z: sync archive pct.conf.blob
Snapshot ct/100/2025-01-15T12:29:44Z: sync archive root.pxar.didx
Snapshot ct/100/2025-01-15T12:29:44Z: archive root.pxar.didx: downloaded 171.851 MiB (111.223 MiB/s)
Snapshot ct/100/2025-01-15T12:29:44Z: sync archive catalog.pcat1.didx
Snapshot ct/100/2025-01-15T12:29:44Z: archive catalog.pcat1.didx: downloaded 180.195 KiB (19.884 MiB/s)
Snapshot ct/100/2025-01-15T12:29:44Z: got backup log file client.log.blob
Snapshot ct/100/2025-01-15T12:29:44Z: sync done
...
```
Example output for a parallel pull job:
```
...
Snapshot ct/100/2025-01-15T12:29:44Z: start sync
Snapshot ct/100/2025-01-15T12:29:44Z: sync archive pct.conf.blob
Snapshot ct/100/2025-01-15T12:29:44Z: sync archive root.pxar.didx
Snapshot vm/200/2025-01-15T12:30:06Z: start sync
Snapshot vm/200/2025-01-15T12:30:06Z: sync archive qemu-server.conf.blob
Snapshot vm/200/2025-01-15T12:30:06Z: sync archive drive-scsi0.img.fidx
Snapshot ct/100/2025-01-15T12:29:44Z: archive root.pxar.didx: downloaded 171.851 MiB (206.124 MiB/s)
Snapshot ct/100/2025-01-15T12:29:44Z: sync archive catalog.pcat1.didx
Snapshot ct/100/2025-01-15T12:29:44Z: archive catalog.pcat1.didx: downloaded 180.195 KiB (1.972 MiB/s)
Snapshot ct/100/2025-01-15T12:29:44Z: got backup log file client.log.blob
Snapshot ct/100/2025-01-15T12:29:44Z: sync done
...
```
Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
src/server/pull.rs | 93 +++++++++++++++++++++++++++-------------------
src/server/sync.rs | 7 ++--
2 files changed, 59 insertions(+), 41 deletions(-)
diff --git a/src/server/pull.rs b/src/server/pull.rs
index b11e93e6c..2ed78b840 100644
--- a/src/server/pull.rs
+++ b/src/server/pull.rs
@@ -149,6 +149,7 @@ async fn pull_index_chunks<I: IndexFile>(
index: I,
encountered_chunks: Arc<Mutex<EncounteredChunks>>,
backend: &DatastoreBackend,
+ prefix: &str,
) -> Result<SyncStats, Error> {
use futures::stream::{self, StreamExt, TryStreamExt};
@@ -244,7 +245,7 @@ async fn pull_index_chunks<I: IndexFile>(
let chunk_count = chunk_count.load(Ordering::SeqCst);
info!(
- "downloaded {} ({}/s)",
+ "{prefix}: downloaded {} ({}/s)",
HumanByte::from(bytes),
HumanByte::new_binary(bytes as f64 / elapsed.as_secs_f64()),
);
@@ -289,6 +290,8 @@ async fn pull_single_archive<'a>(
encountered_chunks: Arc<Mutex<EncounteredChunks>>,
backend: &DatastoreBackend,
) -> Result<SyncStats, Error> {
+ let prefix = format!("Snapshot {}", snapshot.dir());
+
let archive_name = &archive_info.filename;
let mut path = snapshot.full_path();
path.push(archive_name);
@@ -298,22 +301,29 @@ async fn pull_single_archive<'a>(
let mut sync_stats = SyncStats::default();
- info!("sync archive {archive_name}");
+ info!("{prefix}: sync archive {archive_name}");
+
+ let prefix = format!("Snapshot {}: archive {archive_name}", snapshot.dir());
reader.load_file_into(archive_name, &tmp_path).await?;
- let mut tmpfile = std::fs::OpenOptions::new().read(true).open(&tmp_path)?;
+ let mut tmpfile = std::fs::OpenOptions::new()
+ .read(true)
+ .open(&tmp_path)
+ .context(format!("archive {archive_name}"))?;
match ArchiveType::from_path(archive_name)? {
ArchiveType::DynamicIndex => {
let index = DynamicIndexReader::new(tmpfile).map_err(|err| {
- format_err!("unable to read dynamic index {:?} - {}", tmp_path, err)
+ format_err!(
+ "archive {archive_name}: unable to read dynamic index {tmp_path:?} - {err}"
+ )
})?;
let (csum, size) = index.compute_csum();
- verify_archive(archive_info, &csum, size)?;
+ verify_archive(archive_info, &csum, size).context(format!("archive {archive_name}"))?;
if reader.skip_chunk_sync(snapshot.datastore().name()) {
- info!("skipping chunk sync for same datastore");
+ info!("{prefix}: skipping chunk sync for same datastore");
} else {
let stats = pull_index_chunks(
reader
@@ -323,6 +333,7 @@ async fn pull_single_archive<'a>(
index,
encountered_chunks,
backend,
+ &prefix,
)
.await?;
sync_stats.add(stats);
@@ -330,13 +341,15 @@ async fn pull_single_archive<'a>(
}
ArchiveType::FixedIndex => {
let index = FixedIndexReader::new(tmpfile).map_err(|err| {
- format_err!("unable to read fixed index '{:?}' - {}", tmp_path, err)
+ format_err!(
+ "archive {archive_name}: unable to read fixed index '{tmp_path:?}' - {err}"
+ )
})?;
let (csum, size) = index.compute_csum();
- verify_archive(archive_info, &csum, size)?;
+ verify_archive(archive_info, &csum, size).context(format!("archive {archive_name}"))?;
if reader.skip_chunk_sync(snapshot.datastore().name()) {
- info!("skipping chunk sync for same datastore");
+ info!("{prefix}: skipping chunk sync for same datastore");
} else {
let stats = pull_index_chunks(
reader
@@ -346,6 +359,7 @@ async fn pull_single_archive<'a>(
index,
encountered_chunks,
backend,
+ &prefix,
)
.await?;
sync_stats.add(stats);
@@ -354,11 +368,11 @@ async fn pull_single_archive<'a>(
ArchiveType::Blob => {
tmpfile.rewind()?;
let (csum, size) = sha256(&mut tmpfile)?;
- verify_archive(archive_info, &csum, size)?;
+ verify_archive(archive_info, &csum, size).context(prefix.clone())?;
}
}
if let Err(err) = std::fs::rename(&tmp_path, &path) {
- bail!("Atomic rename file {:?} failed - {}", path, err);
+ bail!("archive {archive_name}: Atomic rename file {path:?} failed - {err}");
}
backend
@@ -386,13 +400,14 @@ async fn pull_snapshot<'a>(
is_new: bool,
) -> Result<SyncStats, Error> {
if is_new {
- info!("sync snapshot {}", snapshot.dir());
+ info!("{}: start sync", snapshot.dir());
} else if corrupt {
info!("re-sync snapshot {} due to corruption", snapshot.dir());
} else {
info!("re-sync snapshot {}", snapshot.dir());
}
+ let prefix = format!("Snapshot {}", snapshot.dir());
let mut sync_stats = SyncStats::default();
let mut manifest_name = snapshot.full_path();
manifest_name.push(MANIFEST_BLOB_NAME.as_ref());
@@ -405,7 +420,8 @@ async fn pull_snapshot<'a>(
let tmp_manifest_blob;
if let Some(data) = reader
.load_file_into(MANIFEST_BLOB_NAME.as_ref(), &tmp_manifest_name)
- .await?
+ .await
+ .context(prefix.clone())?
{
tmp_manifest_blob = data;
} else {
@@ -415,21 +431,21 @@ async fn pull_snapshot<'a>(
if manifest_name.exists() && !corrupt {
let manifest_blob = proxmox_lang::try_block!({
let mut manifest_file = std::fs::File::open(&manifest_name).map_err(|err| {
- format_err!("unable to open local manifest {manifest_name:?} - {err}")
+ format_err!("{prefix}: unable to open local manifest {manifest_name:?} - {err}")
})?;
let manifest_blob = DataBlob::load_from_reader(&mut manifest_file)?;
Ok(manifest_blob)
})
.map_err(|err: Error| {
- format_err!("unable to read local manifest {manifest_name:?} - {err}")
+ format_err!("{prefix}: unable to read local manifest {manifest_name:?} - {err}")
})?;
if manifest_blob.raw_data() == tmp_manifest_blob.raw_data() {
if !client_log_name.exists() {
reader.try_download_client_log(&client_log_name).await?;
};
- info!("no data changes");
+ info!("{prefix}: no data changes");
let _ = std::fs::remove_file(&tmp_manifest_name);
return Ok(sync_stats); // nothing changed
}
@@ -468,7 +484,7 @@ async fn pull_snapshot<'a>(
match manifest.verify_file(&filename, &csum, size) {
Ok(_) => continue,
Err(err) => {
- info!("detected changed file {path:?} - {err}");
+ info!("{prefix}: detected changed file {path:?} - {err}");
}
}
}
@@ -478,7 +494,7 @@ async fn pull_snapshot<'a>(
match manifest.verify_file(&filename, &csum, size) {
Ok(_) => continue,
Err(err) => {
- info!("detected changed file {path:?} - {err}");
+ info!("{prefix}: detected changed file {path:?} - {err}");
}
}
}
@@ -488,7 +504,7 @@ async fn pull_snapshot<'a>(
match manifest.verify_file(&filename, &csum, size) {
Ok(_) => continue,
Err(err) => {
- info!("detected changed file {path:?} - {err}");
+ info!("{prefix}: detected changed file {path:?} - {err}");
}
}
}
@@ -507,7 +523,7 @@ async fn pull_snapshot<'a>(
}
if let Err(err) = std::fs::rename(&tmp_manifest_name, &manifest_name) {
- bail!("Atomic rename file {:?} failed - {}", manifest_name, err);
+ bail!("{prefix}: Atomic rename file {manifest_name:?} failed - {err}");
}
if let DatastoreBackend::S3(s3_client) = backend {
let object_key = pbs_datastore::s3::object_key_from_path(
@@ -546,7 +562,7 @@ async fn pull_snapshot<'a>(
};
snapshot
.cleanup_unreferenced_files(&manifest)
- .map_err(|err| format_err!("failed to cleanup unreferenced files - {err}"))?;
+ .map_err(|err| format_err!("{prefix}: failed to cleanup unreferenced files - {err}"))?;
Ok(sync_stats)
}
@@ -562,9 +578,12 @@ async fn pull_snapshot_from<'a>(
encountered_chunks: Arc<Mutex<EncounteredChunks>>,
corrupt: bool,
) -> Result<SyncStats, Error> {
+ let prefix = format!("Snapshot {}", snapshot.dir());
+
let (_path, is_new, _snap_lock) = snapshot
.datastore()
- .create_locked_backup_dir(snapshot.backup_ns(), snapshot.as_ref())?;
+ .create_locked_backup_dir(snapshot.backup_ns(), snapshot.as_ref())
+ .context(prefix.clone())?;
let result = pull_snapshot(
params,
@@ -585,11 +604,11 @@ async fn pull_snapshot_from<'a>(
snapshot.as_ref(),
true,
) {
- info!("cleanup error - {cleanup_err}");
+ info!("{prefix}: cleanup error - {cleanup_err}");
}
return Err(err);
}
- Ok(_) => info!("sync snapshot {} done", snapshot.dir()),
+ Ok(_) => info!("{prefix}: sync done"),
}
}
@@ -619,6 +638,7 @@ async fn pull_group(
group: &BackupGroup,
shared_group_progress: Arc<SharedGroupProgress>,
) -> Result<SyncStats, Error> {
+ let prefix = format!("Group {group}");
let mut already_synced_skip_info = SkipInfo::new(SkipReason::AlreadySynced);
let mut transfer_last_skip_info = SkipInfo::new(SkipReason::TransferLast);
@@ -710,11 +730,11 @@ async fn pull_group(
.collect();
if already_synced_skip_info.count > 0 {
- info!("{already_synced_skip_info}");
+ info!("{prefix}: {already_synced_skip_info}");
already_synced_skip_info.reset();
}
if transfer_last_skip_info.count > 0 {
- info!("{transfer_last_skip_info}");
+ info!("{prefix}: {transfer_last_skip_info}");
transfer_last_skip_info.reset();
}
@@ -807,7 +827,7 @@ async fn pull_group(
// Update done groups progress by other parallel running pulls
local_progress.done_groups = shared_group_progress.load_done();
local_progress.done_snapshots = pos as u64 + 1;
- info!("percentage done: group {group}: {local_progress}");
+ info!("{prefix}: percentage done: {local_progress}");
let stats = result?; // stop on error
sync_stats.add(stats);
@@ -826,12 +846,12 @@ async fn pull_group(
}
if snapshot.is_protected() {
info!(
- "don't delete vanished snapshot {} (protected)",
- snapshot.dir()
+ "{prefix}: don't delete vanished snapshot {} (protected)",
+ snapshot.dir(),
);
continue;
}
- info!("delete vanished snapshot {}", snapshot.dir());
+ info!("{prefix}: delete vanished snapshot {}", snapshot.dir());
params
.target
.store
@@ -1031,10 +1051,7 @@ pub(crate) async fn pull_store(mut params: PullParameters) -> Result<SyncStats,
}
Err(err) => {
errors = true;
- info!(
- "Encountered errors while syncing namespace {} - {err}",
- &namespace,
- );
+ info!("Encountered errors while syncing namespace {namespace} - {err}");
}
};
}
@@ -1113,7 +1130,7 @@ async fn pull_ns(
list.sort_unstable();
info!(
- "found {} groups to sync (out of {unfiltered_count} total)",
+ "Found {} groups to sync (out of {unfiltered_count} total)",
list.len()
);
@@ -1182,7 +1199,7 @@ async fn pull_ns(
if !local_group.apply_filters(¶ms.group_filter) {
continue;
}
- info!("delete vanished group '{local_group}'");
+ info!("Delete vanished group '{local_group}'");
let delete_stats_result = params
.target
.store
@@ -1191,7 +1208,7 @@ async fn pull_ns(
match delete_stats_result {
Ok(stats) => {
if !stats.all_removed() {
- info!("kept some protected snapshots of group '{local_group}'");
+ info!("Kept some protected snapshots of group '{local_group}'");
sync_stats.add(SyncStats::from(RemovedVanishedStats {
snapshots: stats.removed_snapshots(),
groups: 0,
@@ -1214,7 +1231,7 @@ async fn pull_ns(
Ok(())
});
if let Err(err) = result {
- info!("error during cleanup: {err}");
+ info!("Error during cleanup: {err}");
errors = true;
};
}
diff --git a/src/server/sync.rs b/src/server/sync.rs
index 17d736c41..416bc943d 100644
--- a/src/server/sync.rs
+++ b/src/server/sync.rs
@@ -138,13 +138,13 @@ impl SyncSourceReader for RemoteSourceReader {
Some(HttpError { code, message }) => match *code {
StatusCode::NOT_FOUND => {
info!(
- "skipping snapshot {} - vanished since start of sync",
+ "Snapshot {}: skipped because vanished since start of sync",
&self.dir
);
return Ok(None);
}
_ => {
- bail!("HTTP error {code} - {message}");
+ bail!("Snapshot {}: HTTP error {code} - {message}", &self.dir);
}
},
None => {
@@ -178,7 +178,8 @@ impl SyncSourceReader for RemoteSourceReader {
bail!("Atomic rename file {to_path:?} failed - {err}");
}
info!(
- "got backup log file {client_log_name}",
+ "Snapshot {snapshot}: got backup log file {client_log_name}",
+ snapshot = &self.dir,
client_log_name = client_log_name.deref()
);
}
--
2.47.3