public inbox for pbs-devel@lists.proxmox.com
* Re: [pbs-devel] [PATCH proxmox-backup v2 5/7] api2: tape/backup: commit pool_writer even on error
@ 2021-07-28  8:52 Dietmar Maurer
From: Dietmar Maurer @ 2021-07-28  8:52 UTC
  To: Proxmox Backup Server development discussion, Dominik Csapak

> On 07/22/2021 3:41 PM Dominik Csapak <d.csapak@proxmox.com> wrote:
> 
>  
> this way we store all finished snapshots/chunk archives in the
> catalog, not only those completed before the last commit

I guess this is wrong, because there is no guarantee that this data is flushed to tape?
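
To illustrate the concern, here is a minimal sketch with invented names
(not the actual PoolWriter/MediaCatalog API) of the invariant at stake:
the catalog should only record what a sync has already made durable on
tape.

use std::io;

// Hypothetical stand-ins; not the real PoolWriter/MediaCatalog API.
struct Drive;
struct Catalog;

impl Drive {
    // After this returns, data may still sit in the drive's buffer.
    fn write_chunk_archive(&mut self) -> io::Result<()> { Ok(()) }
    // Only a successful sync guarantees the data is on tape.
    fn sync(&mut self) -> io::Result<()> { Ok(()) }
}

impl Catalog {
    fn commit(&mut self) -> io::Result<()> { Ok(()) }
}

// Risky ordering: a crash between commit() and sync() leaves the
// catalog claiming a chunk archive the tape never received.
fn commit_then_sync(drive: &mut Drive, catalog: &mut Catalog) -> io::Result<()> {
    drive.write_chunk_archive()?;
    catalog.commit()?;
    drive.sync()
}

// Safe ordering: flush first, then commit, so the catalog only
// ever records data that is already durable on tape.
fn sync_then_commit(drive: &mut Drive, catalog: &mut Catalog) -> io::Result<()> {
    drive.write_chunk_archive()?;
    drive.sync()?;
    catalog.commit()
}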





* [pbs-devel] [PATCH proxmox-backup v2 5/7] api2: tape/backup: commit pool_writer even on error
  2021-07-22 13:40 [pbs-devel] [PATCH proxmox-backup v2 0/7] improve catalog handling Dominik Csapak
@ 2021-07-22 13:41 ` Dominik Csapak
From: Dominik Csapak @ 2021-07-22 13:41 UTC
  To: pbs-devel

this way we store all finished snapshots/chunk archives in the
catalog, not only those completed before the last commit

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
 src/api2/tape/backup.rs | 115 +++++++++++++++++++++-------------------
 1 file changed, 60 insertions(+), 55 deletions(-)

diff --git a/src/api2/tape/backup.rs b/src/api2/tape/backup.rs
index 45ee4bad..c78b697b 100644
--- a/src/api2/tape/backup.rs
+++ b/src/api2/tape/backup.rs
@@ -456,72 +456,77 @@ fn backup_worker(
 
     let mut need_catalog = false; // avoid writing catalog for empty jobs
 
-    for (group_number, group) in group_list.into_iter().enumerate() {
-        progress.done_groups = group_number as u64;
-        progress.done_snapshots = 0;
-        progress.group_snapshots = 0;
-
-        let snapshot_list = group.list_backups(&datastore.base_path())?;
-
-        // filter out unfinished backups
-        let mut snapshot_list = snapshot_list
-            .into_iter()
-            .filter(|item| item.is_finished())
-            .collect();
-
-        BackupInfo::sort_list(&mut snapshot_list, true); // oldest first
-
-        if latest_only {
-            progress.group_snapshots = 1;
-            if let Some(info) = snapshot_list.pop() {
-                if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
-                    task_log!(worker, "skip snapshot {}", info.backup_dir);
-                    continue;
-                }
+    let res: Result<(), Error> = proxmox::try_block!({
+        for (group_number, group) in group_list.into_iter().enumerate() {
+            progress.done_groups = group_number as u64;
+            progress.done_snapshots = 0;
+            progress.group_snapshots = 0;
+
+            let snapshot_list = group.list_backups(&datastore.base_path())?;
+
+            // filter out unfinished backups
+            let mut snapshot_list = snapshot_list
+                .into_iter()
+                .filter(|item| item.is_finished())
+                .collect();
+
+            BackupInfo::sort_list(&mut snapshot_list, true); // oldest first
+
+            if latest_only {
+                progress.group_snapshots = 1;
+                if let Some(info) = snapshot_list.pop() {
+                    if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
+                        task_log!(worker, "skip snapshot {}", info.backup_dir);
+                        continue;
+                    }
 
-                need_catalog = true;
+                    need_catalog = true;
 
-                let snapshot_name = info.backup_dir.to_string();
-                if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
-                    errors = true;
-                } else {
-                    summary.snapshot_list.push(snapshot_name);
-                }
-                progress.done_snapshots = 1;
-                task_log!(
-                    worker,
-                    "percentage done: {}",
-                    progress
-                );
-            }
-        } else {
-            progress.group_snapshots = snapshot_list.len() as u64;
-            for (snapshot_number, info) in snapshot_list.into_iter().enumerate() {
-                if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
-                    task_log!(worker, "skip snapshot {}", info.backup_dir);
-                    continue;
+                    let snapshot_name = info.backup_dir.to_string();
+                    if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
+                        errors = true;
+                    } else {
+                        summary.snapshot_list.push(snapshot_name);
+                    }
+                    progress.done_snapshots = 1;
+                    task_log!(
+                        worker,
+                        "percentage done: {}",
+                        progress
+                    );
                 }
+            } else {
+                progress.group_snapshots = snapshot_list.len() as u64;
+                for (snapshot_number, info) in snapshot_list.into_iter().enumerate() {
+                    if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
+                        task_log!(worker, "skip snapshot {}", info.backup_dir);
+                        continue;
+                    }
 
-                need_catalog = true;
+                    need_catalog = true;
 
-                let snapshot_name = info.backup_dir.to_string();
-                if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
-                    errors = true;
-                } else {
-                    summary.snapshot_list.push(snapshot_name);
+                    let snapshot_name = info.backup_dir.to_string();
+                    if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
+                        errors = true;
+                    } else {
+                        summary.snapshot_list.push(snapshot_name);
+                    }
+                    progress.done_snapshots = snapshot_number as u64 + 1;
+                    task_log!(
+                        worker,
+                        "percentage done: {}",
+                        progress
+                    );
                 }
-                progress.done_snapshots = snapshot_number as u64 + 1;
-                task_log!(
-                    worker,
-                    "percentage done: {}",
-                    progress
-                );
             }
         }
-    }
+        Ok(())
+    });
 
     pool_writer.finish()?;
 
+    let _ = res?; // bubble errors up
+
     if need_catalog {
         task_log!(worker, "append media catalog");
 
-- 
2.30.2
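
For context, the pattern the patch introduces is: capture the loop's
result instead of returning early, always finish the pool writer, then
propagate the first error. A minimal standalone sketch with invented
helper names (proxmox::try_block! expands to roughly the immediately
invoked closure shown here):

use std::io::{Error, ErrorKind};

// Invented helpers for illustration only.
fn backup_all_groups() -> Result<(), Error> {
    Err(Error::new(ErrorKind::Other, "listing a group failed"))
}

fn finish_pool_writer() -> Result<(), Error> {
    // Must run even if the loop failed, so the catalog still
    // records every snapshot that did complete.
    Ok(())
}

fn backup_worker() -> Result<(), Error> {
    // Capture the result instead of bailing out with `?` directly;
    // proxmox::try_block! wraps the block in a closure like this.
    let res: Result<(), Error> = (|| {
        backup_all_groups()?;
        Ok(())
    })();

    // Cleanup/commit runs unconditionally ...
    finish_pool_writer()?;

    // ... and only then is the original error bubbled up.
    res
}

fn main() {
    assert!(backup_worker().is_err());
}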




