public inbox for pbs-devel@lists.proxmox.com
* [pbs-devel] [PATCH proxmox-backup v3] api: make prune-group a real workertask
@ 2024-03-08 13:36 Gabriel Goller
From: Gabriel Goller @ 2024-03-08 13:36 UTC (permalink / raw)
  To: pbs-devel

`prune-group` is currently not a real worker task, i.e., it behaves like
one but doesn't start a thread or a task to do its work.

Change it to start a tokio task, so that snapshots can be deleted
asynchronously. The `dry-run` option still behaves the same way and
returns early.

This paves the way for the new logging infrastructure (which uses
`task_local` to define a logger) and improves performance on bigger
backup groups.

Signed-off-by: Gabriel Goller <g.goller@proxmox.com>
---
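
For context, here is a minimal sketch of how a task-scoped logger can
work. It uses tokio's `task_local!` macro; the names are illustrative
stand-ins, not the actual logging infra:

    use tokio::task_local;

    task_local! {
        // One logger value per spawned task; a String stands in for a
        // real logger handle here.
        static TASK_LOG: String;
    }

    async fn log_line(msg: &str) {
        // Reads whatever logger is bound to the current task's scope.
        TASK_LOG.with(|id| println!("[{id}] {msg}"));
    }

    #[tokio::main]
    async fn main() {
        // Each worker future runs with its own logger bound via `scope`.
        TASK_LOG
            .scope("prune-worker".to_string(), async {
                log_line("starting prune").await;
                log_line("done").await;
            })
            .await;
    }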

v3, thanks @Max:
 * remove use-task parameter, extract from serde Value
 * simplify dry-run statement
 * remove prune-result output
 * inline variables in formatting call

v2, thanks @Fiona, @Thomas:
 * use feature flag to activate, so we don't break the API
 * convert the result to a structure and print it in the tasklog
 * enable the feature flag in the frontend


 src/api2/admin/datastore.rs | 152 ++++++++++++++++++++++--------------
 www/datastore/Prune.js      |   1 +
 2 files changed, 96 insertions(+), 57 deletions(-)

diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index a95031e7..0e55a2ca 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -944,6 +944,12 @@ pub fn verify(
                 type: BackupNamespace,
                 optional: true,
             },
+            "use-task": {
+                type: bool,
+                default: false,
+                optional: true,
+                description: "Spins up an asynchronous task that does the work.",
+            },
         },
     },
     returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
@@ -960,7 +966,7 @@ pub fn prune(
     keep_options: KeepOptions,
     store: String,
     ns: Option<BackupNamespace>,
-    _param: Value,
+    param: Value,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
@@ -978,7 +984,20 @@ pub fn prune(
     let worker_id = format!("{}:{}:{}", store, ns, group);
     let group = datastore.backup_group(ns.clone(), group);
 
-    let mut prune_result = Vec::new();
+    #[derive(Debug, serde::Serialize)]
+    struct PruneResult {
+        #[serde(rename = "backup-type")]
+        backup_type: BackupType,
+        #[serde(rename = "backup-id")]
+        backup_id: String,
+        #[serde(rename = "backup-time")]
+        backup_time: i64,
+        keep: bool,
+        protected: bool,
+        #[serde(skip_serializing_if = "Option::is_none")]
+        ns: Option<BackupNamespace>,
+    }
+    let mut prune_result: Vec<PruneResult> = Vec::new();
 
     let list = group.list_backups()?;
 
@@ -991,78 +1010,97 @@ pub fn prune(
     if dry_run {
         for (info, mark) in prune_info {
             let keep = keep_all || mark.keep();
-
-            let mut result = json!({
-                "backup-type": info.backup_dir.backup_type(),
-                "backup-id": info.backup_dir.backup_id(),
-                "backup-time": info.backup_dir.backup_time(),
-                "keep": keep,
-                "protected": mark.protected(),
-            });
-            let prune_ns = info.backup_dir.backup_ns();
+            let backup_dir = &info.backup_dir;
+
+            let mut result = PruneResult {
+                backup_type: backup_dir.backup_type(),
+                backup_id: backup_dir.backup_id().to_owned(),
+                backup_time: backup_dir.backup_time(),
+                keep,
+                protected: mark.protected(),
+                ns: None,
+            };
+            let prune_ns = backup_dir.backup_ns();
             if !prune_ns.is_root() {
-                result["ns"] = serde_json::to_value(prune_ns)?;
+                result.ns = Some(prune_ns.to_owned());
             }
             prune_result.push(result);
         }
         return Ok(json!(prune_result));
     }
 
-    // We use a WorkerTask just to have a task log, but run synchrounously
-    let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;
-
-    if keep_all {
-        task_log!(worker, "No prune selection - keeping all files.");
-    } else {
-        let mut opts = Vec::new();
-        if !ns.is_root() {
-            opts.push(format!("--ns {ns}"));
+    let prune_group = move |worker: Arc<WorkerTask>| {
+        if keep_all {
+            task_log!(worker, "No prune selection - keeping all files.");
+        } else {
+            let mut opts = Vec::new();
+            if !ns.is_root() {
+                opts.push(format!("--ns {ns}"));
+            }
+            crate::server::cli_keep_options(&mut opts, &keep_options);
+
+            task_log!(worker, "retention options: {}", opts.join(" "));
+            task_log!(
+                worker,
+                "Starting prune on {} group \"{}\"",
+                print_store_and_ns(&store, &ns),
+                group.group(),
+            );
         }
-        crate::server::cli_keep_options(&mut opts, &keep_options);
-
-        task_log!(worker, "retention options: {}", opts.join(" "));
-        task_log!(
-            worker,
-            "Starting prune on {} group \"{}\"",
-            print_store_and_ns(&store, &ns),
-            group.group(),
-        );
-    }
 
-    for (info, mark) in prune_info {
-        let keep = keep_all || mark.keep();
+        for (info, mark) in prune_info {
+            let keep = keep_all || mark.keep();
+            let backup_dir = &info.backup_dir;
 
-        let backup_time = info.backup_dir.backup_time();
-        let timestamp = info.backup_dir.backup_time_string();
-        let group: &pbs_api_types::BackupGroup = info.backup_dir.as_ref();
+            let backup_time = backup_dir.backup_time();
+            let timestamp = backup_dir.backup_time_string();
+            let group: &pbs_api_types::BackupGroup = backup_dir.as_ref();
 
-        let msg = format!("{}/{}/{} {}", group.ty, group.id, timestamp, mark,);
+            let msg = format!("{}/{}/{timestamp} {mark}", group.ty, group.id);
 
-        task_log!(worker, "{}", msg);
+            task_log!(worker, "{msg}");
 
-        prune_result.push(json!({
-            "backup-type": group.ty,
-            "backup-id": group.id,
-            "backup-time": backup_time,
-            "keep": keep,
-            "protected": mark.protected(),
-        }));
+            prune_result.push(PruneResult {
+                backup_type: group.ty,
+                backup_id: group.id.clone(),
+                backup_time,
+                keep,
+                protected: mark.protected(),
+                ns: None,
+            });
 
-        if !(dry_run || keep) {
-            if let Err(err) = info.backup_dir.destroy(false) {
-                task_warn!(
-                    worker,
-                    "failed to remove dir {:?}: {}",
-                    info.backup_dir.relative_path(),
-                    err,
-                );
+            if !keep {
+                if let Err(err) = backup_dir.destroy(false) {
+                    task_warn!(
+                        worker,
+                        "failed to remove dir {:?}: {}",
+                        backup_dir.relative_path(),
+                        err,
+                    );
+                }
             }
         }
-    }
-
-    worker.log_result(&Ok(()));
+        prune_result
+    };
 
-    Ok(json!(prune_result))
+    if param["use-task"].as_bool().unwrap_or(false) {
+        let upid = WorkerTask::spawn(
+            "prune",
+            Some(worker_id),
+            auth_id.to_string(),
+            true,
+            move |worker| async move {
+                let _ = prune_group(worker.clone());
+                Ok(())
+            },
+        )?;
+        Ok(json!(upid))
+    } else {
+        let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;
+        let result = prune_group(worker.clone());
+        worker.log_result(&Ok(()));
+        Ok(json!(result))
+    }
 }
 
 #[api(
diff --git a/www/datastore/Prune.js b/www/datastore/Prune.js
index 81f6927b..5752907e 100644
--- a/www/datastore/Prune.js
+++ b/www/datastore/Prune.js
@@ -52,6 +52,7 @@ Ext.define('PBS.Datastore.PruneInputPanel', {
 	if (me.ns && me.ns !== '') {
 	    values.ns = me.ns;
 	}
+	values["use-task"] = true;
 	return values;
     },
 
-- 
2.43.0

Thread overview: 6+ messages
2024-03-08 13:36 [pbs-devel] [PATCH proxmox-backup v3] api: make prune-group a real workertask Gabriel Goller
2024-03-12 10:30 ` Stefan Lendl
2024-03-12 13:07   ` Gabriel Goller
2024-04-02 11:14     ` Stefan Lendl
2024-04-09  9:04 ` Gabriel Goller
2024-04-09 10:44 ` [pbs-devel] applied: " Thomas Lamprecht
