From: Hannes Laimer <h.laimer@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [pbs-devel] [PATCH proxmox-backup 1/2] proxy: move prune logic into new file
Date: Thu, 29 Oct 2020 11:37:48 +0100
Message-ID: <20201029103749.109210-2-h.laimer@proxmox.com>
In-Reply-To: <20201029103749.109210-1-h.laimer@proxmox.com>

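Move the prune task logic out of schedule_datastore_prune() in the
proxy binary into a new do_prune_job() function in
src/server/prune_job.rs, analogous to the existing do_sync_job() and
do_verification_job() helpers. The scheduler now only evaluates the
schedule and acquires the job lock, then delegates the actual work to
do_prune_job().
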
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
---
 src/bin/proxmox-backup-proxy.rs | 67 +++---------------------
 src/server.rs                   |  3 ++
 src/server/prune_job.rs         | 91 +++++++++++++++++++++++++++++++++
 3 files changed, 100 insertions(+), 61 deletions(-)
 create mode 100644 src/server/prune_job.rs

diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs
index ce290171..21c5e9fb 100644
--- a/src/bin/proxmox-backup-proxy.rs
+++ b/src/bin/proxmox-backup-proxy.rs
@@ -49,6 +49,7 @@ use proxmox_backup::tools::{
 
 use proxmox_backup::api2::pull::do_sync_job;
 use proxmox_backup::server::do_verification_job;
+use proxmox_backup::server::do_prune_job;
 
 fn main() -> Result<(), Error> {
     proxmox_backup::tools::setup_safe_path_env();
@@ -358,8 +359,6 @@ async fn schedule_datastore_prune() {
     use proxmox_backup::{
         backup::{
             PruneOptions,
-            BackupGroup,
-            compute_prune_info,
         },
         config::datastore::{
             self,
@@ -376,13 +375,6 @@ async fn schedule_datastore_prune() {
     };
 
     for (store, (_, store_config)) in config.sections {
-        let datastore = match DataStore::lookup_datastore(&store) {
-            Ok(datastore) => datastore,
-            Err(err) => {
-                eprintln!("lookup_datastore '{}' failed - {}", store, err);
-                continue;
-            }
-        };
 
         let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
             Ok(c) => c,
@@ -441,64 +433,17 @@ async fn schedule_datastore_prune() {
 
         if next > now  { continue; }
 
-        let mut job = match Job::new(worker_type, &store) {
+        let job = match Job::new(worker_type, &store) {
             Ok(job) => job,
             Err(_) => continue, // could not get lock
         };
 
-        let store2 = store.clone();
-
-        if let Err(err) = WorkerTask::new_thread(
-            worker_type,
-            Some(store.clone()),
-            Userid::backup_userid().clone(),
-            false,
-            move |worker| {
-
-                job.start(&worker.upid().to_string())?;
-
-                let result = try_block!({
-
-                    worker.log(format!("Starting datastore prune on store \"{}\"", store));
-                    worker.log(format!("task triggered by schedule '{}'", event_str));
-                    worker.log(format!("retention options: {}", prune_options.cli_options_string()));
-
-                    let base_path = datastore.base_path();
-
-                    let groups = BackupGroup::list_groups(&base_path)?;
-                    for group in groups {
-                        let list = group.list_backups(&base_path)?;
-                        let mut prune_info = compute_prune_info(list, &prune_options)?;
-                        prune_info.reverse(); // delete older snapshots first
-
-                        worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
-                                store, group.backup_type(), group.backup_id()));
-
-                        for (info, keep) in prune_info {
-                            worker.log(format!(
-                                    "{} {}/{}/{}",
-                                    if keep { "keep" } else { "remove" },
-                                    group.backup_type(), group.backup_id(),
-                                    info.backup_dir.backup_time_string()));
-                            if !keep {
-                                datastore.remove_backup_dir(&info.backup_dir, true)?;
-                            }
-                        }
-                    }
-                    Ok(())
-                });
-
-                let status = worker.create_state(&result);
-
-                if let Err(err) = job.finish(status) {
-                    eprintln!("could not finish job state for {}: {}", worker_type, err);
-                }
+        let userid = Userid::backup_userid().clone();
 
-                result
-            }
-        ) {
-            eprintln!("unable to start datastore prune on store {} - {}", store2, err);
+        if let Err(err) = do_prune_job(job, prune_options, store.clone(), &userid, Some(event_str)) {
+            eprintln!("unable to start datastore prune job {} - {}", &store, err);
         }
+
     }
 }
 
diff --git a/src/server.rs b/src/server.rs
index dbaec645..86719c42 100644
--- a/src/server.rs
+++ b/src/server.rs
@@ -34,3 +34,6 @@ pub mod jobstate;
 
 mod verify_job;
 pub use verify_job::*;
+
+mod prune_job;
+pub use prune_job::*;
diff --git a/src/server/prune_job.rs b/src/server/prune_job.rs
new file mode 100644
index 00000000..86127733
--- /dev/null
+++ b/src/server/prune_job.rs
@@ -0,0 +1,91 @@
+use anyhow::Error;
+
+use proxmox::try_block;
+
+use crate::{
+    api2::types::*,
+    backup::{compute_prune_info, BackupGroup, DataStore, PruneOptions},
+    server::jobstate::Job,
+    server::WorkerTask,
+    task_log,
+};
+
+pub fn do_prune_job(
+    mut job: Job,
+    prune_options: PruneOptions,
+    store: String,
+    userid: &Userid,
+    schedule: Option<String>,
+) -> Result<String, Error> {
+    let datastore = DataStore::lookup_datastore(&store)?;
+
+    let worker_type = job.jobtype().to_string();
+    let upid_str = WorkerTask::new_thread(
+        &worker_type,
+        Some(job.jobname().to_string()),
+        userid.clone(),
+        false,
+        move |worker| {
+            job.start(&worker.upid().to_string())?;
+
+            let result = try_block!({
+                task_log!(worker, "Starting datastore prune on store \"{}\"", store);
+
+                if let Some(event_str) = schedule {
+                    task_log!(worker, "task triggered by schedule '{}'", event_str);
+                }
+
+                task_log!(
+                    worker,
+                    "retention options: {}",
+                    prune_options.cli_options_string()
+                );
+
+                let base_path = datastore.base_path();
+
+                let groups = BackupGroup::list_groups(&base_path)?;
+                for group in groups {
+                    let list = group.list_backups(&base_path)?;
+                    let mut prune_info = compute_prune_info(list, &prune_options)?;
+                    prune_info.reverse(); // delete older snapshots first
+
+                    task_log!(
+                        worker,
+                        "Starting prune on store \"{}\" group \"{}/{}\"",
+                        store,
+                        group.backup_type(),
+                        group.backup_id()
+                    );
+
+                    for (info, keep) in prune_info {
+                        task_log!(
+                            worker,
+                            "{} {}/{}/{}",
+                            if keep { "keep" } else { "remove" },
+                            group.backup_type(),
+                            group.backup_id(),
+                            info.backup_dir.backup_time_string()
+                        );
+                        if !keep {
+                            datastore.remove_backup_dir(&info.backup_dir, true)?;
+                        }
+                    }
+                }
+                Ok(())
+            });
+
+            let status = worker.create_state(&result);
+
+            if let Err(err) = job.finish(status) {
+                eprintln!(
+                    "could not finish job state for {}: {}",
+                    job.jobtype().to_string(),
+                    err
+                );
+            }
+
+            result
+        },
+    )?;
+    Ok(upid_str)
+}
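
Usage note for reviewers: below is a minimal sketch of how a caller
outside the scheduler could drive the new helper. The
start_manual_prune() wrapper is hypothetical and not part of this
patch; it assumes the same "prune" worker type the scheduler passes to
Job::new().

    use anyhow::Error;
    use proxmox_backup::api2::types::Userid;
    use proxmox_backup::backup::PruneOptions;
    use proxmox_backup::server::{do_prune_job, jobstate::Job};

    // Hypothetical wrapper, not part of this patch: start a prune task
    // for a single datastore via the new do_prune_job() helper.
    fn start_manual_prune(
        store: &str,
        prune_options: PruneOptions,
        userid: &Userid,
    ) -> Result<String, Error> {
        // Taking the job lock fails if a prune job is already running
        // for this datastore.
        let job = Job::new("prune", store)?;
        // Passing None for the schedule skips the "task triggered by
        // schedule" log line; the returned String is the task's UPID.
        do_prune_job(job, prune_options, store.to_string(), userid, None)
    }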
-- 
2.20.1