From: Dominik Csapak <d.csapak@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [pbs-devel] [PATCH proxmox-backup] datastore/prune schedules: use JobState for tracking of schedules
Date: Fri, 18 Sep 2020 16:03:52 +0200
Message-ID: <20200918140352.22294-1-d.csapak@proxmox.com>

like the sync jobs, so that if an admin configures a schedule, the job
really starts the next time that point in time is reached, not immediately

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
best viewed with '-w'
the patch for garbage collection is not yet done; there are more things
to consider there, because we already keep a state in memory for it
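
for context, the decision the proxy makes per datastore boils down to:
"run only if the next calendar event after the last recorded run already
lies in the past". below is a minimal, self-contained sketch of that
decision; job_is_due() and next_event_after are illustrative stand-ins,
not the real API (the actual code uses jobstate::last_run_time() together
with parse_calendar_event()/compute_next_event(), as in the diff below):

// simplified sketch, not proxmox-backup code: `next_event_after` stands in
// for compute_next_event() and `last_run` for jobstate::last_run_time()
fn job_is_due<F>(last_run: i64, now: i64, next_event_after: F) -> bool
where
    F: Fn(i64) -> i64,
{
    // next occurrence of the calendar event, relative to the last run
    // (or to the state file creation time if the job never ran)
    let next = next_event_after(last_run);
    next <= now
}

fn main() {
    // toy "hourly" schedule: the next event is the start of the following hour
    let hourly = |last: i64| (last / 3600 + 1) * 3600;

    let now = 10 * 3600 + 1800; // 10:30
    // last ran at 09:15 -> next event 10:00 -> due
    assert!(job_is_due(9 * 3600 + 900, now, hourly));
    // state file reset at 10:20 -> next event 11:00 -> not due yet; this is
    // why update_datastore() re-creates the state file on a schedule change
    assert!(!job_is_due(10 * 3600 + 1200, now, hourly));
}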

 src/api2/config/datastore.rs    | 16 ++++++-
 src/bin/proxmox-backup-proxy.rs | 83 +++++++++++++++++++--------------
 2 files changed, 64 insertions(+), 35 deletions(-)

diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
index e62ba5ce..870324a3 100644
--- a/src/api2/config/datastore.rs
+++ b/src/api2/config/datastore.rs
@@ -131,6 +131,8 @@ pub fn create_datastore(param: Value) -> Result<(), Error> {
 
     datastore::save_config(&config)?;
 
+    crate::config::jobstate::create_state_file("prune", &datastore.name)?;
+
     Ok(())
 }
 
@@ -312,7 +314,11 @@ pub fn update_datastore(
     }
 
     if gc_schedule.is_some() { data.gc_schedule = gc_schedule; }
-    if prune_schedule.is_some() { data.prune_schedule = prune_schedule; }
+    let mut prune_schedule_changed = false;
+    if prune_schedule.is_some() {
+        prune_schedule_changed = true;
+        data.prune_schedule = prune_schedule;
+    }
     if verify_schedule.is_some() { data.verify_schedule = verify_schedule; }
 
     if keep_last.is_some() { data.keep_last = keep_last; }
@@ -326,6 +332,12 @@ pub fn update_datastore(
 
     datastore::save_config(&config)?;
 
+    // we want to reset the statefile, to avoid an immediate prune in some cases
+    // (e.g. going from monthly to weekly in the second week of the month)
+    if prune_schedule_changed {
+        crate::config::jobstate::create_state_file("prune", &name)?;
+    }
+
     Ok(())
 }
 
@@ -365,6 +377,8 @@ pub fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Erro
 
     datastore::save_config(&config)?;
 
+    crate::config::jobstate::remove_state_file("prune", &name)?;
+
     Ok(())
 }
 
diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs
index 1f349c8c..8a6dfe36 100644
--- a/src/bin/proxmox-backup-proxy.rs
+++ b/src/bin/proxmox-backup-proxy.rs
@@ -337,7 +337,10 @@ async fn schedule_datastore_prune() {
     use proxmox_backup::backup::{
         PruneOptions, DataStore, BackupGroup, compute_prune_info};
     use proxmox_backup::server::{WorkerTask};
-    use proxmox_backup::config::datastore::{self, DataStoreConfig};
+    use proxmox_backup::config::{
+        jobstate::{self, Job},
+        datastore::{self, DataStoreConfig}
+    };
     use proxmox_backup::tools::systemd::time::{
         parse_calendar_event, compute_next_event};
 
@@ -394,16 +397,10 @@ async fn schedule_datastore_prune() {
 
         let worker_type = "prune";
 
-        let last = match lookup_last_worker(worker_type, &store) {
-            Ok(Some(upid)) => {
-                if proxmox_backup::server::worker_is_active_local(&upid) {
-                    continue;
-                }
-                upid.starttime
-            }
-            Ok(None) => 0,
+        let last = match jobstate::last_run_time(worker_type, &store) {
+            Ok(time) => time,
             Err(err) => {
-                eprintln!("lookup_last_job_start failed: {}", err);
+                eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
                 continue;
             }
         };
@@ -421,6 +418,11 @@ async fn schedule_datastore_prune() {
 
         if next > now  { continue; }
 
+        let mut job = match Job::new(worker_type, &store) {
+            Ok(job) => job,
+            Err(_) => continue, // could not get lock
+        };
+
         let store2 = store.clone();
 
         if let Err(err) = WorkerTask::new_thread(
@@ -429,34 +431,47 @@ async fn schedule_datastore_prune() {
             Userid::backup_userid().clone(),
             false,
             move |worker| {
-                worker.log(format!("Starting datastore prune on store \"{}\"", store));
-                worker.log(format!("task triggered by schedule '{}'", event_str));
-                worker.log(format!("retention options: {}", prune_options.cli_options_string()));
-
-                let base_path = datastore.base_path();
-
-                let groups = BackupGroup::list_groups(&base_path)?;
-                for group in groups {
-                    let list = group.list_backups(&base_path)?;
-                    let mut prune_info = compute_prune_info(list, &prune_options)?;
-                    prune_info.reverse(); // delete older snapshots first
-
-                    worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
-                                       store, group.backup_type(), group.backup_id()));
-
-                    for (info, keep) in prune_info {
-                        worker.log(format!(
-                            "{} {}/{}/{}",
-                            if keep { "keep" } else { "remove" },
-                            group.backup_type(), group.backup_id(),
-                            info.backup_dir.backup_time_string()));
-                        if !keep {
-                            datastore.remove_backup_dir(&info.backup_dir, true)?;
+
+                job.start(&worker.upid().to_string())?;
+
+                let result = {
+
+                    worker.log(format!("Starting datastore prune on store \"{}\"", store));
+                    worker.log(format!("task triggered by schedule '{}'", event_str));
+                    worker.log(format!("retention options: {}", prune_options.cli_options_string()));
+
+                    let base_path = datastore.base_path();
+
+                    let groups = BackupGroup::list_groups(&base_path)?;
+                    for group in groups {
+                        let list = group.list_backups(&base_path)?;
+                        let mut prune_info = compute_prune_info(list, &prune_options)?;
+                        prune_info.reverse(); // delete older snapshots first
+
+                        worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
+                                store, group.backup_type(), group.backup_id()));
+
+                        for (info, keep) in prune_info {
+                            worker.log(format!(
+                                    "{} {}/{}/{}",
+                                    if keep { "keep" } else { "remove" },
+                                    group.backup_type(), group.backup_id(),
+                                    info.backup_dir.backup_time_string()));
+                            if !keep {
+                                datastore.remove_backup_dir(&info.backup_dir, true)?;
+                            }
                         }
                     }
+                    Ok(())
+                };
+
+                let status = worker.create_state(&result);
+
+                if let Err(err) = job.finish(status) {
+                    eprintln!("could not finish job state for {}: {}", worker_type, err);
                 }
 
-                Ok(())
+                result
             }
         ) {
             eprintln!("unable to start datastore prune on store {} - {}", store2, err);
-- 
2.20.1

Thread overview: 2+ messages
2020-09-18 14:03 Dominik Csapak [this message]
2020-09-19  4:39 ` [pbs-devel] applied: " Dietmar Maurer
