From: Stefan Lendl <s.lendl@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [pbs-devel] [PATCH v2 proxmox-backup 1/8] api: garbage collect job status
Date: Tue, 6 Feb 2024 15:51:36 +0100
Message-ID: <20240206145142.1175527-3-s.lendl@proxmox.com>
In-Reply-To: <20240206145142.1175527-2-s.lendl@proxmox.com>
Adds an API endpoint on the datastore that reports the GC job status
(example response below):
- Schedule
- State (of last run)
- Duration (of last run)
- Last Run
- Next Run (if scheduled)
- Pending Chunks (of last run)
- Removed Chunks (of last run)
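
For illustration, a response from the new per-datastore endpoint could
look like this (hypothetical values; the kebab-case field names come
from the GarbageCollectionJobStatus type added below):

    {
        "store": "store1",
        "schedule": "daily",
        "last-run-upid": "UPID:pbs:...",
        "last-run-state": "OK",
        "last-run-endtime": 1707212345,
        "duration": 120,
        "removed-chunks": 53,
        "pending-chunks": 7,
        "next-run": 1707264000
    }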
Adds a dedicated endpoint admin/gc that reports the GC job status for
all datastores, including those without a gc-schedule.
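
The resulting paths, as registered in the routers below, are:

    GET /api2/json/admin/datastore/{store}/gc-job-status
    GET /api2/json/admin/gc
    GET /api2/json/admin/gc/{store}

For manual testing, something like the following should work (assuming
proxmox-backup-debug's generic api subcommand):

    proxmox-backup-debug api get /admin/gc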
Originally-by: Gabriel Goller <g.goller@proxmox.com>
Signed-off-by: Stefan Lendl <s.lendl@proxmox.com>
---
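Not part of the patch itself: a minimal standalone sketch of how
next-run is derived from the configured schedule, using the same
proxmox_time calls as the datastore.rs hunk below.

    use proxmox_time::CalendarEvent;

    /// Epoch of the next GC run after `last`, or None if the schedule
    /// does not parse or yields no further event.
    fn next_gc_run(schedule: &str, last: i64) -> Option<i64> {
        let event: CalendarEvent = schedule.parse().ok()?;
        // compute_next_event returns Result<Option<i64>>: Err on an
        // internal error, Ok(None) if no event follows `last`.
        event.compute_next_event(last).ok().flatten()
    }
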
 pbs-api-types/src/datastore.rs |  40 +++++++++
 src/api2/admin/datastore.rs    | 117 +++++++++++++++++++++++++++++++++++-
 src/api2/admin/gc.rs           |  55 ++++++++++++++++
 src/api2/admin/mod.rs          |   2 +
 4 files changed, 211 insertions(+), 3 deletions(-)
 create mode 100644 src/api2/admin/gc.rs
diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs
index cce9888b..ba3879c9 100644
--- a/pbs-api-types/src/datastore.rs
+++ b/pbs-api-types/src/datastore.rs
@@ -1270,6 +1270,46 @@ pub struct GarbageCollectionStatus {
     pub still_bad: usize,
 }
 
+#[api(
+    properties: {
+        "last-run-upid": {
+            optional: true,
+            type: UPID,
+        },
+    },
+)]
+#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
+#[serde(rename_all = "kebab-case")]
+/// Garbage collection job status
+pub struct GarbageCollectionJobStatus {
+    /// Datastore
+    pub store: String,
+    /// UPID of the last run GC job
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub last_run_upid: Option<String>,
+    /// Number of removed chunks
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub removed_chunks: Option<usize>,
+    /// Number of pending chunks
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub pending_chunks: Option<usize>,
+    /// Schedule of the GC job
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub schedule: Option<String>,
+    /// Time of the next GC run
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_run: Option<i64>,
+    /// End time of the last GC run
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub last_run_endtime: Option<i64>,
+    /// State of the last GC run
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub last_run_state: Option<String>,
+    /// Duration of the last GC run
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub duration: Option<i64>,
+}
+
 #[api(
     properties: {
         "gc-status": {
diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index a95031e7..357cae0a 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -27,18 +27,20 @@ use proxmox_sys::fs::{
     file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
 };
 use proxmox_sys::{task_log, task_warn};
+use proxmox_time::CalendarEvent;
 use pxar::accessor::aio::Accessor;
 use pxar::EntryKind;
 
 use pbs_api_types::{
     print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType,
-    Counts, CryptMode, DataStoreListItem, DataStoreStatus, GarbageCollectionStatus, GroupListItem,
+    Counts, CryptMode, DataStoreConfig, DataStoreListItem, DataStoreStatus,
+    GarbageCollectionJobStatus, GarbageCollectionStatus, GroupListItem, JobScheduleStatus,
     KeepOptions, Operation, PruneJobOptions, RRDMode, RRDTimeFrame, SnapshotListItem,
     SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
     BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
     MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
-    PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY,
+    PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID,
     UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
 };
 use pbs_client::pxar::{create_tar, create_zip};
@@ -67,7 +69,7 @@ use crate::backup::{
     ListAccessibleBackupGroups, NS_PRIVS_OK,
 };
 
-use crate::server::jobstate::Job;
+use crate::server::jobstate::{compute_schedule_status, Job, JobState};
 
 const GROUP_NOTES_FILE_NAME: &str = "notes";
 
@@ -1199,6 +1201,111 @@ pub fn garbage_collection_status(
     Ok(status)
 }
 
+#[api(
+    input: {
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+            },
+        },
+    },
+    returns: {
+        type: GarbageCollectionJobStatus,
+    },
+    access: {
+        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
+    },
+)]
+/// Garbage collection job status.
+pub fn garbage_collection_job_status(
+    store: String,
+    _info: &ApiMethod,
+    _rpcenv: &mut dyn RpcEnvironment,
+) -> Result<GarbageCollectionJobStatus, Error> {
+    let (config, _) = pbs_config::datastore::config()?;
+    let store_config: DataStoreConfig = config.lookup("datastore", &store)?;
+
+    let mut info = GarbageCollectionJobStatus {
+        store: store.clone(),
+        schedule: store_config.gc_schedule,
+        ..Default::default()
+    };
+
+    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
+    let status_in_memory = datastore.last_gc_status();
+    let state_file = JobState::load("garbage_collection", &store)
+        .map_err(|err| log::error!("could not open statefile for {store}: {err}"))
+        .ok();
+
+    let mut selected_upid = None;
+    if status_in_memory.upid.is_some() {
+        selected_upid = status_in_memory.upid;
+    } else if let Some(JobState::Finished { upid, .. }) = &state_file {
+        selected_upid = Some(upid.to_owned());
+    }
+
+    info.last_run_upid = selected_upid.clone();
+
+    match selected_upid {
+        Some(upid) => {
+            info.removed_chunks = Some(status_in_memory.removed_chunks);
+            info.pending_chunks = Some(status_in_memory.pending_chunks);
+
+            let mut computed_schedule = JobScheduleStatus::default();
+            let mut duration = None;
+            if let Some(state) = state_file {
+                if let Ok(cs) = compute_schedule_status(&state, info.last_run_upid.as_deref()) {
+                    computed_schedule = cs;
+                }
+            }
+
+            if let Some(endtime) = computed_schedule.last_run_endtime {
+                computed_schedule.next_run = info
+                    .schedule
+                    .as_ref()
+                    .and_then(|s| {
+                        s.parse::<CalendarEvent>()
+                            .map_err(|err| log::error!("{err}"))
+                            .ok()
+                    })
+                    .and_then(|e| {
+                        e.compute_next_event(endtime)
+                            .map_err(|err| log::error!("{err}"))
+                            .ok()
+                    })
+                    .flatten();
+
+                if let Ok(parsed_upid) = upid.parse::<UPID>() {
+                    duration = Some(endtime - parsed_upid.starttime);
+                }
+            }
+
+            info.next_run = computed_schedule.next_run;
+            info.last_run_endtime = computed_schedule.last_run_endtime;
+            info.last_run_state = computed_schedule.last_run_state;
+            info.duration = duration;
+        }
+        None => {
+            if let Some(schedule) = &info.schedule {
+                info.next_run = schedule
+                    .parse::<CalendarEvent>()
+                    .map_err(|err| log::error!("{err}"))
+                    .ok()
+                    .and_then(|e| {
+                        e.compute_next_event(proxmox_time::epoch_i64())
+                            .map_err(|err| log::error!("{err}"))
+                            .ok()
+                    })
+                    .flatten();
+            } else {
+                return Ok(info);
+            }
+        }
+    }
+
+    Ok(info)
+}
+
 #[api(
     returns: {
         description: "List the accessible datastores.",
@@ -2265,6 +2372,10 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
             .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
             .post(&API_METHOD_START_GARBAGE_COLLECTION),
     ),
+    (
+        "gc-job-status",
+        &Router::new().get(&API_METHOD_GARBAGE_COLLECTION_JOB_STATUS),
+    ),
     (
         "group-notes",
         &Router::new()
diff --git a/src/api2/admin/gc.rs b/src/api2/admin/gc.rs
new file mode 100644
index 00000000..7535f369
--- /dev/null
+++ b/src/api2/admin/gc.rs
@@ -0,0 +1,55 @@
+use anyhow::Error;
+use serde_json::Value;
+
+use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment};
+use proxmox_schema::api;
+
+use pbs_api_types::{GarbageCollectionJobStatus, DATASTORE_SCHEMA};
+
+use crate::api2::admin::datastore::{garbage_collection_job_status, get_datastore_list};
+
+#[api(
+    input: {
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+                optional: true,
+            },
+        },
+    },
+    returns: {
+        description: "List configured GC jobs and their status",
+        type: Array,
+        items: { type: GarbageCollectionJobStatus },
+    },
+    access: {
+        permission: &Permission::Anybody,
+        description: "Requires Datastore.Audit or Datastore.Modify on datastore.",
+    },
+)]
+/// List all GC jobs (max one per datastore)
+pub fn list_all_gc_jobs(
+    store: Option<String>,
+    _param: Value,
+    info: &ApiMethod,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Vec<GarbageCollectionJobStatus>, Error> {
+    let gc_info = match store {
+        Some(store) => {
+            garbage_collection_job_status(store, info, rpcenv).map(|info| vec![info])?
+        }
+        None => get_datastore_list(Value::Null, info, rpcenv)?
+            .into_iter()
+            .map(|store_list_item| store_list_item.store)
+            .filter_map(|store| garbage_collection_job_status(store, info, rpcenv).ok())
+            .collect::<Vec<_>>(),
+    };
+
+    Ok(gc_info)
+}
+
+const GC_ROUTER: Router = Router::new().get(&API_METHOD_LIST_ALL_GC_JOBS);
+
+pub const ROUTER: Router = Router::new()
+    .get(&API_METHOD_LIST_ALL_GC_JOBS)
+    .match_all("store", &GC_ROUTER);
diff --git a/src/api2/admin/mod.rs b/src/api2/admin/mod.rs
index 168dc038..a1c49f8e 100644
--- a/src/api2/admin/mod.rs
+++ b/src/api2/admin/mod.rs
@@ -5,6 +5,7 @@ use proxmox_router::{Router, SubdirMap};
 use proxmox_sortable_macro::sortable;
 
 pub mod datastore;
+pub mod gc;
 pub mod metrics;
 pub mod namespace;
 pub mod prune;
@@ -17,6 +18,7 @@ const SUBDIRS: SubdirMap = &sorted!([
     ("datastore", &datastore::ROUTER),
+    ("gc", &gc::ROUTER),
     ("metrics", &metrics::ROUTER),
     ("prune", &prune::ROUTER),
     ("sync", &sync::ROUTER),
     ("traffic-control", &traffic_control::ROUTER),
     ("verify", &verify::ROUTER),
--
2.43.0