From: Gabriel Goller <g.goller@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [pbs-devel] [PATCH proxmox-backup v4 2/2] close #4723: api: add endpoint for gc status
Date: Wed, 27 Sep 2023 15:06:08 +0200
Message-ID: <20230927130608.166028-2-g.goller@proxmox.com>
In-Reply-To: <20230927130608.166028-1-g.goller@proxmox.com>
Add an endpoint that returns general information on the garbage
collection, such as:
- Schedule
- State (of last run)
- Duration (of last run)
- Last Run
- Next Run
- Pending Chunks (of last run)
- Removed Chunks (of last run)
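For illustration, a GET on the new endpoint might return something like
the following (all values made up; optional fields are omitted while no
gc run exists):

    GET /api2/json/admin/datastore/{store}/gc_info

    {
        "schedule": "daily",
        "last-run-upid": "UPID:pbs:...",
        "last-run-state": "OK",
        "last-run-endtime": 1695800000,
        "next-run": 1695859200,
        "removed-chunks": 1234,
        "pending-chunks": 42,
        "duration": 21
    }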
Signed-off-by: Gabriel Goller <g.goller@proxmox.com>
---
update v4:
- separate commits for ui/api
- cleaned up code
- show 'next scheduled run' when no gc has ever been run
update v3:
- ui: removed `required` attribute on items to get the sorting right
- made `pending_chunks` and `removed_chunks` optional, so that they
  are not shown when no gc run exists
update v2:
- skip serializing if value is `None`
- return just the schedule if `upid` doesn't exist (meaning no gc has
  been run)
- ui: removed default values on timestamps
- ui: removed flex and minHeight properties
 pbs-api-types/src/datastore.rs |  38 +++++++++
 src/api2/admin/datastore.rs    | 133 ++++++++++++++++++++++++++++--
 2 files changed, 163 insertions(+), 8 deletions(-)
diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs
index 73c4890e..55caa963 100644
--- a/pbs-api-types/src/datastore.rs
+++ b/pbs-api-types/src/datastore.rs
@@ -1250,6 +1250,44 @@ pub struct GarbageCollectionStatus {
     pub still_bad: usize,
 }
 
+#[api(
+    properties: {
+        "last-run-upid": {
+            optional: true,
+            type: UPID,
+        },
+    },
+)]
+#[derive(Clone, Default, Serialize, Deserialize, PartialEq)]
+#[serde(rename_all = "kebab-case")]
+/// Garbage Collection general info
+pub struct GarbageCollectionInfo {
+    /// upid of the last run gc job
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub last_run_upid: Option<String>,
+    /// Number of removed chunks
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub removed_chunks: Option<usize>,
+    /// Number of pending chunks
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub pending_chunks: Option<usize>,
+    /// Schedule of the gc job
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub schedule: Option<String>,
+    /// Time of the next gc run
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_run: Option<i64>,
+    /// Endtime of the last gc run
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub last_run_endtime: Option<i64>,
+    /// State of the last gc run
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub last_run_state: Option<String>,
+    /// Duration of last gc run
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub duration: Option<i64>,
+}
+
 #[api(
     properties: {
         "gc-status": {
diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index a95031e7..8a86c233 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -10,6 +10,7 @@ use anyhow::{bail, format_err, Error};
 use futures::*;
 use hyper::http::request::Parts;
 use hyper::{header, Body, Response, StatusCode};
+use proxmox_time::CalendarEvent;
 use serde::Deserialize;
 use serde_json::{json, Value};
 use tokio_stream::wrappers::ReceiverStream;
@@ -33,13 +34,14 @@ use pxar::EntryKind;
 
 use pbs_api_types::{
     print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType,
-    Counts, CryptMode, DataStoreListItem, DataStoreStatus, GarbageCollectionStatus, GroupListItem,
-    KeepOptions, Operation, PruneJobOptions, RRDMode, RRDTimeFrame, SnapshotListItem,
-    SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
-    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
-    MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
-    PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY,
-    UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
+    Counts, CryptMode, DataStoreConfig, DataStoreListItem, DataStoreStatus, GarbageCollectionInfo,
+    GarbageCollectionStatus, GroupListItem, JobScheduleStatus, KeepOptions, Operation,
+    PruneJobOptions, RRDMode, RRDTimeFrame, SnapshotListItem, SnapshotVerifyState,
+    BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
+    BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH,
+    NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY,
+    PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA,
+    VERIFICATION_OUTDATED_AFTER_SCHEMA,
 };
 use pbs_client::pxar::{create_tar, create_zip};
 use pbs_config::CachedUserInfo;
@@ -67,7 +69,7 @@ use crate::backup::{
     ListAccessibleBackupGroups, NS_PRIVS_OK,
 };
 
-use crate::server::jobstate::Job;
+use crate::server::jobstate::{compute_schedule_status, Job, JobState};
 
 const GROUP_NOTES_FILE_NAME: &str = "notes";
 
@@ -1199,6 +1201,117 @@ pub fn garbage_collection_status(
     Ok(status)
 }
 
+#[api(
+    input: {
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+            },
+        },
+    },
+    returns: {
+        type: GarbageCollectionInfo,
+    },
+    access: {
+        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
+    },
+)]
+/// Garbage collection information.
+pub fn garbage_collection_info(
+    store: String,
+    _info: &ApiMethod,
+    _rpcenv: &mut dyn RpcEnvironment,
+) -> Result<GarbageCollectionInfo, Error> {
+    let (config, _) = pbs_config::datastore::config()?;
+    let store_config: DataStoreConfig = config.lookup("datastore", &store)?;
+
+    let mut info = GarbageCollectionInfo {
+        schedule: store_config.gc_schedule,
+        ..Default::default()
+    };
+
+    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
+    let status_in_memory = datastore.last_gc_status();
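+    // The job state file records the last finished gc run even across daemon restarts.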
+    let state_file = JobState::load("garbage_collection", &store)
+        .map_err(|err| log::error!("could not open statefile for datastore '{store}': {err}"))
+        .ok();
+
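+    // Prefer the UPID of a gc run still known in memory; otherwise fall back
+    // to the last finished job recorded in the state file.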
+    let mut selected_upid = None;
+    if status_in_memory.upid.is_some() {
+        selected_upid = status_in_memory.upid;
+    } else if let Some(JobState::Finished { upid, .. }) = &state_file {
+        selected_upid = Some(upid.to_owned());
+    }
+
+    info.last_run_upid = selected_upid.clone();
+
+    match selected_upid {
+        Some(upid) => {
+            info.removed_chunks = Some(status_in_memory.removed_chunks);
+            info.pending_chunks = Some(status_in_memory.pending_chunks);
+
+            let mut computed_schedule = JobScheduleStatus::default();
+            let mut duration = None;
+            if let Some(state) = state_file {
+                if let Ok(cs) = compute_schedule_status(&state, info.last_run_upid.as_deref()) {
+                    computed_schedule = cs;
+                }
+            }
+
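+            // Derive the next run from the configured schedule, anchored at
+            // the end time of the last run.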
+            if let Some(endtime) = computed_schedule.last_run_endtime {
+                computed_schedule.next_run = info
+                    .schedule
+                    .as_ref()
+                    .and_then(|s| {
+                        s.parse::<CalendarEvent>()
+                            .map_err(|err| log::error!("{err}"))
+                            .ok()
+                    })
+                    .and_then(|e| {
+                        e.compute_next_event(endtime)
+                            .map_err(|err| log::error!("{err}"))
+                            .ok()
+                    })
+                    .flatten();
+
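+                // Duration of the last run: end time minus the start time
+                // encoded in the UPID.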
+                if let Ok(parsed_upid) = upid.parse::<UPID>() {
+                    duration = Some(endtime - parsed_upid.starttime);
+                }
+            }
+
+            info.next_run = computed_schedule.next_run;
+            info.last_run_endtime = computed_schedule.last_run_endtime;
+            info.last_run_state = computed_schedule.last_run_state;
+            info.duration = duration;
+        }
+        None => {
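+            // No gc has run yet; only the next scheduled run (if any) can be reported.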
+            if let Some(schedule) = &info.schedule {
+                info.next_run = schedule
+                    .parse::<CalendarEvent>()
+                    .map_err(|err| log::error!("{err}"))
+                    .ok()
+                    .and_then(|event| {
+                        event
+                            .compute_next_event(proxmox_time::epoch_i64())
+                            .map_err(|err| log::error!("{err}"))
+                            .ok()
+                    })
+                    .flatten();
+            }
+        }
+    }
+
+    Ok(info)
+}
+
 #[api(
     returns: {
         description: "List the accessible datastores.",
@@ -2265,6 +2378,10 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
             .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
             .post(&API_METHOD_START_GARBAGE_COLLECTION),
     ),
+    (
+        "gc_info",
+        &Router::new().get(&API_METHOD_GARBAGE_COLLECTION_INFO),
+    ),
     (
         "group-notes",
         &Router::new()
--
2.39.2