From: Gabriel Goller <g.goller@proxmox.com>
To: pbs-devel@lists.proxmox.com
Date: Wed, 27 Sep 2023 15:06:08 +0200
Message-Id: <20230927130608.166028-2-g.goller@proxmox.com>
X-Mailer: git-send-email 2.39.2
In-Reply-To: <20230927130608.166028-1-g.goller@proxmox.com>
References: <20230927130608.166028-1-g.goller@proxmox.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Subject: [pbs-devel] [PATCH proxmox-backup v4 2/2] close #4723: api: added endpoint for gc status

Returns general info on the garbage collection, such as:
- Schedule
- State (of last run)
- Duration (of last run)
- Last Run
- Next Run
- Pending Chunks (of last run)
- Removed Chunks (of last run)

Signed-off-by: Gabriel Goller <g.goller@proxmox.com>
---

update v4:
 - separate commits for ui/api
 - cleaned up code
 - show 'next scheduled run' when no gc has ever been run

update v3:
 - ui: removed `required` attribute on items to get the sorting right
 - made `pending_chunks` and `removed_chunks` optional, so that they are
   not shown when no gc run exists

update v2:
 - skip serializing if value is `None`
 - return just the schedule if `upid` doesn't exist (means no gc has been run)
 - ui: removed default values on timestamps
 - ui: removed flex and minHeight properties

 pbs-api-types/src/datastore.rs |  38 +++++++++
 src/api2/admin/datastore.rs    | 138 +++++++++++++++++++++++++++++++--
 2 files changed, 168 insertions(+), 8 deletions(-)
 mode change 100644 => 100755 src/api2/admin/datastore.rs
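Not part of the patch, only an illustration of the resulting wire format: the sketch below is a trimmed-down copy of the new GarbageCollectionInfo type using plain serde derives (without the #[api] macro), assuming serde and serde_json as dependencies. It shows that keys are kebab-cased and that fields left at `None` are skipped, so a datastore without any gc run only reports the schedule and the next scheduled run. The sample values and the "daily" schedule string are made up.

use serde::Serialize;

// Trimmed-down copy of the struct added by this patch, for illustration only.
#[derive(Default, Serialize)]
#[serde(rename_all = "kebab-case")]
struct GarbageCollectionInfo {
    #[serde(skip_serializing_if = "Option::is_none")]
    last_run_upid: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    removed_chunks: Option<usize>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pending_chunks: Option<usize>,
    #[serde(skip_serializing_if = "Option::is_none")]
    schedule: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    next_run: Option<i64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    last_run_endtime: Option<i64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    last_run_state: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    duration: Option<i64>,
}

fn main() {
    // Datastore where gc has never run: only the configured schedule and the
    // computed next run end up in the response; all other fields are omitted.
    let never_ran = GarbageCollectionInfo {
        schedule: Some("daily".to_string()),
        next_run: Some(1_695_859_200),
        ..Default::default()
    };
    // prints: {"schedule":"daily","next-run":1695859200}
    println!("{}", serde_json::to_string(&never_ran).unwrap());
}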
diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs
index 73c4890e..55caa963 100644
--- a/pbs-api-types/src/datastore.rs
+++ b/pbs-api-types/src/datastore.rs
@@ -1250,6 +1250,44 @@ pub struct GarbageCollectionStatus {
     pub still_bad: usize,
 }
 
+#[api(
+    properties: {
+        "last-run-upid": {
+            optional: true,
+            type: UPID,
+        },
+    },
+)]
+#[derive(Clone, Default, Serialize, Deserialize, PartialEq)]
+#[serde(rename_all = "kebab-case")]
+/// Garbage Collection general info
+pub struct GarbageCollectionInfo {
+    /// upid of the last run gc job
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub last_run_upid: Option<String>,
+    /// Number of removed chunks
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub removed_chunks: Option<usize>,
+    /// Number of pending chunks
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub pending_chunks: Option<usize>,
+    /// Schedule of the gc job
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub schedule: Option<String>,
+    /// Time of the next gc run
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub next_run: Option<i64>,
+    /// Endtime of the last gc run
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub last_run_endtime: Option<i64>,
+    /// State of the last gc run
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub last_run_state: Option<String>,
+    /// Duration of last gc run
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub duration: Option<i64>,
+}
+
 #[api(
     properties: {
         "gc-status": {
diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
old mode 100644
new mode 100755
index a95031e7..8a86c233
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -10,6 +10,7 @@ use anyhow::{bail, format_err, Error};
 use futures::*;
 use hyper::http::request::Parts;
 use hyper::{header, Body, Response, StatusCode};
+use proxmox_time::CalendarEvent;
 use serde::Deserialize;
 use serde_json::{json, Value};
 use tokio_stream::wrappers::ReceiverStream;
@@ -33,13 +34,14 @@ use pxar::EntryKind;
 
 use pbs_api_types::{
     print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType,
-    Counts, CryptMode, DataStoreListItem, DataStoreStatus, GarbageCollectionStatus, GroupListItem,
-    KeepOptions, Operation, PruneJobOptions, RRDMode, RRDTimeFrame, SnapshotListItem,
-    SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
-    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
-    MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
-    PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY,
-    UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
+    Counts, CryptMode, DataStoreConfig, DataStoreListItem, DataStoreStatus, GarbageCollectionInfo,
+    GarbageCollectionStatus, GroupListItem, JobScheduleStatus, KeepOptions, Operation,
+    PruneJobOptions, RRDMode, RRDTimeFrame, SnapshotListItem, SnapshotVerifyState,
+    BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
+    BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH,
+    NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY,
+    PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA,
+    VERIFICATION_OUTDATED_AFTER_SCHEMA,
 };
 use pbs_client::pxar::{create_tar, create_zip};
 use pbs_config::CachedUserInfo;
@@ -67,7 +69,7 @@ use crate::backup::{
     ListAccessibleBackupGroups, NS_PRIVS_OK,
 };
 
-use crate::server::jobstate::Job;
+use crate::server::jobstate::{compute_schedule_status, Job, JobState};
 
 const GROUP_NOTES_FILE_NAME: &str = "notes";
 
@@ -1199,6 +1201,122 @@ pub fn garbage_collection_status(
     Ok(status)
 }
 
+#[api(
+    input: {
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+            },
+        },
+    },
+    returns: {
+        type: GarbageCollectionInfo,
+    },
+    access: {
+        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
+    },
+)]
+/// Garbage collection status.
+pub fn garbage_collection_info(
+    store: String,
+    _info: &ApiMethod,
+    _rpcenv: &mut dyn RpcEnvironment,
+) -> Result<GarbageCollectionInfo, Error> {
+    let (config, _) = pbs_config::datastore::config()?;
+    let store_config: DataStoreConfig = config.lookup("datastore", &store)?;
+
+    let mut info = GarbageCollectionInfo {
+        schedule: store_config.gc_schedule,
+        ..Default::default()
+    };
+
+    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
+    let status_in_memory = datastore.last_gc_status();
+    let state_file = JobState::load("garbage_collection", &store)
+        .map_err(|err| {
+            log::error!(
+                "could not open statefile for {:?}: {}",
+                info.last_run_upid,
+                err
+            )
+        })
+        .ok();
+
+    let mut selected_upid = None;
+    if status_in_memory.upid.is_some() {
+        selected_upid = status_in_memory.upid;
+    } else if let Some(JobState::Finished { upid, .. }) = &state_file {
+        selected_upid = Some(upid.to_owned());
+    }
+
+    info.last_run_upid = selected_upid.clone();
+
+    match selected_upid {
+        Some(upid) => {
+            info.removed_chunks = Some(status_in_memory.removed_chunks);
+            info.pending_chunks = Some(status_in_memory.pending_chunks);
+
+            let mut computed_schedule: JobScheduleStatus = JobScheduleStatus::default();
+            let mut duration = None;
+            if let Some(state) = state_file {
+                if let Ok(cs) = compute_schedule_status(&state, info.last_run_upid.as_deref()) {
+                    computed_schedule = cs;
+                }
+            }
+
+            if let Some(endtime) = computed_schedule.last_run_endtime {
+                computed_schedule.next_run = info
+                    .schedule
+                    .as_ref()
+                    .and_then(|s| {
+                        s.parse::<CalendarEvent>()
+                            .map_err(|err| log::error!("{err}"))
+                            .ok()
+                    })
+                    .and_then(|e| {
+                        e.compute_next_event(endtime)
+                            .map_err(|err| log::error!("{err}"))
+                            .ok()
+                    })
+                    .and_then(|ne| ne);
+
+                if let Ok(parsed_upid) = upid.parse::<UPID>() {
+                    duration = Some(endtime - parsed_upid.starttime);
+                }
+            }
+
+            info.next_run = computed_schedule.next_run;
+            info.last_run_endtime = computed_schedule.last_run_endtime;
+            info.last_run_state = computed_schedule.last_run_state;
+            info.duration = duration;
+        }
+        None => {
+            if let Some(schedule) = &info.schedule {
+                info.next_run = schedule
+                    .parse::<CalendarEvent>()
+                    .map_err(|err| log::error!("{err}"))
+                    .ok()
+                    .and_then(|e| {
+                        e.compute_next_event(proxmox_time::epoch_i64())
+                            .map_err(|err| log::error!("{err}"))
+                            .ok()
+                    })
+                    .and_then(|ne| ne);
+
+                if let Ok(event) = schedule.parse::<CalendarEvent>() {
+                    if let Ok(next_event) = event.compute_next_event(proxmox_time::epoch_i64()) {
+                        info.next_run = next_event;
+                    }
+                }
+            } else {
+                return Ok(info);
+            }
+        }
+    }
+
+    Ok(info)
+}
+
 #[api(
     returns: {
         description: "List the accessible datastores.",
@@ -2265,6 +2383,10 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
             .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION),
     ),
+    (
+        "gc_info",
+        &Router::new().get(&API_METHOD_GARBAGE_COLLECTION_INFO),
+    ),
     (
         "group-notes",
         &Router::new()
-- 
2.39.2
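Not part of the patch: a small standalone sketch of the next-run derivation the endpoint performs, assuming proxmox-time's CalendarEvent parsing and compute_next_event behave as they are used in the diff above. The helper name compute_next_run and the "daily" schedule string are invented for illustration, and error handling is reduced to plain anyhow instead of logging.

use anyhow::Error;
use proxmox_time::CalendarEvent;

/// Compute the epoch of the next scheduled gc run: anchor on the endtime of
/// the last run if there was one, otherwise on the current time.
fn compute_next_run(schedule: &str, last_run_endtime: Option<i64>) -> Result<Option<i64>, Error> {
    let event: CalendarEvent = schedule.parse()?;
    let reference = last_run_endtime.unwrap_or_else(proxmox_time::epoch_i64);
    event.compute_next_event(reference)
}

fn main() -> Result<(), Error> {
    // No previous run recorded, so the next event is computed from "now",
    // which matches the fallback branch of the endpoint.
    if let Some(next) = compute_next_run("daily", None)? {
        println!("next gc run at epoch {next}");
    }
    Ok(())
}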