From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [pbs-devel] [RFC v2 proxmox-backup 30/42] datastore: prune groups/snapshots from S3 object store backend
Date: Thu, 29 May 2025 16:31:55 +0200
Message-ID: <20250529143207.694497-31-c.ebner@proxmox.com>
In-Reply-To: <20250529143207.694497-1-c.ebner@proxmox.com>
When pruning a backup group or a backup snapshot for a datastore with an
S3 object store backend, also remove the associated objects from the
object store by deleting them based on their common key prefix.
To exclude protected contents, filter the deletion based on the presence
of the protected marker.
Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
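Illustrative sketch only, not part of the applied patch: all removal paths
below derive an object key prefix from the relative on-disk path and issue
a single bulk deletion via the client methods introduced earlier in this
series. Assuming the client type is named S3Client and using a hypothetical
helper prune_group_objects, the group prune roughly amounts to:

    use anyhow::{bail, Error};
    use pbs_s3_client::{S3Client, S3_CONTENT_PREFIX};

    // Sketch: assumes delete_objects_by_prefix_with_suffix_filter() returns
    // Ok(true) if any per-object deletion failed.
    fn prune_group_objects(s3_client: &S3Client, group_prefix: &str) -> Result<(), Error> {
        // Object keys mirror the datastore layout below the content prefix.
        let prefix = format!("{S3_CONTENT_PREFIX}/{group_prefix}");
        // The suffix filter excludes protected contents, marked by the
        // presence of a ".protected" marker object.
        let delete_objects_error = proxmox_async::runtime::block_on(
            s3_client.delete_objects_by_prefix_with_suffix_filter(&prefix, ".protected"),
        )?;
        if delete_objects_error {
            bail!("deleting objects failed");
        }
        Ok(())
    }

One bulk call per group keeps the number of S3 API requests low, while
protected snapshots are skipped and remain both locally and in the bucket.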
pbs-datastore/src/backup_info.rs | 45 +++++++++++++++++++++++++++++---
pbs-datastore/src/datastore.rs | 34 +++++++++++++++++++++---
src/api2/admin/datastore.rs | 24 +++++++++++------
3 files changed, 88 insertions(+), 15 deletions(-)
diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs
index 1422fe865..b9ac286ad 100644
--- a/pbs-datastore/src/backup_info.rs
+++ b/pbs-datastore/src/backup_info.rs
@@ -8,6 +8,7 @@ use std::time::Duration;
use anyhow::{bail, format_err, Context, Error};
+use pbs_s3_client::S3_CONTENT_PREFIX;
use proxmox_sys::fs::{lock_dir_noblock, lock_dir_noblock_shared, replace_file, CreateOptions};
use proxmox_systemd::escape_unit;
@@ -18,7 +19,7 @@ use pbs_api_types::{
use pbs_config::{open_backup_lockfile, BackupLockGuard};
use crate::manifest::{BackupManifest, MANIFEST_LOCK_NAME};
-use crate::{DataBlob, DataStore};
+use crate::{DataBlob, DataStore, DatastoreBackend};
pub const DATASTORE_LOCKS_DIR: &str = "/run/proxmox-backup/locks";
@@ -214,7 +215,7 @@ impl BackupGroup {
///
/// Returns `BackupGroupDeleteStats`, containing the number of deleted snapshots
/// and number of protected snaphsots, which therefore were not removed.
- pub fn destroy(&self) -> Result<BackupGroupDeleteStats, Error> {
+ pub fn destroy(&self, backend: &DatastoreBackend) -> Result<BackupGroupDeleteStats, Error> {
let _guard = self
.lock()
.with_context(|| format!("while destroying group '{self:?}'"))?;
@@ -228,10 +229,26 @@ impl BackupGroup {
delete_stats.increment_protected_snapshots();
continue;
}
- snap.destroy(false)?;
+ // For the S3 backend too, only clean up local state here; the actual S3
+ // objects are removed in bulk below, reducing the number of required API calls.
+ snap.destroy(false, &DatastoreBackend::Filesystem)?;
delete_stats.increment_removed_snapshots();
}
+ if let DatastoreBackend::S3(s3_client) = backend {
+ let path = self.relative_group_path();
+ let group_prefix = path
+ .to_str()
+ .ok_or_else(|| format_err!("invalid group path prefix"))?;
+ let prefix = format!("{S3_CONTENT_PREFIX}/{group_prefix}");
+ let delete_objects_error = proxmox_async::runtime::block_on(
+ s3_client.delete_objects_by_prefix_with_suffix_filter(&prefix, ".protected"),
+ )?;
+ if delete_objects_error {
+ bail!("deleting objects failed");
+ }
+ }
+
// Note: make sure the old locking mechanism isn't used as `remove_dir_all` is not safe in
// that case
if delete_stats.all_removed() && !*OLD_LOCKING {
@@ -577,7 +594,7 @@ impl BackupDir {
/// Destroy the whole snapshot, bails if it's protected
///
/// Setting `force` to true skips locking and thus ignores if the backup is currently in use.
- pub fn destroy(&self, force: bool) -> Result<(), Error> {
+ pub fn destroy(&self, force: bool, backend: &DatastoreBackend) -> Result<(), Error> {
let (_guard, _manifest_guard);
if !force {
_guard = self
@@ -590,6 +607,19 @@ impl BackupDir {
bail!("cannot remove protected snapshot"); // use special error type?
}
+ if let DatastoreBackend::S3(s3_client) = backend {
+ let path = self.relative_path();
+ let snapshot_prefix = path
+ .to_str()
+ .ok_or_else(|| format_err!("invalid snapshot path"))?;
+ let prefix = format!("{S3_CONTENT_PREFIX}/{snapshot_prefix}");
+ let delete_objects_error =
+ proxmox_async::runtime::block_on(s3_client.delete_objects_by_prefix(&prefix))?;
+ if delete_objects_error {
+ bail!("deleting objects failed");
+ }
+ }
+
let full_path = self.full_path();
log::info!("removing backup snapshot {:?}", full_path);
std::fs::remove_dir_all(&full_path).map_err(|err| {
@@ -619,6 +649,13 @@ impl BackupDir {
// do to rectify the situation.
if guard.is_ok() && group.list_backups()?.is_empty() && !*OLD_LOCKING {
group.remove_group_dir()?;
+ if let DatastoreBackend::S3(s3_client) = backend {
+ let path = group.relative_group_path().join("owner");
+ let owner_key = path
+ .to_str()
+ .ok_or_else(|| format_err!("invalid group path prefix"))?;
+ proxmox_async::runtime::block_on(s3_client.delete_object(owner_key.into()))?;
+ }
} else if let Err(err) = guard {
log::debug!("{err:#}");
}
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index 5c8b49947..d016e2139 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -29,6 +29,7 @@ use pbs_api_types::{
S3ClientSecretsConfig, UPID,
};
use pbs_config::BackupLockGuard;
+use pbs_s3_client::S3_CONTENT_PREFIX;
use crate::backup_info::{BackupDir, BackupGroup, BackupInfo, OLD_LOCKING};
use crate::chunk_store::ChunkStore;
@@ -643,7 +644,9 @@ impl DataStore {
let mut stats = BackupGroupDeleteStats::default();
for group in self.iter_backup_groups(ns.to_owned())? {
- let delete_stats = group?.destroy()?;
+ let group = group?;
+ let backend = self.backend()?;
+ let delete_stats = group.destroy(&backend)?;
stats.add(&delete_stats);
removed_all_groups = removed_all_groups && delete_stats.all_removed();
}
@@ -677,6 +680,8 @@ impl DataStore {
let store = self.name();
let mut removed_all_requested = true;
let mut stats = BackupGroupDeleteStats::default();
+ let backend = self.backend()?;
+
if delete_groups {
log::info!("removing whole namespace recursively below {store}:/{ns}",);
for ns in self.recursive_iter_backup_ns(ns.to_owned())? {
@@ -684,6 +689,20 @@ impl DataStore {
stats.add(&delete_stats);
removed_all_requested = removed_all_requested && removed_ns_groups;
}
+
+ if let DatastoreBackend::S3(s3_client) = &backend {
+ let ns_dir = ns.path();
+ let ns_prefix = ns_dir
+ .to_str()
+ .ok_or_else(|| format_err!("invalid namespace path prefix"))?;
+ let prefix = format!("{S3_CONTENT_PREFIX}/{ns_prefix}");
+ let delete_objects_error = proxmox_async::runtime::block_on(
+ s3_client.delete_objects_by_prefix_with_suffix_filter(&prefix, ".protected"),
+ )?;
+ if delete_objects_error {
+ bail!("deleting objects failed");
+ }
+ }
} else {
log::info!("pruning empty namespace recursively below {store}:/{ns}");
}
@@ -719,6 +738,15 @@ impl DataStore {
log::warn!("failed to remove namespace {ns} - {err}")
}
}
+ if let DatastoreBackend::S3(s3_client) = &backend {
+ // Only remove the namespace marker if the namespace was empty;
+ // removing the marker is then equivalent to removing the namespace.
+ let ns_dir = ns.path().join(NAMESPACE_MARKER_FILENAME);
+ let ns_key = ns_dir
+ .to_str()
+ .ok_or_else(|| format_err!("invalid namespace path"))?;
+ proxmox_async::runtime::block_on(s3_client.delete_object(ns_key.into()))?;
+ }
}
}
@@ -736,7 +764,7 @@ impl DataStore {
) -> Result<BackupGroupDeleteStats, Error> {
let backup_group = self.backup_group(ns.clone(), backup_group.clone());
- backup_group.destroy()
+ backup_group.destroy(&self.backend()?)
}
/// Remove a backup directory including all content
@@ -748,7 +776,7 @@ impl DataStore {
) -> Result<(), Error> {
let backup_dir = self.backup_dir(ns.clone(), backup_dir.clone())?;
- backup_dir.destroy(force)
+ backup_dir.destroy(force, &self.backend()?)
}
/// Returns the time of the last successful backup
diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index 7b7f79b22..c62b980d1 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -432,7 +432,7 @@ pub async fn delete_snapshot(
let snapshot = datastore.backup_dir(ns, backup_dir)?;
- snapshot.destroy(false)?;
+ snapshot.destroy(false, &datastore.backend()?)?;
Ok(Value::Null)
})
@@ -1098,13 +1098,21 @@ pub fn prune(
});
if !keep {
- if let Err(err) = backup_dir.destroy(false) {
- warn!(
- "failed to remove dir {:?}: {}",
- backup_dir.relative_path(),
- err,
- );
- }
+ match datastore.backend() {
+ Ok(backend) => {
+ if let Err(err) = backup_dir.destroy(false, &backend) {
+ warn!(
+ "failed to remove dir {:?}: {}",
+ backup_dir.relative_path(),
+ err,
+ );
+ }
+ }
+ Err(err) => warn!(
+ "failed to remove dir {:?}: {err}",
+ backup_dir.relative_path()
+ ),
+ };
}
}
prune_result
--
2.39.5