From: Hannes Laimer <h.laimer@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [PATCH proxmox-backup v8 06/13] datastore: add move-group
Date: Wed, 22 Apr 2026 15:39:44 +0200 [thread overview]
Message-ID: <20260422133951.192862-7-h.laimer@proxmox.com> (raw)
In-Reply-To: <20260422133951.192862-1-h.laimer@proxmox.com>
Add support for moving a single backup group to a different namespace
within the same datastore.
For the filesystem backend each snapshot directory is renamed. For
S3, the objects belonging to a snapshot are copied to the target
prefix before the source objects are deleted, one snapshot at a time.
Exclusive locks on the group and all its snapshots are acquired
before the move. Snapshots are locked and moved in batches to avoid
exhausting file descriptors on groups with many snapshots.
If the target group already exists and merging is enabled, snapshots
are merged into it provided both groups share the same owner and the
source snapshots are strictly newer than the target's. The group notes
file is renamed into place when the target has none; if both groups
have notes and their contents diverge during a merge, the target's
notes are kept and a warning is logged.
Before each rename, move_to records the post-rename index paths in the
move journal (see the preceding patch), so a concurrent GC phase-1
drain can still mark the moved chunks if the namespace iteration
missed them at both the old and new locations.
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
---
pbs-datastore/src/backup_info.rs | 260 ++++++++++++++++++++++++++++++-
pbs-datastore/src/datastore.rs | 250 ++++++++++++++++++++++++++---
2 files changed, 485 insertions(+), 25 deletions(-)
diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs
index 3f7f7c90..69949c51 100644
--- a/pbs-datastore/src/backup_info.rs
+++ b/pbs-datastore/src/backup_info.rs
@@ -9,18 +9,19 @@ use std::time::Duration;
use anyhow::{bail, format_err, Context, Error};
use const_format::concatcp;
-use proxmox_s3_client::S3PathPrefix;
+use proxmox_s3_client::{S3ObjectKey, S3PathPrefix};
use proxmox_sys::fs::{lock_dir_noblock, lock_dir_noblock_shared, replace_file, CreateOptions};
use proxmox_systemd::escape_unit;
use pbs_api_types::{
- Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, GroupFilter, VerifyState,
- BACKUP_DATE_REGEX, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME,
+ ArchiveType, Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, GroupFilter,
+ VerifyState, BACKUP_DATE_REGEX, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME,
};
use pbs_config::{open_backup_lockfile, BackupLockGuard};
use crate::datastore::{GROUP_NOTES_FILE_NAME, GROUP_OWNER_FILE_NAME};
use crate::manifest::{BackupManifest, MANIFEST_LOCK_NAME};
+use crate::move_journal;
use crate::s3::S3_CONTENT_PREFIX;
use crate::{DataBlob, DataStore, DatastoreBackend};
@@ -274,6 +275,120 @@ impl BackupGroup {
Ok(delete_stats)
}
+ /// Check merge invariants for moving this group's snapshots into `target`.
+ /// Returns an error if ownership differs or snapshot times overlap.
+ pub(crate) fn check_merge_invariants(&self, target: &BackupGroup) -> Result<(), Error> {
+ let src_owner = self.get_owner()?;
+ let tgt_owner = target.get_owner()?;
+ if src_owner != tgt_owner {
+ bail!(
+ "cannot merge group '{}/{}' from '{}' into '{}': owner mismatch \
+ (source: {src_owner}, target: {tgt_owner})",
+ self.group.ty,
+ self.group.id,
+ self.ns,
+ target.ns,
+ );
+ }
+
+ let src_oldest = self
+ .iter_snapshots()?
+ .filter_map(Result::ok)
+ .map(|s| s.backup_time())
+ .min();
+
+ if let Some(src_oldest) = src_oldest {
+ // Any target snapshot with time >= src_oldest violates the
+ // "source strictly newer than target" merge invariant. Short-circuit on the first hit.
+ if let Some(overlap) = target
+ .iter_snapshots()?
+ .filter_map(Result::ok)
+ .map(|s| s.backup_time())
+ .find(|t| *t >= src_oldest)
+ {
+ bail!(
+ "cannot merge group '{}/{}' from '{}' into '{}': snapshot time overlap \
+ (oldest source: {src_oldest}, conflicting target: {overlap})",
+ self.group.ty,
+ self.group.id,
+ self.ns,
+ target.ns,
+ );
+ }
+ }
+
+ Ok(())
+ }
+
+ /// Move the group notes file (if any) from this group to `target`. Caller must hold
+ /// exclusive group locks on both.
+ ///
+ /// Behavior:
+ /// - If the source has no notes, do nothing.
+ /// - If only the source has notes, upload them to the target's S3 notes object first
+ /// (when the backend is S3), then rename the local file. A failure in either step
+ /// aborts the move so the source is left intact for the caller to retry; the S3
+ /// upload uses replace semantics so re-running is idempotent.
+ /// - If both source and target have notes (merge case):
+ /// - identical contents: leave the target unchanged. The source copy will be
+ /// removed by `destroy()`.
+ /// - diverging contents: log a warning and keep the target's notes.
+ pub(crate) fn move_notes_to(
+ &self,
+ target: &BackupGroup,
+ backend: &DatastoreBackend,
+ ) -> Result<(), Error> {
+ let src_notes_path = self.store.group_notes_path(&self.ns, &self.group);
+ let dst_notes_path = target.store.group_notes_path(&target.ns, &target.group);
+
+ let src_notes = match std::fs::read(&src_notes_path) {
+ Ok(v) => v,
+ Err(ref err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(()),
+ Err(err) => {
+ bail!("reading source group notes {src_notes_path:?} failed: {err}")
+ }
+ };
+
+ match std::fs::read(&dst_notes_path) {
+ Ok(dst_notes) => {
+ if dst_notes != src_notes {
+ log::warn!(
+ "group notes differ during merge of '{}' from '{}' into '{}' - keeping target's notes",
+ self.group, self.ns, target.ns,
+ );
+ }
+ // Identical or intentionally kept: source copy will be removed by destroy().
+ return Ok(());
+ }
+ Err(ref err) if err.kind() == std::io::ErrorKind::NotFound => {}
+ Err(err) => {
+ bail!("reading target group notes {dst_notes_path:?} failed: {err}")
+ }
+ }
+
+ if let DatastoreBackend::S3(s3_client) = backend {
+ let dst_key = crate::s3::object_key_from_path(
+ &target.relative_group_path(),
+ GROUP_NOTES_FILE_NAME,
+ )
+ .context("invalid target notes object key")?;
+ let data = hyper::body::Bytes::copy_from_slice(&src_notes);
+ proxmox_async::runtime::block_on(s3_client.upload_replace_with_retry(dst_key, data))
+ .with_context(|| {
+ format!(
+ "failed to upload group notes on S3 backend for '{}' in '{}'",
+ target.group, target.ns,
+ )
+ })?;
+ }
+
+ std::fs::rename(&src_notes_path, &dst_notes_path).with_context(|| {
+ format!("failed to move group notes {src_notes_path:?} -> {dst_notes_path:?}")
+ })?;
+
+ Ok(())
+ }
+
/// Helper function, assumes that no more snapshots are present in the group.
fn remove_group_dir(&self) -> Result<(), Error> {
let note_path = self.store.group_notes_path(&self.ns, &self.group);
@@ -613,6 +728,145 @@ impl BackupDir {
}
}
+ /// Move this snapshot into `target`.
+ ///
+ /// For the filesystem backend, renames the snapshot directory. For S3, copies all
+ /// objects under the snapshot prefix to the target, renames the local cache directory,
+ /// then deletes the source objects. A copy failure returns an error with the snapshot
+ /// intact at source. A delete failure is logged as a warning.
+ ///
+ /// Before the rename, each index file's post-rename path is recorded in the per-datastore
+ /// move journal so a concurrent GC phase-1 drain can still mark their chunks - see
+ /// `move_journal` for the race this closes. If the rename then fails the journal entry
+ /// becomes a ghost that the drain skips on `open_index_reader` returning `None`.
+ ///
+ /// The caller must hold an exclusive lock on this snapshot, hold exclusive locks on
+ /// both source and target groups, and ensure the target group directory exists.
+ pub(crate) fn move_to(
+ &self,
+ target: &BackupGroup,
+ backend: &DatastoreBackend,
+ ) -> Result<(), Error> {
+ if !Arc::ptr_eq(&self.store, &target.store) {
+ bail!("cannot move snapshot across different datastores");
+ }
+
+ let target_snap = target.backup_dir_with_rfc3339(self.backup_time_string.clone())?;
+ let src_snap_path = self.full_path();
+ let dst_snap_path = target_snap.full_path();
+
+ // Enumerate the index files at source (they are still there until the
+ // rename below) and record their future paths in the move journal so a
+ // concurrent GC phase-1 drain can mark their chunks even if the
+ // hierarchy iteration missed both the source and target.
+ let mut journal_entries: Vec<PathBuf> = Vec::new();
+ for entry in std::fs::read_dir(&src_snap_path)
+ .with_context(|| format!("failed to list source snapshot dir {src_snap_path:?}"))?
+ {
+ let entry =
+ entry.with_context(|| format!("failed to read entry in {src_snap_path:?}"))?;
+ let name = entry.file_name();
+ let Some(name_str) = name.to_str() else {
+ continue;
+ };
+ if matches!(
+ ArchiveType::from_path(name_str),
+ Ok(ArchiveType::FixedIndex) | Ok(ArchiveType::DynamicIndex)
+ ) {
+ journal_entries.push(dst_snap_path.join(&name));
+ }
+ }
+ move_journal::append_moved_indices(self.store.name(), &journal_entries)?;
+
+ match backend {
+ DatastoreBackend::Filesystem => {
+ std::fs::rename(&src_snap_path, &dst_snap_path).with_context(|| {
+ format!("failed to move snapshot {src_snap_path:?} to {dst_snap_path:?}")
+ })?;
+ }
+ DatastoreBackend::S3(s3_client) => {
+ let src_rel = self.relative_path();
+ let src_rel_str = src_rel
+ .to_str()
+ .ok_or_else(|| format_err!("invalid source snapshot path"))?;
+ let src_prefix_str = format!("{S3_CONTENT_PREFIX}/{src_rel_str}/");
+
+ let dst_rel = target_snap.relative_path();
+ let dst_rel_str = dst_rel
+ .to_str()
+ .ok_or_else(|| format_err!("invalid target snapshot path"))?;
+ let dst_prefix_str = format!("{S3_CONTENT_PREFIX}/{dst_rel_str}/");
+
+ let store_prefix = format!("{}/", self.store.name());
+
+ // Copy all objects for this snapshot to the target prefix. On failure the
+ // source snapshot remains intact and any partial target copies stay as
+ // leftovers (visible via the API for cleanup).
+ let prefix = S3PathPrefix::Some(src_prefix_str.clone());
+ let mut token: Option<String> = None;
+ let mut src_keys = Vec::new();
+
+ loop {
+ let result = proxmox_async::runtime::block_on(
+ s3_client.list_objects_v2(&prefix, token.as_deref()),
+ )
+ .context("failed to list snapshot objects on S3 backend")?;
+
+ for item in result.contents {
+ let full_key_str: &str = &item.key;
+ let rel_key =
+ full_key_str.strip_prefix(&store_prefix).ok_or_else(|| {
+ format_err!("unexpected key prefix in '{full_key_str}'")
+ })?;
+ let src_key = S3ObjectKey::try_from(rel_key)?;
+
+ let suffix = rel_key
+ .strip_prefix(&src_prefix_str)
+ .ok_or_else(|| format_err!("unexpected key format '{rel_key}'"))?;
+ let dst_key_str = format!("{dst_prefix_str}{suffix}");
+ let dst_key = S3ObjectKey::try_from(dst_key_str.as_str())?;
+
+ proxmox_async::runtime::block_on(
+ s3_client.copy_object(src_key.clone(), dst_key),
+ )
+ .with_context(|| format!("failed to copy S3 object '{rel_key}'"))?;
+ src_keys.push(src_key);
+ }
+
+ if result.is_truncated {
+ token = result.next_continuation_token;
+ } else {
+ break;
+ }
+ }
+
+ std::fs::rename(&src_snap_path, &dst_snap_path).with_context(|| {
+ format!("failed to move snapshot cache {src_snap_path:?} to {dst_snap_path:?}")
+ })?;
+
+ // Delete source S3 objects. Treat failures as warnings since the snapshot
+ // is already at the target.
+ for src_key in src_keys {
+ if let Err(err) =
+ proxmox_async::runtime::block_on(s3_client.delete_object(src_key.clone()))
+ {
+ log::warn!(
+ "S3 move: failed to delete source object '{src_key:?}' \
+ (snapshot already at target, orphaned object requires manual removal): \
+ {err:#}"
+ );
+ }
+ }
+ }
+ }
+
+ // Clean up stale source lock files under /run for this snapshot.
+ let _ = std::fs::remove_file(self.manifest_lock_path());
+ let _ = std::fs::remove_file(self.lock_path());
+
+ Ok(())
+ }
+
/// Destroy the whole snapshot, bails if it's protected
///
/// Setting `force` to true skips locking and thus ignores if the backup is currently in use.
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index 319b4cba..d215dd0d 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -88,6 +88,15 @@ const CHUNK_LOCK_TIMEOUT: Duration = Duration::from_secs(3 * 60 * 60);
const S3_DELETE_BATCH_LIMIT: usize = 100;
// max defer time for s3 batch deletions
const S3_DELETE_DEFER_LIMIT_SECONDS: Duration = Duration::from_secs(60 * 5);
+// upper bound on concurrent snapshot locks during move operations to avoid exhausting FDs
+const MAX_SNAPSHOT_LOCKS: usize = 512;
+
+/// Error from a locked per-group operation. Soft errors (e.g. lock conflicts)
+/// are retryable, hard errors are not.
+pub(crate) enum BackupGroupOpError {
+ Soft(Error),
+ Hard(Error),
+}
/// Check that moving/syncing `namespaces` from `source` to `target` stays within
/// `MAX_NAMESPACE_DEPTH`.
@@ -1187,6 +1196,174 @@ impl DataStore {
backup_group.destroy(guard, &self.backend()?)
}
+ /// Validate a group-move request without touching locks.
+ ///
+ /// Fast pre-flight check used by the API endpoint so we can fail synchronously before spawning
+ /// a worker. The authoritative post-lock gate lives in [`Self::lock_and_move_group`]. This
+ /// function does *not* replace it, and it does *not* check ownership - the caller handles
+ /// that.
+ pub fn check_move_group(
+ self: &Arc<Self>,
+ source_ns: &BackupNamespace,
+ target_ns: &BackupNamespace,
+ group: &pbs_api_types::BackupGroup,
+ merge_group: bool,
+ ) -> Result<(), Error> {
+ if source_ns == target_ns {
+ bail!("source and target namespace must be different");
+ }
+ if !self.namespace_exists(target_ns) {
+ bail!("target namespace '{target_ns}' does not exist");
+ }
+ if !self.backup_group(source_ns.clone(), group.clone()).exists() {
+ bail!("group '{group}' does not exist in namespace '{source_ns}'");
+ }
+ if self.backup_group(target_ns.clone(), group.clone()).exists() && !merge_group {
+ bail!(
+ "group '{group}' already exists in target namespace '{target_ns}' \
+ and merge-group is disabled"
+ );
+ }
+ Ok(())
+ }
+
+ /// Move a single backup group to a different namespace within the same datastore.
+ pub fn move_group(
+ self: &Arc<Self>,
+ source_ns: &BackupNamespace,
+ group: &pbs_api_types::BackupGroup,
+ target_ns: &BackupNamespace,
+ merge_group: bool,
+ ) -> Result<(), Error> {
+ if *OLD_LOCKING {
+ bail!("move operations require new-style file-based locking");
+ }
+
+ let source_group = self.backup_group(source_ns.clone(), group.clone());
+ let backend = self.backend()?;
+
+ self.lock_and_move_group(source_ns, &source_group, target_ns, &backend, merge_group)
+ .map_err(|err| match err {
+ BackupGroupOpError::Soft(err) | BackupGroupOpError::Hard(err) => err,
+ })
+ }
+
+ /// Lock and move a single group. Acquires exclusive locks on both source and target groups,
+ /// then moves snapshots in batches - each batch is locked, moved, and released before the next
+ /// batch starts. This ensures no concurrent readers (e.g. verify) can operate on snapshots
+ /// being moved, without exhausting file descriptors.
+ ///
+ /// `source_ns` is the subtree root of the caller's operation, not necessarily the group's own
+ /// namespace: for a namespace move, a group can live deeper than `source_ns`, so the prefix is
+ /// rewritten to its new target location.
+ ///
+ /// Returns a `BackupGroupOpError` on failure, indicating whether the failure was a transient
+ /// lock conflict (retryable) or a hard error.
+ fn lock_and_move_group(
+ self: &Arc<Self>,
+ source_ns: &BackupNamespace,
+ group: &BackupGroup,
+ target_ns: &BackupNamespace,
+ backend: &DatastoreBackend,
+ merge_groups: bool,
+ ) -> Result<(), BackupGroupOpError> {
+ let target_group_ns = group
+ .backup_ns()
+ .map_prefix(source_ns, target_ns)
+ .map_err(BackupGroupOpError::Hard)?;
+
+ // Lock the source first so concurrent lock conflicts are detected before we touch the
+ // target structure. Acquisition order is source -> target (via
+ // `create_locked_backup_group_do` below). `.lock()` is non-blocking, so two concurrent
+ // mirror moves (A -> B and B -> A on same-named groups) fail fast with `Soft` rather than
+ // deadlocking.
+ let source_guard = group.lock().map_err(BackupGroupOpError::Soft)?;
+
+ // Post-lock validation: re-runs the pre-flight checks now that we hold the source lock,
+ // closing the TOCTOU gap between the API-level pre-check and the worker. This also stops
+ // the target-group create below from silently creating a missing target namespace
+ // (intentional for backup/pull/tape-restore, not for move).
+ self.check_move_group(
+ group.backup_ns(),
+ &target_group_ns,
+ group.group(),
+ merge_groups,
+ )
+ .map_err(BackupGroupOpError::Hard)?;
+
+ let source_owner = group.get_owner().map_err(BackupGroupOpError::Hard)?;
+
+ // Create-or-open the target group under lock. Reports whether the group was created (= no
+ // merge) or already existed (= merge), and differentiates a target lock conflict (Soft)
+ // from other failures (Hard).
+ let (_, created, _target_guard) =
+ self.create_locked_backup_group_do(&target_group_ns, group.group(), &source_owner)?;
+ let target_group = self.backup_group(target_group_ns.clone(), group.group().clone());
+ let merge = !created;
+
+ if merge && !merge_groups {
+ return Err(BackupGroupOpError::Hard(format_err!(
+ "group '{}' already exists in target namespace '{target_group_ns}' \
+ and merging is disabled",
+ group.group(),
+ )));
+ }
+ if merge {
+ group
+ .check_merge_invariants(&target_group)
+ .map_err(BackupGroupOpError::Hard)?;
+ }
+
+ let mut snapshots: Vec<_> = group
+ .iter_snapshots()
+ .map_err(BackupGroupOpError::Hard)?
+ .collect::<Result<Vec<_>, _>>()
+ .map_err(BackupGroupOpError::Hard)?;
+ // Sort by time so a partial failure (e.g. batch N succeeds but N+1 fails) leaves a clean
+ // time-ordered split: all moved snapshots are older than all remaining ones. This keeps
+ // the merge invariant (source oldest > target newest) valid on retry.
+ snapshots.sort_by_key(|s| s.backup_time());
+
+ log::info!(
+ "{} group '{}/{}' from '{}' to '{target_group_ns}'",
+ if merge { "merging" } else { "moving" },
+ group.group().ty,
+ group.group().id,
+ group.backup_ns(),
+ );
+
+ // Each batch is locked, moved, and released before the next to exclude concurrent readers.
+ // `move_to` writes to the per-datastore move journal before each rename so a concurrent
+ // GC phase-1 drain can still mark the moved chunks; see `move_journal` for the race.
+ for chunk in snapshots.chunks(MAX_SNAPSHOT_LOCKS) {
+ let _batch_locks: Vec<BackupLockGuard> = chunk
+ .iter()
+ .map(|snap| snap.lock().map_err(BackupGroupOpError::Soft))
+ .collect::<Result<Vec<_>, _>>()?;
+
+ for snap in chunk {
+ snap.move_to(&target_group, backend)
+ .map_err(BackupGroupOpError::Hard)?;
+ }
+ // batch locks released here
+ }
+
+ // Move the group notes file (local rename + S3 upload). Diverging notes during a merge are
+ // kept on the target and logged.
+ group
+ .move_notes_to(&target_group, backend)
+ .map_err(BackupGroupOpError::Hard)?;
+
+ // All snapshots and notes are at the target. The source group holds only owner/lock
+ // metadata now, so hand the source lock to `destroy()` to clean up the owner, the group
+ // directory, and any group-level S3 objects in one place.
+ group
+ .destroy(source_guard, backend)
+ .map_err(BackupGroupOpError::Hard)?;
+
+ Ok(())
+ }
+
/// Remove a backup directory including all content
pub fn remove_backup_dir(
self: &Arc<Self>,
@@ -1308,37 +1485,66 @@ impl DataStore {
backup_group: &pbs_api_types::BackupGroup,
auth_id: &Authid,
) -> Result<(Authid, BackupLockGuard), Error> {
+ let (owner, _created, guard) = self
+ .create_locked_backup_group_do(ns, backup_group, auth_id)
+ .map_err(|err| match err {
+ BackupGroupOpError::Soft(err) | BackupGroupOpError::Hard(err) => err,
+ })?;
+ Ok((owner, guard))
+ }
+
+ /// Inner variant of [`Self::create_locked_backup_group`] that also reports whether the group
+ /// directory was newly created, and distinguishes a transient lock conflict
+ /// ([`BackupGroupOpError::Soft`]) from other errors ([`BackupGroupOpError::Hard`]).
+ pub(crate) fn create_locked_backup_group_do(
+ self: &Arc<Self>,
+ ns: &BackupNamespace,
+ backup_group: &pbs_api_types::BackupGroup,
+ auth_id: &Authid,
+ ) -> Result<(Authid, bool, BackupLockGuard), BackupGroupOpError> {
let backup_group = self.backup_group(ns.clone(), backup_group.clone());
- // create intermediate path first
let full_path = backup_group.full_group_path();
std::fs::create_dir_all(full_path.parent().ok_or_else(|| {
- format_err!("could not construct parent path for group {backup_group:?}")
- })?)?;
+ BackupGroupOpError::Hard(format_err!(
+ "could not construct parent path for group {backup_group:?}"
+ ))
+ })?)
+ .map_err(|err| BackupGroupOpError::Hard(err.into()))?;
+
+ // Try to create the group directory. This lets us distinguish "created" from "already
+ // existed".
+ let created = match std::fs::create_dir(&full_path) {
+ Ok(_) => true,
+ Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => false,
+ Err(err) => {
+ return Err(BackupGroupOpError::Hard(format_err!(
+ "unable to create backup group {full_path:?} - {err}"
+ )))
+ }
+ };
- // now create the group, this allows us to check whether it existed before
- match std::fs::create_dir(&full_path) {
- Ok(_) => {
- let guard = backup_group.lock().with_context(|| {
- format!("while creating new locked backup group '{backup_group:?}'")
- })?;
- if let Err(err) = self.set_owner(ns, backup_group.group(), auth_id, false) {
- let _ = std::fs::remove_dir(&full_path);
- return Err(err);
- }
- let owner = self.get_owner(ns, backup_group.group())?; // just to be sure
- Ok((owner, guard))
+ let guard = backup_group.lock().map_err(|err| {
+ if created {
+ let _ = std::fs::remove_dir(&full_path);
}
- Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
- let guard = backup_group.lock().with_context(|| {
- format!("while creating locked backup group '{backup_group:?}'")
- })?;
- let owner = self.get_owner(ns, backup_group.group())?; // just to be sure
- Ok((owner, guard))
+ BackupGroupOpError::Soft(err.context(format!(
+ "while {} locked backup group '{backup_group:?}'",
+ if created { "creating new" } else { "creating" },
+ )))
+ })?;
+
+ if created {
+ if let Err(err) = self.set_owner(ns, backup_group.group(), auth_id, false) {
+ let _ = std::fs::remove_dir(&full_path);
+ return Err(BackupGroupOpError::Hard(err));
}
- Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
}
+ let owner = self
+ .get_owner(ns, backup_group.group())
+ .map_err(BackupGroupOpError::Hard)?;
+ Ok((owner, created, guard))
}
/// Creates a new backup snapshot inside a BackupGroup
--
2.47.3
next prev parent reply other threads:[~2026-04-22 13:40 UTC|newest]
Thread overview: 21+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-22 13:39 [PATCH proxmox-backup v8 00/13] fixes #6195: add support for moving groups and namespaces Hannes Laimer
2026-04-22 13:39 ` [PATCH proxmox-backup v8 01/13] ui: show empty groups Hannes Laimer
2026-04-22 13:39 ` [PATCH proxmox-backup v8 02/13] datastore: lift check_namespace_depth_limit to pbs-datastore Hannes Laimer
2026-04-22 13:39 ` [PATCH proxmox-backup v8 03/13] datastore: have BackupGroup::destroy consume the group lock Hannes Laimer
2026-04-22 13:39 ` [PATCH proxmox-backup v8 04/13] datastore: split remove_namespace into flat and recursive variants Hannes Laimer
2026-04-22 13:39 ` [PATCH proxmox-backup v8 05/13] datastore: add move journal for coordinating with gc phase 1 Hannes Laimer
2026-04-22 13:39 ` Hannes Laimer [this message]
2026-04-22 13:39 ` [PATCH proxmox-backup v8 07/13] datastore: add move-namespace Hannes Laimer
2026-04-22 13:39 ` [PATCH proxmox-backup v8 08/13] docs: add section on moving namespaces and groups Hannes Laimer
2026-04-22 13:39 ` [PATCH proxmox-backup v8 09/13] api: add POST endpoint for move-group Hannes Laimer
2026-04-22 13:39 ` [PATCH proxmox-backup v8 10/13] api: add POST endpoint for move-namespace Hannes Laimer
2026-04-22 13:39 ` [PATCH proxmox-backup v8 11/13] ui: add move group action Hannes Laimer
2026-04-23 13:35 ` Michael Köppl
2026-04-23 13:47 ` Hannes Laimer
2026-04-22 13:39 ` [PATCH proxmox-backup v8 12/13] ui: add move namespace action Hannes Laimer
2026-04-23 14:49 ` Michael Köppl
2026-04-22 13:39 ` [PATCH proxmox-backup v8 13/13] cli: add move-namespace and move-group commands Hannes Laimer
2026-04-23 16:29 ` [PATCH proxmox-backup v8 00/13] fixes #6195: add support for moving groups and namespaces Michael Köppl
2026-04-23 22:38 ` applied: " Thomas Lamprecht
2026-04-24 8:31 ` Fabian Grünbichler
2026-04-24 8:43 ` Hannes Laimer
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260422133951.192862-7-h.laimer@proxmox.com \
--to=h.laimer@proxmox.com \
--cc=pbs-devel@lists.proxmox.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox