* [PATCH proxmox-backup v8 01/13] ui: show empty groups
2026-04-22 13:39 [PATCH proxmox-backup v8 00/13] fixes #6195: add support for moving groups and namespaces Hannes Laimer
@ 2026-04-22 13:39 ` Hannes Laimer
2026-04-22 13:39 ` [PATCH proxmox-backup v8 02/13] datastore: lift check_namespace_depth_limit to pbs-datastore Hannes Laimer
` (13 subsequent siblings)
14 siblings, 0 replies; 21+ messages in thread
From: Hannes Laimer @ 2026-04-22 13:39 UTC (permalink / raw)
To: pbs-devel
Display groups that have no snapshots. Currently, deleting the last
snapshot also removes the parent group, which causes metadata like
notes to be lost.
Showing empty groups is also needed for cleaning up partially failed
moves on an S3-backed datastore. Without showing them, the only way to
delete leftover groups (and their orphaned S3 objects) would be through
the API.
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
---
www/datastore/Content.js | 89 +++++++++++++++++++++++++++-------------
1 file changed, 61 insertions(+), 28 deletions(-)
diff --git a/www/datastore/Content.js b/www/datastore/Content.js
index a2aa1949..dfb7787c 100644
--- a/www/datastore/Content.js
+++ b/www/datastore/Content.js
@@ -139,6 +139,22 @@ Ext.define('PBS.DataStoreContent', {
});
},
+ makeGroupEntry: function (btype, backupId) {
+ let cls = PBS.Utils.get_type_icon_cls(btype);
+ if (cls === '') {
+ return null;
+ }
+ return {
+ text: btype + '/' + backupId,
+ leaf: false,
+ iconCls: 'fa ' + cls,
+ expanded: false,
+ backup_type: btype,
+ backup_id: backupId,
+ children: [],
+ };
+ },
+
getRecordGroups: function (records) {
let groups = {};
@@ -150,27 +166,20 @@ Ext.define('PBS.DataStoreContent', {
continue;
}
- let cls = PBS.Utils.get_type_icon_cls(btype);
- if (cls === '') {
+ let entry = this.makeGroupEntry(btype, item.data['backup-id']);
+ if (entry === null) {
console.warn(`got unknown backup-type '${btype}'`);
continue; // FIXME: auto render? what do?
}
- groups[group] = {
- text: group,
- leaf: false,
- iconCls: 'fa ' + cls,
- expanded: false,
- backup_type: item.data['backup-type'],
- backup_id: item.data['backup-id'],
- children: [],
- };
+ groups[group] = entry;
}
return groups;
},
- updateGroupNotes: async function (view) {
+ loadGroups: async function () {
+ let view = this.getView();
try {
let url = `/api2/extjs/admin/datastore/${view.datastore}/groups`;
if (view.namespace && view.namespace !== '') {
@@ -179,19 +188,24 @@ Ext.define('PBS.DataStoreContent', {
let {
result: { data: groups },
} = await Proxmox.Async.api2({ url });
- let map = {};
- for (const group of groups) {
- map[`${group['backup-type']}/${group['backup-id']}`] = group.comment;
- }
- view.getRootNode().cascade((node) => {
- if (node.data.ty === 'group') {
- let group = `${node.data.backup_type}/${node.data.backup_id}`;
- node.set('comment', map[group], { dirty: false });
- }
- });
+ return groups;
} catch (err) {
console.debug(err);
}
+ return [];
+ },
+
+ updateGroupNotes: function (view, groupList) {
+ let map = {};
+ for (const group of groupList) {
+ map[`${group['backup-type']}/${group['backup-id']}`] = group.comment;
+ }
+ view.getRootNode().cascade((node) => {
+ if (node.data.ty === 'group') {
+ let group = `${node.data.backup_type}/${node.data.backup_id}`;
+ node.set('comment', map[group], { dirty: false });
+ }
+ });
},
loadNamespaceFromSameLevel: async function () {
@@ -215,7 +229,10 @@ Ext.define('PBS.DataStoreContent', {
let me = this;
let view = this.getView();
- let namespaces = await me.loadNamespaceFromSameLevel();
+ let [namespaces, groupList] = await Promise.all([
+ me.loadNamespaceFromSameLevel(),
+ me.loadGroups(),
+ ]);
if (!success) {
// TODO also check error code for != 403 ?
@@ -232,6 +249,22 @@ Ext.define('PBS.DataStoreContent', {
let groups = this.getRecordGroups(records);
+ for (const item of groupList) {
+ let btype = item['backup-type'];
+ let group = btype + '/' + item['backup-id'];
+ if (groups[group] !== undefined) {
+ continue;
+ }
+ let entry = me.makeGroupEntry(btype, item['backup-id']);
+ if (entry === null) {
+ continue;
+ }
+ entry.leaf = true;
+ entry.comment = item.comment;
+ entry.owner = item.owner;
+ groups[group] = entry;
+ }
+
let selected;
let expanded = {};
@@ -399,7 +432,7 @@ Ext.define('PBS.DataStoreContent', {
);
}
- this.updateGroupNotes(view);
+ this.updateGroupNotes(view, groupList);
if (selected !== undefined) {
let selection = view.getRootNode().findChildBy(
@@ -985,7 +1018,7 @@ Ext.define('PBS.DataStoreContent', {
flex: 1,
renderer: (v, meta, record) => {
let data = record.data;
- if (!data || data.leaf || data.root) {
+ if (!data || (data.leaf && data.ty !== 'group') || data.root) {
return '';
}
@@ -1029,7 +1062,7 @@ Ext.define('PBS.DataStoreContent', {
},
dblclick: function (tree, el, row, col, ev, rec) {
let data = rec.data || {};
- if (data.leaf || data.root) {
+ if ((data.leaf && data.ty !== 'group') || data.root) {
return;
}
let view = tree.up();
@@ -1065,7 +1098,7 @@ Ext.define('PBS.DataStoreContent', {
getTip: (v, m, rec) => Ext.String.format(gettext("Prune '{0}'"), v),
getClass: (v, m, { data }) =>
data.ty === 'group' ? 'fa fa-scissors' : 'pmx-hidden',
- isActionDisabled: (v, r, c, i, { data }) => data.ty !== 'group',
+ isActionDisabled: (v, r, c, i, { data }) => data.ty !== 'group' || !!data.leaf,
},
{
handler: 'onProtectionChange',
@@ -1230,7 +1263,7 @@ Ext.define('PBS.DataStoreContent', {
return ''; // TODO: accumulate verify of all groups into root NS node?
}
let i = (cls, txt) => `<i class="fa fa-fw fa-${cls}"></i> ${txt}`;
- if (v === undefined || v === null) {
+ if (v === undefined || v === null || record.data.count === 0) {
return record.data.leaf ? '' : i('question-circle-o warning', gettext('None'));
}
let tip, iconCls, txt;
--
2.47.3
^ permalink raw reply [flat|nested] 21+ messages in thread* [PATCH proxmox-backup v8 02/13] datastore: lift check_namespace_depth_limit to pbs-datastore
2026-04-22 13:39 [PATCH proxmox-backup v8 00/13] fixes #6195: add support for moving groups and namespaces Hannes Laimer
2026-04-22 13:39 ` [PATCH proxmox-backup v8 01/13] ui: show empty groups Hannes Laimer
@ 2026-04-22 13:39 ` Hannes Laimer
2026-04-22 13:39 ` [PATCH proxmox-backup v8 03/13] datastore: have BackupGroup::destroy consume the group lock Hannes Laimer
` (12 subsequent siblings)
14 siblings, 0 replies; 21+ messages in thread
From: Hannes Laimer @ 2026-04-22 13:39 UTC (permalink / raw)
To: pbs-devel
Move-namespace needs the same depth check sync (push/pull) already
uses. Move it to pbs-datastore as a free helper so both share one
implementation.
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
---
pbs-datastore/src/datastore.rs | 25 ++++++++++++++++++++++++-
pbs-datastore/src/lib.rs | 6 +++---
src/server/pull.rs | 9 +++++----
src/server/push.rs | 7 +++----
src/server/sync.rs | 21 ---------------------
5 files changed, 35 insertions(+), 33 deletions(-)
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index f1475d77..5cdf6a4a 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -34,7 +34,7 @@ use pbs_api_types::{
ArchiveType, Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, ChunkOrder,
DataStoreConfig, DatastoreBackendConfig, DatastoreBackendType, DatastoreFSyncLevel,
DatastoreTuning, GarbageCollectionCacheStats, GarbageCollectionStatus, MaintenanceMode,
- MaintenanceType, Operation, S3Statistics, UPID,
+ MaintenanceType, Operation, S3Statistics, MAX_NAMESPACE_DEPTH, UPID,
};
use pbs_config::s3::S3_CFG_TYPE_ID;
use pbs_config::{BackupLockGuard, ConfigVersionCache};
@@ -89,6 +89,29 @@ const S3_DELETE_BATCH_LIMIT: usize = 100;
// max defer time for s3 batch deletions
const S3_DELETE_DEFER_LIMIT_SECONDS: Duration = Duration::from_secs(60 * 5);
+/// Check that moving/syncing `namespaces` from `source` to `target` stays within
+/// `MAX_NAMESPACE_DEPTH`.
+pub fn check_namespace_depth_limit(
+ source: &BackupNamespace,
+ target: &BackupNamespace,
+ namespaces: &[BackupNamespace],
+) -> Result<(), Error> {
+ let target_depth = target.depth();
+ let sub_depth = namespaces
+ .iter()
+ .map(BackupNamespace::depth)
+ .max()
+ .map_or(0, |v| v - source.depth());
+
+ if sub_depth + target_depth > MAX_NAMESPACE_DEPTH {
+ bail!(
+ "namespace depth limit exceeded: source subtree depth ({sub_depth}) + target \
+ depth ({target_depth}) would exceed max ({MAX_NAMESPACE_DEPTH})",
+ );
+ }
+ Ok(())
+}
+
/// checks if auth_id is owner, or, if owner is a token, if
/// auth_id is the user of the token
pub fn check_backup_owner(owner: &Authid, auth_id: &Authid) -> Result<(), Error> {
diff --git a/pbs-datastore/src/lib.rs b/pbs-datastore/src/lib.rs
index 9fa9dd59..4d8ac505 100644
--- a/pbs-datastore/src/lib.rs
+++ b/pbs-datastore/src/lib.rs
@@ -216,9 +216,9 @@ pub use store_progress::StoreProgress;
mod datastore;
pub use datastore::{
- check_backup_owner, ensure_datastore_is_mounted, get_datastore_mount_status, DataStore,
- DataStoreLookup, DatastoreBackend, S3_CLIENT_REQUEST_COUNTER_BASE_PATH,
- S3_DATASTORE_IN_USE_MARKER,
+ check_backup_owner, check_namespace_depth_limit, ensure_datastore_is_mounted,
+ get_datastore_mount_status, DataStore, DataStoreLookup, DatastoreBackend,
+ S3_CLIENT_REQUEST_COUNTER_BASE_PATH, S3_DATASTORE_IN_USE_MARKER,
};
mod hierarchy;
diff --git a/src/server/pull.rs b/src/server/pull.rs
index 688f9557..16a59fee 100644
--- a/src/server/pull.rs
+++ b/src/server/pull.rs
@@ -25,13 +25,14 @@ use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{BackupManifest, FileInfo};
use pbs_datastore::read_chunk::AsyncReadChunk;
-use pbs_datastore::{check_backup_owner, DataStore, DatastoreBackend, StoreProgress};
+use pbs_datastore::{
+ check_backup_owner, check_namespace_depth_limit, DataStore, DatastoreBackend, StoreProgress,
+};
use pbs_tools::sha::sha256;
use super::sync::{
- check_namespace_depth_limit, exclude_not_verified_or_encrypted,
- ignore_not_verified_or_encrypted, LocalSource, RemoteSource, RemovedVanishedStats, SkipInfo,
- SkipReason, SyncSource, SyncSourceReader, SyncStats,
+ exclude_not_verified_or_encrypted, ignore_not_verified_or_encrypted, LocalSource, RemoteSource,
+ RemovedVanishedStats, SkipInfo, SkipReason, SyncSource, SyncSourceReader, SyncStats,
};
use crate::backup::{check_ns_modification_privs, check_ns_privs};
use crate::tools::parallel_handler::ParallelHandler;
diff --git a/src/server/push.rs b/src/server/push.rs
index e69973b4..82319993 100644
--- a/src/server/push.rs
+++ b/src/server/push.rs
@@ -26,12 +26,11 @@ use pbs_datastore::dynamic_index::DynamicIndexReader;
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::read_chunk::AsyncReadChunk;
-use pbs_datastore::{DataStore, StoreProgress};
+use pbs_datastore::{check_namespace_depth_limit, DataStore, StoreProgress};
use super::sync::{
- check_namespace_depth_limit, exclude_not_verified_or_encrypted,
- ignore_not_verified_or_encrypted, LocalSource, RemovedVanishedStats, SkipInfo, SkipReason,
- SyncSource, SyncStats,
+ exclude_not_verified_or_encrypted, ignore_not_verified_or_encrypted, LocalSource,
+ RemovedVanishedStats, SkipInfo, SkipReason, SyncSource, SyncStats,
};
use crate::api2::config::remote;
diff --git a/src/server/sync.rs b/src/server/sync.rs
index aedf4a27..47badf1f 100644
--- a/src/server/sync.rs
+++ b/src/server/sync.rs
@@ -570,27 +570,6 @@ impl std::fmt::Display for SkipInfo {
}
}
-/// Check if a sync from source to target of given namespaces exceeds the global namespace depth limit
-pub(crate) fn check_namespace_depth_limit(
- source_namespace: &BackupNamespace,
- target_namespace: &BackupNamespace,
- namespaces: &[BackupNamespace],
-) -> Result<(), Error> {
- let target_ns_depth = target_namespace.depth();
- let sync_ns_depth = namespaces
- .iter()
- .map(BackupNamespace::depth)
- .max()
- .map_or(0, |v| v - source_namespace.depth());
-
- if sync_ns_depth + target_ns_depth > MAX_NAMESPACE_DEPTH {
- bail!(
- "Syncing would exceed max allowed namespace depth. ({sync_ns_depth}+{target_ns_depth} > {MAX_NAMESPACE_DEPTH})",
- );
- }
- Ok(())
-}
-
/// Run a sync job in given direction
pub fn do_sync_job(
mut job: Job,
--
2.47.3
^ permalink raw reply [flat|nested] 21+ messages in thread* [PATCH proxmox-backup v8 03/13] datastore: have BackupGroup::destroy consume the group lock
2026-04-22 13:39 [PATCH proxmox-backup v8 00/13] fixes #6195: add support for moving groups and namespaces Hannes Laimer
2026-04-22 13:39 ` [PATCH proxmox-backup v8 01/13] ui: show empty groups Hannes Laimer
2026-04-22 13:39 ` [PATCH proxmox-backup v8 02/13] datastore: lift check_namespace_depth_limit to pbs-datastore Hannes Laimer
@ 2026-04-22 13:39 ` Hannes Laimer
2026-04-22 13:39 ` [PATCH proxmox-backup v8 04/13] datastore: split remove_namespace into flat and recursive variants Hannes Laimer
` (11 subsequent siblings)
14 siblings, 0 replies; 21+ messages in thread
From: Hannes Laimer @ 2026-04-22 13:39 UTC (permalink / raw)
To: pbs-devel
Take ownership of the group lock guard instead of acquiring it
internally, so callers that already hold the lock (e.g. the upcoming
move-group) can delegate final cleanup without a second lock attempt
and without duplicating the steps.
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
---
pbs-datastore/src/backup_info.rs | 9 +++++----
pbs-datastore/src/datastore.rs | 10 ++++++++--
2 files changed, 13 insertions(+), 6 deletions(-)
diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs
index c33eb307..3f7f7c90 100644
--- a/pbs-datastore/src/backup_info.rs
+++ b/pbs-datastore/src/backup_info.rs
@@ -217,17 +217,18 @@ impl BackupGroup {
crate::ListSnapshots::new(self.clone())
}
- /// Destroy the group inclusive all its backup snapshots (BackupDir's)
+ /// Destroy the group including all its backup snapshots (BackupDir's).
+ ///
+ /// Consumes the group lock. The caller is responsible for acquiring it via
+ /// [`Self::lock`] beforehand.
///
/// Returns `BackupGroupDeleteStats`, containing the number of deleted snapshots
/// and number of protected snaphsots, which therefore were not removed.
pub(crate) fn destroy(
&self,
+ _lock_guard: BackupLockGuard,
backend: &DatastoreBackend,
) -> Result<BackupGroupDeleteStats, Error> {
- let _guard = self
- .lock()
- .with_context(|| format!("while destroying group '{self:?}'"))?;
let path = self.full_group_path();
log::info!("removing backup group {:?}", path);
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index 5cdf6a4a..c1f2f8eb 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -1027,7 +1027,10 @@ impl DataStore {
for group in self.iter_backup_groups(ns.to_owned())? {
let group = group?;
let backend = self.backend()?;
- let delete_stats = group.destroy(&backend)?;
+ let guard = group
+ .lock()
+ .with_context(|| format!("while destroying group '{group:?}'"))?;
+ let delete_stats = group.destroy(guard, &backend)?;
stats.add(&delete_stats);
removed_all_groups = removed_all_groups && delete_stats.all_removed();
}
@@ -1148,8 +1151,11 @@ impl DataStore {
backup_group: &pbs_api_types::BackupGroup,
) -> Result<BackupGroupDeleteStats, Error> {
let backup_group = self.backup_group(ns.clone(), backup_group.clone());
+ let guard = backup_group
+ .lock()
+ .with_context(|| format!("while destroying group '{backup_group:?}'"))?;
- backup_group.destroy(&self.backend()?)
+ backup_group.destroy(guard, &self.backend()?)
}
/// Remove a backup directory including all content
--
2.47.3
^ permalink raw reply [flat|nested] 21+ messages in thread* [PATCH proxmox-backup v8 04/13] datastore: split remove_namespace into flat and recursive variants
2026-04-22 13:39 [PATCH proxmox-backup v8 00/13] fixes #6195: add support for moving groups and namespaces Hannes Laimer
` (2 preceding siblings ...)
2026-04-22 13:39 ` [PATCH proxmox-backup v8 03/13] datastore: have BackupGroup::destroy consume the group lock Hannes Laimer
@ 2026-04-22 13:39 ` Hannes Laimer
2026-04-22 13:39 ` [PATCH proxmox-backup v8 05/13] datastore: add move journal for coordinating with gc phase 1 Hannes Laimer
` (10 subsequent siblings)
14 siblings, 0 replies; 21+ messages in thread
From: Hannes Laimer @ 2026-04-22 13:39 UTC (permalink / raw)
To: pbs-devel
Removal previously always descended into child namespaces. Callers
that drive traversal themselves (pull's vanished-namespace loop, and
the upcoming move-namespace cleanup) only need the per-level work.
Factor the single-level work into its own public entry point and
keep the recursive variant as a bottom-up wrapper around it. Also
lift the empty-type-dir cleanup so it runs in both modes. Otherwise
the flat path cannot prune an already-empty namespace whose type
dirs were left behind by prior group moves or deletes. Switch pull's
vanished-NS cleanup to the flat variant.
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
---
pbs-datastore/src/datastore.rs | 151 ++++++++++++++++++++-------------
src/server/pull.rs | 7 +-
2 files changed, 93 insertions(+), 65 deletions(-)
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index c1f2f8eb..bce0d846 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -1035,46 +1035,111 @@ impl DataStore {
removed_all_groups = removed_all_groups && delete_stats.all_removed();
}
+ Ok((removed_all_groups, stats))
+ }
+
+ /// Remove a single namespace level (no recursion into children).
+ ///
+ /// If `delete_groups` is true, destroys all groups in this namespace. Otherwise only
+ /// an empty namespace directory is pruned. Returns `(removed_all_requested, stats)`
+ /// where `removed_all_requested` is true if nothing was protected and the directory
+ /// was removed.
+ pub fn remove_namespace(
+ self: &Arc<Self>,
+ ns: &BackupNamespace,
+ delete_groups: bool,
+ ) -> Result<(bool, BackupGroupDeleteStats), Error> {
+ let mut removed_all_requested = true;
+ let mut stats = BackupGroupDeleteStats::default();
+ let backend = self.backend()?;
+
+ if delete_groups {
+ log::info!("removing groups in namespace {}:/{ns}", self.name());
+ let (removed_ns_groups, delete_stats) = self.remove_namespace_groups(ns)?;
+ stats.add(&delete_stats);
+ removed_all_requested = removed_all_requested && removed_ns_groups;
+ }
+
let base_file = std::fs::File::open(self.base_path())?;
let base_fd = base_file.as_raw_fd();
+
+ // Best-effort cleanup of empty type subdirectories (e.g. `host/`). These can be
+ // left behind after groups are destroyed (aside from protected snapshots) or after
+ // groups have been moved out, and would otherwise prevent the namespace directory
+ // below from being removed. ENOTEMPTY is expected (protected snapshots, or
+ // groups still present when `delete_groups` is false) and silently skipped.
for ty in BackupType::iter() {
let mut ty_dir = ns.path();
ty_dir.push(ty.to_string());
- // best effort only, but we probably should log the error
if let Err(err) = unlinkat(Some(base_fd), &ty_dir, UnlinkatFlags::RemoveDir) {
- if err != nix::errno::Errno::ENOENT {
- log::error!("failed to remove backup type {ty} in {ns} - {err}");
+ if err != nix::errno::Errno::ENOENT && err != nix::errno::Errno::ENOTEMPTY {
+ log::warn!("failed to remove empty backup type dir {ty} in {ns} - {err}");
}
}
}
- Ok((removed_all_groups, stats))
+ let mut ns_dir = ns.path();
+ ns_dir.push("ns");
+ let _ = unlinkat(Some(base_fd), &ns_dir, UnlinkatFlags::RemoveDir);
+
+ if !ns.is_root() {
+ match unlinkat(Some(base_fd), &ns.path(), UnlinkatFlags::RemoveDir) {
+ Ok(()) => log::debug!("removed namespace {ns}"),
+ Err(nix::errno::Errno::ENOENT) => {
+ log::debug!("namespace {ns} already removed")
+ }
+ Err(nix::errno::Errno::ENOTEMPTY) if !delete_groups => {
+ removed_all_requested = false;
+ log::debug!("skip removal of non-empty namespace {ns}")
+ }
+ Err(err) => {
+ removed_all_requested = false;
+ log::warn!("failed to remove namespace {ns} - {err}")
+ }
+ }
+ if let DatastoreBackend::S3(s3_client) = &backend {
+ // Only remove the namespace marker if it was empty;
+ // then this is the same as the namespace being removed.
+ let object_key =
+ crate::s3::object_key_from_path(&ns.path(), NAMESPACE_MARKER_FILENAME)
+ .context("invalid namespace marker object key")?;
+ proxmox_async::runtime::block_on(s3_client.delete_object(object_key))?;
+ }
+ }
+
+ Ok((removed_all_requested, stats))
}
- /// Remove a complete backup namespace optionally including all it's, and child namespaces',
- /// groups. If `removed_groups` is false this only prunes empty namespaces.
+ /// Remove a complete backup namespace subtree including all child namespaces.
///
- /// Returns true if everything requested, and false if some groups were protected or if some
- /// namespaces weren't empty even though all groups were deleted (race with new backup)
+ /// If `delete_groups` is false this only prunes empty namespaces. Returns true if
+ /// everything requested was removed, and false if some groups were protected or some
+ /// namespaces weren't empty even though all groups were deleted (race with new backup).
pub fn remove_namespace_recursive(
self: &Arc<Self>,
ns: &BackupNamespace,
delete_groups: bool,
) -> Result<(bool, BackupGroupDeleteStats), Error> {
let store = self.name();
- let mut removed_all_requested = true;
- let mut stats = BackupGroupDeleteStats::default();
- let backend = self.backend()?;
+ log::info!(
+ "{} namespace recursively below {store}:/{ns}",
+ if delete_groups {
+ "removing whole"
+ } else {
+ "pruning empty"
+ },
+ );
- if delete_groups {
- log::info!("removing whole namespace recursively below {store}:/{ns}",);
- for ns in self.recursive_iter_backup_ns(ns.to_owned())? {
- let (removed_ns_groups, delete_stats) = self.remove_namespace_groups(&ns?)?;
- stats.add(&delete_stats);
- removed_all_requested = removed_all_requested && removed_ns_groups;
- }
+ let mut children = self
+ .recursive_iter_backup_ns(ns.to_owned())?
+ .collect::<Result<Vec<BackupNamespace>, Error>>()?;
+ // Process deepest first so a parent's rmdir sees an empty directory.
+ children.sort_by_key(|b| std::cmp::Reverse(b.depth()));
- if let DatastoreBackend::S3(s3_client) = &backend {
+ // When destroying the full subtree, a bulk S3 delete under the root prefix catches
+ // any orphaned group-level metadata (owner/notes) before per-level cleanup iterates.
+ if delete_groups {
+ if let DatastoreBackend::S3(s3_client) = &self.backend()? {
let ns_dir = ns.path();
let ns_prefix = ns_dir
.to_str()
@@ -1092,50 +1157,14 @@ impl DataStore {
bail!("deleting objects failed");
}
}
- } else {
- log::info!("pruning empty namespace recursively below {store}:/{ns}");
}
- // now try to delete the actual namespaces, bottom up so that we can use safe rmdir that
- // will choke if a new backup/group appeared in the meantime (but not on an new empty NS)
- let mut children = self
- .recursive_iter_backup_ns(ns.to_owned())?
- .collect::<Result<Vec<BackupNamespace>, Error>>()?;
-
- children.sort_by_key(|b| std::cmp::Reverse(b.depth()));
-
- let base_file = std::fs::File::open(self.base_path())?;
- let base_fd = base_file.as_raw_fd();
-
- for ns in children.iter() {
- let mut ns_dir = ns.path();
- ns_dir.push("ns");
- let _ = unlinkat(Some(base_fd), &ns_dir, UnlinkatFlags::RemoveDir);
-
- if !ns.is_root() {
- match unlinkat(Some(base_fd), &ns.path(), UnlinkatFlags::RemoveDir) {
- Ok(()) => log::debug!("removed namespace {ns}"),
- Err(nix::errno::Errno::ENOENT) => {
- log::debug!("namespace {ns} already removed")
- }
- Err(nix::errno::Errno::ENOTEMPTY) if !delete_groups => {
- removed_all_requested = false;
- log::debug!("skip removal of non-empty namespace {ns}")
- }
- Err(err) => {
- removed_all_requested = false;
- log::warn!("failed to remove namespace {ns} - {err}")
- }
- }
- if let DatastoreBackend::S3(s3_client) = &backend {
- // Only remove the namespace marker, if it was empty,
- // than this is the same as the namespace being removed.
- let object_key =
- crate::s3::object_key_from_path(&ns.path(), NAMESPACE_MARKER_FILENAME)
- .context("invalid namespace marker object key")?;
- proxmox_async::runtime::block_on(s3_client.delete_object(object_key))?;
- }
- }
+ let mut removed_all_requested = true;
+ let mut stats = BackupGroupDeleteStats::default();
+ for child in &children {
+ let (removed, delete_stats) = self.remove_namespace(child, delete_groups)?;
+ stats.add(&delete_stats);
+ removed_all_requested = removed_all_requested && removed;
}
Ok((removed_all_requested, stats))
diff --git a/src/server/pull.rs b/src/server/pull.rs
index 16a59fee..2c9d89a8 100644
--- a/src/server/pull.rs
+++ b/src/server/pull.rs
@@ -879,10 +879,9 @@ fn check_and_remove_ns(params: &PullParameters, local_ns: &BackupNamespace) -> R
check_ns_modification_privs(params.target.store.name(), local_ns, ¶ms.owner)
.map_err(|err| format_err!("Removing {local_ns} not allowed - {err}"))?;
- let (removed_all, _delete_stats) = params
- .target
- .store
- .remove_namespace_recursive(local_ns, true)?;
+ // The outer loop (check_and_remove_vanished_ns) iterates children first, so we only need
+ // to act on this one level.
+ let (removed_all, _delete_stats) = params.target.store.remove_namespace(local_ns, true)?;
Ok(removed_all)
}
--
2.47.3
^ permalink raw reply [flat|nested] 21+ messages in thread* [PATCH proxmox-backup v8 05/13] datastore: add move journal for coordinating with gc phase 1
2026-04-22 13:39 [PATCH proxmox-backup v8 00/13] fixes #6195: add support for moving groups and namespaces Hannes Laimer
` (3 preceding siblings ...)
2026-04-22 13:39 ` [PATCH proxmox-backup v8 04/13] datastore: split remove_namespace into flat and recursive variants Hannes Laimer
@ 2026-04-22 13:39 ` Hannes Laimer
2026-04-22 13:39 ` [PATCH proxmox-backup v8 06/13] datastore: add move-group Hannes Laimer
` (9 subsequent siblings)
14 siblings, 0 replies; 21+ messages in thread
From: Hannes Laimer @ 2026-04-22 13:39 UTC (permalink / raw)
To: pbs-devel
Close a data-loss race between move_group/move_namespace and a
concurrent gc phase-1 mark: if a snapshot is moved between
list_index_files() and the hierarchy iteration, and the target namespace
happens to be iterated before the source, neither pass sees the moved
index and phase 2 could sweep its chunks.
Add a per-datastore write-ahead journal at
/run/proxmox-backup/locks/<datastore>/move-journal that GC drains at the
end of phase-1 mark. No caller yet writes to it, the following commits
will use this.
See the `move_journal` module doc for the protocol and the invariant
that makes it correct.
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
---
pbs-datastore/src/datastore.rs | 18 ++++
pbs-datastore/src/lib.rs | 1 +
pbs-datastore/src/move_journal.rs | 149 ++++++++++++++++++++++++++++++
3 files changed, 168 insertions(+)
create mode 100644 pbs-datastore/src/move_journal.rs
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index bce0d846..319b4cba 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -1800,6 +1800,24 @@ impl DataStore {
warn!("Found {strange_paths_count} index files outside of expected directory scheme");
}
+ // Drain the move journal under an exclusive flock. Any index file whose path
+ // was recorded before its rename is processed here even if the namespace
+ // iteration missed it at both its old and new locations. See `move_journal`
+ // for details.
+ crate::move_journal::drain_move_journal(self.name(), |path| {
+ let Some(index) = self.open_index_reader(path)? else {
+ return Ok(());
+ };
+ self.index_mark_used_chunks(
+ index,
+ path,
+ &mut chunk_lru_cache,
+ status,
+ worker,
+ s3_client.as_ref().cloned(),
+ )
+ })?;
+
Ok(())
}
diff --git a/pbs-datastore/src/lib.rs b/pbs-datastore/src/lib.rs
index 4d8ac505..6647ee2b 100644
--- a/pbs-datastore/src/lib.rs
+++ b/pbs-datastore/src/lib.rs
@@ -192,6 +192,7 @@ pub mod data_blob_reader;
pub mod file_formats;
pub mod index;
pub mod manifest;
+pub mod move_journal;
pub mod paperkey;
pub mod prune;
pub mod read_chunk;
diff --git a/pbs-datastore/src/move_journal.rs b/pbs-datastore/src/move_journal.rs
new file mode 100644
index 00000000..891644d7
--- /dev/null
+++ b/pbs-datastore/src/move_journal.rs
@@ -0,0 +1,149 @@
+//! Per-datastore journal used to coordinate snapshot renames with a concurrent garbage collection
+//! phase-1 mark.
+//!
+//! # Race fixed
+//!
+//! GC phase 1 first calls `list_index_files()` to snapshot the set of absolute index-file paths in
+//! the datastore, then iterates namespaces live and touches the atime of every referenced chunk.
+//! If a `move_group`/`move_namespace` relocates a snapshot between those two steps, and the target
+//! namespace is visited by GC before the source (`readdir(2)` order, not deterministic), the moved
+//! index is at neither location when GC looks: missing from the target (already iterated) and
+//! missing from the source (iterated after the rename). Its old path lands in the leftover
+//! `unprocessed_index_list` and is discarded as a vanished file. Chunks referenced only by that
+//! index never get their atime bumped and phase 2 sweeps them.
+//!
+//! # Protocol
+//!
+//! Write-ahead journal for renames:
+//!
+//! - **Before** renaming a snapshot, the move records the new path of each index file it is about
+//! to create, under a brief exclusive flock.
+//! - At the end of phase-1 mark, GC acquires the same exclusive flock, reads every recorded path,
+//! runs the normal `index_mark_used_chunks` on each, truncates, and releases before entering phase 2.
+//!
+//! Why write-before-rename rather than write-after-rename with a long-held shared lock by each
+//! mover: the invariant is "if the new path exists, a journal entry for it exists too". So the
+//! drain - which runs only after iteration finishes - is guaranteed to catch anything iteration
+//! missed:
+//!
+//! - If the source-ns iteration found the index at the old path, its chunks are already marked.
+//! The journal entry is then either a redundant re-mark (rename completed before the drain, LRU
+//! dedups it) or a no-op skip (rename not yet, `open_index_reader` returns `None`) - harmless
+//! either way.
+//! - If the source-ns iteration missed it, then the rename already happened by the time iteration
+//! reached source, which is before the drain, so `open_index_reader(new_path)` at drain time
+//! succeeds and marks the chunks.
+//!
+//! A move that crashes between the journal write and the rename leaves a "ghost" entry. The
+//! drain's `open_index_reader` returns `None` and skips, and the truncate step clears it. This is
+//! handled by the existing vanished-file logic in the caller.
+//!
+//! The file lives under `/run/proxmox-backup/locks/<datastore>/move-journal`. Tmpfs is correct
+//! here: a reboot aborts any in-progress GC, and the next GC rebuilds state from a fresh
+//! `list_index_files()` against the post-move filesystem - there is nothing worth persisting.
+
+use std::fs::File;
+use std::io::{Read, Seek, SeekFrom, Write};
+use std::path::{Path, PathBuf};
+use std::time::Duration;
+
+use anyhow::{bail, format_err, Context, Error};
+use nix::sys::stat::Mode;
+
+use proxmox_sys::fs::{open_file_locked, CreateOptions};
+
+use pbs_config::backup_user;
+
+use crate::backup_info::DATASTORE_LOCKS_DIR;
+
+const JOURNAL_FILENAME: &str = "move-journal";
+const APPEND_LOCK_TIMEOUT: Duration = Duration::from_secs(10);
+// Long enough to cover any in-flight append, if it takes longer than this something is very wrong
+// and we'd rather fail GC than hang forever.
+const DRAIN_LOCK_TIMEOUT: Duration = Duration::from_secs(10);
+
+fn journal_path(datastore_name: &str) -> PathBuf {
+ Path::new(DATASTORE_LOCKS_DIR)
+ .join(datastore_name)
+ .join(JOURNAL_FILENAME)
+}
+
+fn ensure_parent(path: &Path) -> Result<(), Error> {
+ if let Some(parent) = path.parent() {
+ std::fs::create_dir_all(parent)
+ .with_context(|| format!("failed to create move-journal parent dir {parent:?}"))?;
+ }
+ Ok(())
+}
+
+fn open_locked_exclusive(path: &Path, timeout: Duration) -> Result<File, Error> {
+ ensure_parent(path)?;
+ let user = backup_user()?;
+ let options = CreateOptions::new()
+ .perm(Mode::from_bits_truncate(0o660))
+ .owner(user.uid)
+ .group(user.gid);
+ open_file_locked(path, timeout, true, options)
+ .with_context(|| format!("failed to acquire exclusive move-journal lock at {path:?}"))
+}
+
+/// Append one or more absolute index-file paths to the journal under a brief exclusive flock. The
+/// caller passes the *post-rename* paths; the rename must happen after this returns.
+pub fn append_moved_indices(datastore_name: &str, paths: &[PathBuf]) -> Result<(), Error> {
+ if paths.is_empty() {
+ return Ok(());
+ }
+
+ let mut buf = Vec::new();
+ for path in paths {
+ if !path.is_absolute() {
+ bail!("move-journal: refusing to record non-absolute path {path:?}");
+ }
+ let s = path
+ .to_str()
+ .ok_or_else(|| format_err!("move-journal: non-UTF-8 path {path:?}"))?;
+ if s.as_bytes().contains(&b'\n') {
+ bail!("move-journal: path contains newline {path:?}");
+ }
+ buf.extend_from_slice(s.as_bytes());
+ buf.push(b'\n');
+ }
+
+ let path = journal_path(datastore_name);
+ let mut file = open_locked_exclusive(&path, APPEND_LOCK_TIMEOUT)?;
+ file.write_all(&buf)
+ .context("failed to append to move journal")?;
+ Ok(())
+}
+
+/// Drain the journal under an exclusive lock, calling `f` for each recorded path. Blocks only for
+/// the brief window of a concurrent append. After the callback runs on every entry, the journal is
+/// truncated under the same lock.
+///
+/// On a processing error the entry is left in the journal (no truncate) so the next GC will retry.
+pub fn drain_move_journal<F>(datastore_name: &str, mut f: F) -> Result<(), Error>
+where
+ F: FnMut(&Path) -> Result<(), Error>,
+{
+ let path = journal_path(datastore_name);
+ let mut file = open_locked_exclusive(&path, DRAIN_LOCK_TIMEOUT)?;
+
+ file.seek(SeekFrom::Start(0))
+ .context("failed to rewind move journal for draining")?;
+ let mut contents = String::new();
+ file.read_to_string(&mut contents)
+ .context("failed to read move journal")?;
+
+ for line in contents.lines() {
+ let entry = line.trim();
+ if entry.is_empty() {
+ continue;
+ }
+ f(Path::new(entry))
+ .with_context(|| format!("move-journal: processing '{entry}' failed"))?;
+ }
+
+ file.set_len(0)
+ .context("failed to truncate move journal after drain")?;
+ Ok(())
+}
--
2.47.3
^ permalink raw reply [flat|nested] 21+ messages in thread* [PATCH proxmox-backup v8 06/13] datastore: add move-group
2026-04-22 13:39 [PATCH proxmox-backup v8 00/13] fixes #6195: add support for moving groups and namespaces Hannes Laimer
` (4 preceding siblings ...)
2026-04-22 13:39 ` [PATCH proxmox-backup v8 05/13] datastore: add move journal for coordinating with gc phase 1 Hannes Laimer
@ 2026-04-22 13:39 ` Hannes Laimer
2026-04-22 13:39 ` [PATCH proxmox-backup v8 07/13] datastore: add move-namespace Hannes Laimer
` (8 subsequent siblings)
14 siblings, 0 replies; 21+ messages in thread
From: Hannes Laimer @ 2026-04-22 13:39 UTC (permalink / raw)
To: pbs-devel
Add support for moving a single backup group to a different namespace
within the same datastore.
For the filesystem backend each snapshot directory is renamed. For
S3 all objects are copied to the target prefix before deleting the
source, per snapshot.
Exclusive locks on the group and all its snapshots are acquired
before the move. Snapshots are locked and moved in batches to avoid
exhausting file descriptors on groups with many snapshots.
If the target group already exists and merging is enabled, snapshots
are merged into it provided both groups share the same owner and the
source snapshots are strictly newer than the target's. Group notes
are renamed into place when the target has none, and kept with a
warning when they diverge during a merge.
Before each rename, move_to records the post-rename index paths in the
move journal (see the preceding patch), so a concurrent GC phase-1
drain can still mark the moved chunks if the namespace iteration
missed them at both the old and new locations.
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
---
pbs-datastore/src/backup_info.rs | 260 ++++++++++++++++++++++++++++++-
pbs-datastore/src/datastore.rs | 250 ++++++++++++++++++++++++++---
2 files changed, 485 insertions(+), 25 deletions(-)
diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs
index 3f7f7c90..69949c51 100644
--- a/pbs-datastore/src/backup_info.rs
+++ b/pbs-datastore/src/backup_info.rs
@@ -9,18 +9,19 @@ use std::time::Duration;
use anyhow::{bail, format_err, Context, Error};
use const_format::concatcp;
-use proxmox_s3_client::S3PathPrefix;
+use proxmox_s3_client::{S3ObjectKey, S3PathPrefix};
use proxmox_sys::fs::{lock_dir_noblock, lock_dir_noblock_shared, replace_file, CreateOptions};
use proxmox_systemd::escape_unit;
use pbs_api_types::{
- Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, GroupFilter, VerifyState,
- BACKUP_DATE_REGEX, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME,
+ ArchiveType, Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, GroupFilter,
+ VerifyState, BACKUP_DATE_REGEX, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME,
};
use pbs_config::{open_backup_lockfile, BackupLockGuard};
use crate::datastore::{GROUP_NOTES_FILE_NAME, GROUP_OWNER_FILE_NAME};
use crate::manifest::{BackupManifest, MANIFEST_LOCK_NAME};
+use crate::move_journal;
use crate::s3::S3_CONTENT_PREFIX;
use crate::{DataBlob, DataStore, DatastoreBackend};
@@ -274,6 +275,120 @@ impl BackupGroup {
Ok(delete_stats)
}
+ /// Check merge invariants for moving this group's snapshots into `target`.
+ /// Returns an error if ownership differs or snapshot times overlap.
+ pub(crate) fn check_merge_invariants(&self, target: &BackupGroup) -> Result<(), Error> {
+ let src_owner = self.get_owner()?;
+ let tgt_owner = target.get_owner()?;
+ if src_owner != tgt_owner {
+ bail!(
+ "cannot merge group '{}/{}' from '{}' into '{}': owner mismatch \
+ (source: {src_owner}, target: {tgt_owner})",
+ self.group.ty,
+ self.group.id,
+ self.ns,
+ target.ns,
+ );
+ }
+
+ let src_oldest = self
+ .iter_snapshots()?
+ .filter_map(Result::ok)
+ .map(|s| s.backup_time())
+ .min();
+
+ if let Some(src_oldest) = src_oldest {
+ // Any target snapshot with time >= src_oldest violates the
+ // "source strictly newer than target" merge invariant. Short-circuit on the first hit.
+ if let Some(overlap) = target
+ .iter_snapshots()?
+ .filter_map(Result::ok)
+ .map(|s| s.backup_time())
+ .find(|t| *t >= src_oldest)
+ {
+ bail!(
+ "cannot merge group '{}/{}' from '{}' into '{}': snapshot time overlap \
+ (oldest source: {src_oldest}, conflicting target: {overlap})",
+ self.group.ty,
+ self.group.id,
+ self.ns,
+ target.ns,
+ );
+ }
+ }
+
+ Ok(())
+ }
+
+ /// Move the group notes file (if any) from this group to `target`. Caller must hold
+ /// exclusive group locks on both.
+ ///
+ /// Behavior:
+ /// - If the source has no notes, do nothing.
+ /// - If only the source has notes, upload them to the target's S3 notes object first
+ /// (when the backend is S3), then rename the local file. A failure in either step
+ /// aborts the move so the source is left intact for the caller to retry; the S3
+ /// upload uses replace semantics so re-running is idempotent.
+ /// - If both source and target have notes (merge case):
+ /// - identical contents: leave the target unchanged. The source copy will be
+ /// removed by `destroy()`.
+ /// - diverging contents: log a warning and keep the target's notes.
+ pub(crate) fn move_notes_to(
+ &self,
+ target: &BackupGroup,
+ backend: &DatastoreBackend,
+ ) -> Result<(), Error> {
+ let src_notes_path = self.store.group_notes_path(&self.ns, &self.group);
+ let dst_notes_path = target.store.group_notes_path(&target.ns, &target.group);
+
+ let src_notes = match std::fs::read(&src_notes_path) {
+ Ok(v) => v,
+ Err(ref err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(()),
+ Err(err) => {
+ bail!("reading source group notes {src_notes_path:?} failed: {err}")
+ }
+ };
+
+ match std::fs::read(&dst_notes_path) {
+ Ok(dst_notes) => {
+ if dst_notes != src_notes {
+ log::warn!(
+ "group notes differ during merge of '{}' from '{}' into '{}' - keeping target's notes",
+ self.group, self.ns, target.ns,
+ );
+ }
+ // Identical or intentionally kept: source copy will be removed by destroy().
+ return Ok(());
+ }
+ Err(ref err) if err.kind() == std::io::ErrorKind::NotFound => {}
+ Err(err) => {
+ bail!("reading target group notes {dst_notes_path:?} failed: {err}")
+ }
+ }
+
+ if let DatastoreBackend::S3(s3_client) = backend {
+ let dst_key = crate::s3::object_key_from_path(
+ &target.relative_group_path(),
+ GROUP_NOTES_FILE_NAME,
+ )
+ .context("invalid target notes object key")?;
+ let data = hyper::body::Bytes::copy_from_slice(&src_notes);
+ proxmox_async::runtime::block_on(s3_client.upload_replace_with_retry(dst_key, data))
+ .with_context(|| {
+ format!(
+ "failed to upload group notes on S3 backend for '{}' in '{}'",
+ target.group, target.ns,
+ )
+ })?;
+ }
+
+ std::fs::rename(&src_notes_path, &dst_notes_path).with_context(|| {
+ format!("failed to move group notes {src_notes_path:?} -> {dst_notes_path:?}")
+ })?;
+
+ Ok(())
+ }
+
/// Helper function, assumes that no more snapshots are present in the group.
fn remove_group_dir(&self) -> Result<(), Error> {
let note_path = self.store.group_notes_path(&self.ns, &self.group);
@@ -613,6 +728,145 @@ impl BackupDir {
}
}
+ /// Move this snapshot into `target`.
+ ///
+ /// For the filesystem backend, renames the snapshot directory. For S3, copies all
+ /// objects under the snapshot prefix to the target, renames the local cache directory,
+ /// then deletes the source objects. A copy failure returns an error with the snapshot
+ /// intact at source. A delete failure is logged as a warning.
+ ///
+ /// Before the rename, each index file's post-rename path is recorded in the per-datastore
+ /// move journal so a concurrent GC phase-1 drain can still mark their chunks - see
+ /// `move_journal` for the race this closes. If the rename then fails the journal entry
+ /// becomes a ghost that the drain skips on `open_index_reader` returning `None`.
+ ///
+ /// The caller must hold an exclusive lock on this snapshot, hold exclusive locks on
+ /// both source and target groups, and ensure the target group directory exists.
+ pub(crate) fn move_to(
+ &self,
+ target: &BackupGroup,
+ backend: &DatastoreBackend,
+ ) -> Result<(), Error> {
+ if !Arc::ptr_eq(&self.store, &target.store) {
+ bail!("cannot move snapshot across different datastores");
+ }
+
+ let target_snap = target.backup_dir_with_rfc3339(self.backup_time_string.clone())?;
+ let src_snap_path = self.full_path();
+ let dst_snap_path = target_snap.full_path();
+
+ // Enumerate the index files at source (they are still there until the
+ // rename below) and record their future paths in the move journal so a
+ // concurrent GC phase-1 drain can mark their chunks even if the
+ // hierarchy iteration missed both the source and target.
+ let mut journal_entries: Vec<PathBuf> = Vec::new();
+ for entry in std::fs::read_dir(&src_snap_path)
+ .with_context(|| format!("failed to list source snapshot dir {src_snap_path:?}"))?
+ {
+ let entry =
+ entry.with_context(|| format!("failed to read entry in {src_snap_path:?}"))?;
+ let name = entry.file_name();
+ let Some(name_str) = name.to_str() else {
+ continue;
+ };
+ if matches!(
+ ArchiveType::from_path(name_str),
+ Ok(ArchiveType::FixedIndex) | Ok(ArchiveType::DynamicIndex)
+ ) {
+ journal_entries.push(dst_snap_path.join(&name));
+ }
+ }
+ move_journal::append_moved_indices(self.store.name(), &journal_entries)?;
+
+ match backend {
+ DatastoreBackend::Filesystem => {
+ std::fs::rename(&src_snap_path, &dst_snap_path).with_context(|| {
+ format!("failed to move snapshot {src_snap_path:?} to {dst_snap_path:?}")
+ })?;
+ }
+ DatastoreBackend::S3(s3_client) => {
+ let src_rel = self.relative_path();
+ let src_rel_str = src_rel
+ .to_str()
+ .ok_or_else(|| format_err!("invalid source snapshot path"))?;
+ let src_prefix_str = format!("{S3_CONTENT_PREFIX}/{src_rel_str}/");
+
+ let dst_rel = target_snap.relative_path();
+ let dst_rel_str = dst_rel
+ .to_str()
+ .ok_or_else(|| format_err!("invalid target snapshot path"))?;
+ let dst_prefix_str = format!("{S3_CONTENT_PREFIX}/{dst_rel_str}/");
+
+ let store_prefix = format!("{}/", self.store.name());
+
+ // Copy all objects for this snapshot to the target prefix. On failure the
+ // source snapshot remains intact and any partial target copies stay as
+ // leftovers (visible via the API for cleanup).
+ let prefix = S3PathPrefix::Some(src_prefix_str.clone());
+ let mut token: Option<String> = None;
+ let mut src_keys = Vec::new();
+
+ loop {
+ let result = proxmox_async::runtime::block_on(
+ s3_client.list_objects_v2(&prefix, token.as_deref()),
+ )
+ .context("failed to list snapshot objects on S3 backend")?;
+
+ for item in result.contents {
+ let full_key_str: &str = &item.key;
+ let rel_key =
+ full_key_str.strip_prefix(&store_prefix).ok_or_else(|| {
+ format_err!("unexpected key prefix in '{full_key_str}'")
+ })?;
+ let src_key = S3ObjectKey::try_from(rel_key)?;
+
+ let suffix = rel_key
+ .strip_prefix(&src_prefix_str)
+ .ok_or_else(|| format_err!("unexpected key format '{rel_key}'"))?;
+ let dst_key_str = format!("{dst_prefix_str}{suffix}");
+ let dst_key = S3ObjectKey::try_from(dst_key_str.as_str())?;
+
+ proxmox_async::runtime::block_on(
+ s3_client.copy_object(src_key.clone(), dst_key),
+ )
+ .with_context(|| format!("failed to copy S3 object '{rel_key}'"))?;
+ src_keys.push(src_key);
+ }
+
+ if result.is_truncated {
+ token = result.next_continuation_token;
+ } else {
+ break;
+ }
+ }
+
+ std::fs::rename(&src_snap_path, &dst_snap_path).with_context(|| {
+ format!("failed to move snapshot cache {src_snap_path:?} to {dst_snap_path:?}")
+ })?;
+
+ // Delete source S3 objects. Treat failures as warnings since the snapshot
+ // is already at the target.
+ for src_key in src_keys {
+ if let Err(err) =
+ proxmox_async::runtime::block_on(s3_client.delete_object(src_key.clone()))
+ {
+ log::warn!(
+ "S3 move: failed to delete source object '{src_key:?}' \
+ (snapshot already at target, orphaned object requires manual removal): \
+ {err:#}"
+ );
+ }
+ }
+ }
+ }
+
+ // Clean up stale source lock files under /run for this snapshot.
+ let _ = std::fs::remove_file(self.manifest_lock_path());
+ let _ = std::fs::remove_file(self.lock_path());
+
+ Ok(())
+ }
+
/// Destroy the whole snapshot, bails if it's protected
///
/// Setting `force` to true skips locking and thus ignores if the backup is currently in use.
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index 319b4cba..d215dd0d 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -88,6 +88,15 @@ const CHUNK_LOCK_TIMEOUT: Duration = Duration::from_secs(3 * 60 * 60);
const S3_DELETE_BATCH_LIMIT: usize = 100;
// max defer time for s3 batch deletions
const S3_DELETE_DEFER_LIMIT_SECONDS: Duration = Duration::from_secs(60 * 5);
+// upper bound on concurrent snapshot locks during move operations to avoid exhausting FDs
+const MAX_SNAPSHOT_LOCKS: usize = 512;
+
+/// Error from a locked per-group operation. Soft errors (e.g. lock conflicts)
+/// are retryable, hard errors are not.
+pub(crate) enum BackupGroupOpError {
+ Soft(Error),
+ Hard(Error),
+}
/// Check that moving/syncing `namespaces` from `source` to `target` stays within
/// `MAX_NAMESPACE_DEPTH`.
@@ -1187,6 +1196,174 @@ impl DataStore {
backup_group.destroy(guard, &self.backend()?)
}
+ /// Validate a group-move request without touching locks.
+ ///
+ /// Fast pre-flight check used by the API endpoint so we can fail synchronously before spawning
+ /// a worker. The authoritative post-lock gate lives in [`Self::lock_and_move_group`]. This
+ /// function does *not* replace it, and it does *not* check ownership - the caller handles
+ /// that.
+ pub fn check_move_group(
+ self: &Arc<Self>,
+ source_ns: &BackupNamespace,
+ target_ns: &BackupNamespace,
+ group: &pbs_api_types::BackupGroup,
+ merge_group: bool,
+ ) -> Result<(), Error> {
+ if source_ns == target_ns {
+ bail!("source and target namespace must be different");
+ }
+ if !self.namespace_exists(target_ns) {
+ bail!("target namespace '{target_ns}' does not exist");
+ }
+ if !self.backup_group(source_ns.clone(), group.clone()).exists() {
+ bail!("group '{group}' does not exist in namespace '{source_ns}'");
+ }
+ if self.backup_group(target_ns.clone(), group.clone()).exists() && !merge_group {
+ bail!(
+ "group '{group}' already exists in target namespace '{target_ns}' \
+ and merge-group is disabled"
+ );
+ }
+ Ok(())
+ }
+
+ /// Move a single backup group to a different namespace within the same datastore.
+ pub fn move_group(
+ self: &Arc<Self>,
+ source_ns: &BackupNamespace,
+ group: &pbs_api_types::BackupGroup,
+ target_ns: &BackupNamespace,
+ merge_group: bool,
+ ) -> Result<(), Error> {
+ if *OLD_LOCKING {
+ bail!("move operations require new-style file-based locking");
+ }
+
+ let source_group = self.backup_group(source_ns.clone(), group.clone());
+ let backend = self.backend()?;
+
+ self.lock_and_move_group(source_ns, &source_group, target_ns, &backend, merge_group)
+ .map_err(|err| match err {
+ BackupGroupOpError::Soft(err) | BackupGroupOpError::Hard(err) => err,
+ })
+ }
+
+ /// Lock and move a single group. Acquires exclusive locks on both source and target groups,
+ /// then moves snapshots in batches - each batch is locked, moved, and released before the next
+ /// batch starts. This ensures no concurrent readers (e.g. verify) can operate on snapshots
+ /// being moved, without exhausting file descriptors.
+ ///
+ /// `source_ns` is the subtree root of the caller's operation, not necessarily the group's own
+ /// namespace: for a namespace move, a group can live deeper than `source_ns`, so the prefix is
+ /// rewritten to its new target location.
+ ///
+ /// Returns a `BackupGroupOpError` on failure, indicating whether the failure was a transient
+ /// lock conflict (retryable) or a hard error.
+ fn lock_and_move_group(
+ self: &Arc<Self>,
+ source_ns: &BackupNamespace,
+ group: &BackupGroup,
+ target_ns: &BackupNamespace,
+ backend: &DatastoreBackend,
+ merge_groups: bool,
+ ) -> Result<(), BackupGroupOpError> {
+ let target_group_ns = group
+ .backup_ns()
+ .map_prefix(source_ns, target_ns)
+ .map_err(BackupGroupOpError::Hard)?;
+
+ // Lock the source first so concurrent lock conflicts are detected before we touch the
+ // target structure. Acquisition order is source -> target (via
+ // `create_locked_backup_group_do` below). `.lock()` is non-blocking, so two concurrent
+ // mirror moves (A -> B and B -> A on same-named groups) fail fast with `Soft` rather than
+ // deadlocking.
+ let source_guard = group.lock().map_err(BackupGroupOpError::Soft)?;
+
+ // Post-lock validation: re-runs the pre-flight checks now that we hold the source lock,
+ // closing the TOCTOU gap between the API-level pre-check and the worker. This also stops
+ // the target-group create below from silently creating a missing target namespace
+ // (intentional for backup/pull/tape-restore, not for move).
+ self.check_move_group(
+ group.backup_ns(),
+ &target_group_ns,
+ group.group(),
+ merge_groups,
+ )
+ .map_err(BackupGroupOpError::Hard)?;
+
+ let source_owner = group.get_owner().map_err(BackupGroupOpError::Hard)?;
+
+ // Create-or-open the target group under lock. Reports whether the group was created (= no
+ // merge) or already existed (= merge), and differentiates a target lock conflict (Soft)
+ // from other failures (Hard).
+ let (_, created, _target_guard) =
+ self.create_locked_backup_group_do(&target_group_ns, group.group(), &source_owner)?;
+ let target_group = self.backup_group(target_group_ns.clone(), group.group().clone());
+ let merge = !created;
+
+ if merge && !merge_groups {
+ return Err(BackupGroupOpError::Hard(format_err!(
+ "group '{}' already exists in target namespace '{target_group_ns}' \
+ and merging is disabled",
+ group.group(),
+ )));
+ }
+ if merge {
+ group
+ .check_merge_invariants(&target_group)
+ .map_err(BackupGroupOpError::Hard)?;
+ }
+
+ let mut snapshots: Vec<_> = group
+ .iter_snapshots()
+ .map_err(BackupGroupOpError::Hard)?
+ .collect::<Result<Vec<_>, _>>()
+ .map_err(BackupGroupOpError::Hard)?;
+ // Sort by time so a partial failure (e.g. batch N succeeds but N+1 fails) leaves a clean
+ // time-ordered split: all moved snapshots are older than all remaining ones. This keeps
+ // the merge invariant (source oldest > target newest) valid on retry.
+ snapshots.sort_by_key(|s| s.backup_time());
+
+ log::info!(
+ "{} group '{}/{}' from '{}' to '{target_group_ns}'",
+ if merge { "merging" } else { "moving" },
+ group.group().ty,
+ group.group().id,
+ group.backup_ns(),
+ );
+
+ // Each batch is locked, moved, and released before the next to exclude concurrent readers.
+ // `move_to` writes to the per-datastore move journal before each rename so a concurrent
+ // GC phase-1 drain can still mark the moved chunks; see `move_journal` for the race.
+ for chunk in snapshots.chunks(MAX_SNAPSHOT_LOCKS) {
+ let _batch_locks: Vec<BackupLockGuard> = chunk
+ .iter()
+ .map(|snap| snap.lock().map_err(BackupGroupOpError::Soft))
+ .collect::<Result<Vec<_>, _>>()?;
+
+ for snap in chunk {
+ snap.move_to(&target_group, backend)
+ .map_err(BackupGroupOpError::Hard)?;
+ }
+ // batch locks released here
+ }
+
+ // Move the group notes file (local rename + S3 upload). Diverging notes during a merge are
+ // kept on the target and logged.
+ group
+ .move_notes_to(&target_group, backend)
+ .map_err(BackupGroupOpError::Hard)?;
+
+ // All snapshots and notes are at the target. The source group holds only owner/lock
+ // metadata now, so hand the source lock to `destroy()` to clean up the owner, the group
+ // directory, and any group-level S3 objects in one place.
+ group
+ .destroy(source_guard, backend)
+ .map_err(BackupGroupOpError::Hard)?;
+
+ Ok(())
+ }
+
/// Remove a backup directory including all content
pub fn remove_backup_dir(
self: &Arc<Self>,
@@ -1308,37 +1485,66 @@ impl DataStore {
backup_group: &pbs_api_types::BackupGroup,
auth_id: &Authid,
) -> Result<(Authid, BackupLockGuard), Error> {
+ let (owner, _created, guard) = self
+ .create_locked_backup_group_do(ns, backup_group, auth_id)
+ .map_err(|err| match err {
+ BackupGroupOpError::Soft(err) | BackupGroupOpError::Hard(err) => err,
+ })?;
+ Ok((owner, guard))
+ }
+
+ /// Inner variant of [`Self::create_locked_backup_group`] that also reports whether the group
+ /// directory was newly created, and distinguishes a transient lock conflict
+ /// ([`BackupGroupOpError::Soft`]) from other errors ([`BackupGroupOpError::Hard`]).
+ pub(crate) fn create_locked_backup_group_do(
+ self: &Arc<Self>,
+ ns: &BackupNamespace,
+ backup_group: &pbs_api_types::BackupGroup,
+ auth_id: &Authid,
+ ) -> Result<(Authid, bool, BackupLockGuard), BackupGroupOpError> {
let backup_group = self.backup_group(ns.clone(), backup_group.clone());
- // create intermediate path first
let full_path = backup_group.full_group_path();
std::fs::create_dir_all(full_path.parent().ok_or_else(|| {
- format_err!("could not construct parent path for group {backup_group:?}")
- })?)?;
+ BackupGroupOpError::Hard(format_err!(
+ "could not construct parent path for group {backup_group:?}"
+ ))
+ })?)
+ .map_err(|err| BackupGroupOpError::Hard(err.into()))?;
+
+ // Try to create the group directory. This lets us distinguish "created" from "already
+ // existed".
+ let created = match std::fs::create_dir(&full_path) {
+ Ok(_) => true,
+ Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => false,
+ Err(err) => {
+ return Err(BackupGroupOpError::Hard(format_err!(
+ "unable to create backup group {full_path:?} - {err}"
+ )))
+ }
+ };
- // now create the group, this allows us to check whether it existed before
- match std::fs::create_dir(&full_path) {
- Ok(_) => {
- let guard = backup_group.lock().with_context(|| {
- format!("while creating new locked backup group '{backup_group:?}'")
- })?;
- if let Err(err) = self.set_owner(ns, backup_group.group(), auth_id, false) {
- let _ = std::fs::remove_dir(&full_path);
- return Err(err);
- }
- let owner = self.get_owner(ns, backup_group.group())?; // just to be sure
- Ok((owner, guard))
+ let guard = backup_group.lock().map_err(|err| {
+ if created {
+ let _ = std::fs::remove_dir(&full_path);
}
- Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
- let guard = backup_group.lock().with_context(|| {
- format!("while creating locked backup group '{backup_group:?}'")
- })?;
- let owner = self.get_owner(ns, backup_group.group())?; // just to be sure
- Ok((owner, guard))
+ BackupGroupOpError::Soft(err.context(format!(
+ "while {} locked backup group '{backup_group:?}'",
+ if created { "creating new" } else { "creating" },
+ )))
+ })?;
+
+ if created {
+ if let Err(err) = self.set_owner(ns, backup_group.group(), auth_id, false) {
+ let _ = std::fs::remove_dir(&full_path);
+ return Err(BackupGroupOpError::Hard(err));
}
- Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
}
+ let owner = self
+ .get_owner(ns, backup_group.group())
+ .map_err(BackupGroupOpError::Hard)?;
+ Ok((owner, created, guard))
}
/// Creates a new backup snapshot inside a BackupGroup
--
2.47.3
^ permalink raw reply [flat|nested] 21+ messages in thread* [PATCH proxmox-backup v8 07/13] datastore: add move-namespace
2026-04-22 13:39 [PATCH proxmox-backup v8 00/13] fixes #6195: add support for moving groups and namespaces Hannes Laimer
` (5 preceding siblings ...)
2026-04-22 13:39 ` [PATCH proxmox-backup v8 06/13] datastore: add move-group Hannes Laimer
@ 2026-04-22 13:39 ` Hannes Laimer
2026-04-22 13:39 ` [PATCH proxmox-backup v8 08/13] docs: add section on moving namespaces and groups Hannes Laimer
` (7 subsequent siblings)
14 siblings, 0 replies; 21+ messages in thread
From: Hannes Laimer @ 2026-04-22 13:39 UTC (permalink / raw)
To: pbs-devel
Relocate an entire namespace subtree (the given namespace, all child
namespaces, and their groups) to a new location within the same
datastore.
Groups are iterated and locked one namespace at a time, keeping the
lock-contention window narrow. Groups that cannot be locked because
another task is running are deferred and retried once. Groups that
still fail are reported and remain at the source so they can be
retried individually with move-group.
When merging is enabled and a source group already exists in the
target, snapshots are merged provided ownership matches and source
snapshots are strictly newer.
An optional max-depth limits how many levels of child namespaces are
included, reusing the depth check already enforced by sync. When
delete-source is true (the default), empty source namespaces are
removed level-by-level. A namespace is left behind with a warning
when a group failed to move or a child was created concurrently, and
silently kept when children were explicitly excluded by max-depth.
A shared pre-flight helper validates the request both synchronously
from the API endpoint and again from the worker, so the fast-fail
and the authoritative check cannot drift.
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
---
pbs-datastore/src/datastore.rs | 296 +++++++++++++++++++++++++++++++++
1 file changed, 296 insertions(+)
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index d215dd0d..292c56bf 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -1179,6 +1179,302 @@ impl DataStore {
Ok((removed_all_requested, stats))
}
+ /// Validate a namespace-move request without touching locks.
+ ///
+ /// Fast pre-flight check used by the API endpoint so we can fail synchronously before
+ /// spawning a worker. Re-run inside [`Self::move_namespace`] to close the TOCTOU gap between
+ /// the API-level pre-check and the worker; since there is no namespace lock, that re-run is
+ /// the authoritative gate for namespace-level invariants. Per-group authoritative checks
+ /// live in [`Self::lock_and_move_group`].
+ pub fn check_move_namespace(
+ self: &Arc<Self>,
+ source_ns: &BackupNamespace,
+ target_ns: &BackupNamespace,
+ ) -> Result<(), Error> {
+ if source_ns.is_root() {
+ bail!("cannot move root namespace");
+ }
+ if source_ns == target_ns {
+ bail!("source and target namespace must be different");
+ }
+ if !self.namespace_exists(source_ns) {
+ bail!("source namespace '{source_ns}' does not exist");
+ }
+ let target_parent = target_ns.parent();
+ if !target_ns.is_root() && !self.namespace_exists(&target_parent) {
+ bail!("target parent namespace '{target_parent}' does not exist");
+ }
+ if source_ns.contains(target_ns).is_some() {
+ bail!(
+ "cannot move namespace '{source_ns}' into its own subtree (target: '{target_ns}')"
+ );
+ }
+ Ok(())
+ }
+
+ /// Move a backup namespace (including all child namespaces and groups) to a new location.
+ ///
+ /// Groups are moved one at a time. For each group, exclusive locks on the group and all its
+ /// snapshots (locked and moved in batches) ensure no concurrent operations are active. Groups
+ /// that cannot be locked (because a backup, verify, or other task is running) are deferred and
+ /// retried once, then reported in the error.
+ ///
+ /// If the target namespace already exists, groups are moved into it. Groups that fail (lock
+ /// conflict, merge invariant violation, or I/O error) are skipped and reported at the end -
+ /// they remain at the source and can be retried individually.
+ ///
+ /// `max_depth` limits how many levels of child namespaces below `source_ns` are included.
+ /// `None` means unlimited (move the entire subtree). `Some(0)` moves only the groups directly
+ /// in `source_ns` (no child namespaces).
+ ///
+ /// When `delete_source` is true, source namespace directories are removed once they are empty.
+ /// Sources kept behind because of `max_depth`-excluded children or failed moves stay in place.
+ /// When false the (now empty) sources are kept regardless.
+ ///
+ /// When `merge_groups` is true and a source group already exists in the target namespace, the
+ /// snapshots are merged into the existing target group (provided ownership matches and source
+ /// snapshots are strictly newer). When false the operation fails for that group.
+ pub fn move_namespace(
+ self: &Arc<Self>,
+ source_ns: &BackupNamespace,
+ target_ns: &BackupNamespace,
+ max_depth: Option<usize>,
+ delete_source: bool,
+ merge_groups: bool,
+ ) -> Result<(), Error> {
+ if *OLD_LOCKING {
+ bail!("move operations require new-style file-based locking");
+ }
+
+ // Re-run the pre-flight checks inside the worker to close the gap between the API-level
+ // pre-check and here (e.g. source namespace deleted in between).
+ self.check_move_namespace(source_ns, target_ns)?;
+
+ // Collect the source subtree for up-front depth-limit validation and for later cleanup.
+ // Groups are iterated lazily per-namespace below.
+ let depth = max_depth.unwrap_or(MAX_NAMESPACE_DEPTH);
+ let attempted_ns: Vec<BackupNamespace> =
+ ListNamespacesRecursive::new_max_depth(Arc::clone(self), source_ns.clone(), depth)?
+ .collect::<Result<Vec<_>, Error>>()?;
+
+ check_namespace_depth_limit(source_ns, target_ns, &attempted_ns)?;
+
+ let backend = self.backend()?;
+
+ log::info!(
+ "moving namespace '{source_ns}' -> '{target_ns}': {} namespaces",
+ attempted_ns.len(),
+ );
+
+ // Create target namespaces parent-before-child so `create_namespace` can validate each
+ // parent exists. `create_namespace` also creates the S3 marker when applicable.
+ let mut create_order: Vec<&BackupNamespace> = attempted_ns.iter().collect();
+ create_order.sort_by_key(|ns| ns.depth());
+ for ns in create_order {
+ let target_child = ns.map_prefix(source_ns, target_ns)?;
+ if target_child.is_root() || self.namespace_exists(&target_child) {
+ continue;
+ }
+ let name = target_child
+ .components()
+ .last()
+ .ok_or_else(|| format_err!("cannot determine last component of '{target_child}'"))?
+ .to_owned();
+ self.create_namespace(&target_child.parent(), name)
+ .with_context(|| format!("failed to create target namespace '{target_child}'"))?;
+ }
+
+ // Move groups one namespace at a time. Iterating lazily (rather than collecting all groups
+ // up-front) keeps each group's lock window short and avoids serialising the move behind a
+ // long enumeration phase.
+ let mut failed_fatal: Vec<(BackupNamespace, String)> = Vec::new();
+ let mut failed_retryable: Vec<(BackupNamespace, String)> = Vec::new();
+ let mut failed_ns: HashSet<BackupNamespace> = HashSet::new();
+ let mut deferred: Vec<BackupGroup> = Vec::new();
+ let mut moved_count: usize = 0;
+
+ for ns in &attempted_ns {
+ let iter = match self.iter_backup_groups(ns.clone()) {
+ Ok(iter) => iter,
+ Err(err) => {
+ warn!("failed to list groups in '{ns}': {err:#}");
+ failed_ns.insert(ns.clone());
+ continue;
+ }
+ };
+ for group in iter {
+ let group = match group {
+ Ok(g) => g,
+ Err(err) => {
+ warn!("failed to read group entry in '{ns}': {err:#}");
+ failed_ns.insert(ns.clone());
+ continue;
+ }
+ };
+ match self.lock_and_move_group(source_ns, &group, target_ns, &backend, merge_groups)
+ {
+ Ok(()) => {
+ moved_count += 1;
+ log::info!(
+ "moved group {moved_count} ('{}' in '{}')",
+ group.group(),
+ group.backup_ns(),
+ );
+ }
+ Err(BackupGroupOpError::Soft(err)) => {
+ log::info!(
+ "deferring group '{}' in '{}' - lock conflict, will retry: {err:#}",
+ group.group(),
+ group.backup_ns(),
+ );
+ deferred.push(group);
+ }
+ Err(BackupGroupOpError::Hard(err)) => {
+ warn!(
+ "failed to move group '{}' in '{}': {err:#}",
+ group.group(),
+ group.backup_ns(),
+ );
+ failed_fatal.push((group.backup_ns().clone(), group.group().to_string()));
+ failed_ns.insert(group.backup_ns().clone());
+ }
+ }
+ }
+ }
+
+ // Retry deferred groups once.
+ if !deferred.is_empty() {
+ log::info!("retrying {} deferred group(s)...", deferred.len());
+ for group in &deferred {
+ match self.lock_and_move_group(source_ns, group, target_ns, &backend, merge_groups)
+ {
+ Ok(()) => {
+ moved_count += 1;
+ log::info!(
+ "moved group {moved_count} ('{}' in '{}') on retry",
+ group.group(),
+ group.backup_ns(),
+ );
+ }
+ Err(BackupGroupOpError::Soft(err)) => {
+ warn!(
+ "still locked after retry: group '{}' in '{}': {err:#}",
+ group.group(),
+ group.backup_ns(),
+ );
+ failed_retryable
+ .push((group.backup_ns().clone(), group.group().to_string()));
+ failed_ns.insert(group.backup_ns().clone());
+ }
+ Err(BackupGroupOpError::Hard(err)) => {
+ warn!(
+ "failed to move group '{}' in '{}' on retry: {err:#}",
+ group.group(),
+ group.backup_ns(),
+ );
+ failed_fatal.push((group.backup_ns().clone(), group.group().to_string()));
+ failed_ns.insert(group.backup_ns().clone());
+ }
+ }
+ }
+ }
+
+ // Clean up source namespaces when requested. Process deepest-first so child directories
+ // are removed before their parents.
+ if delete_source {
+ let attempted_set: HashSet<&BackupNamespace> = attempted_ns.iter().collect();
+
+ for ns in attempted_ns.iter().rev() {
+ // Skip any namespace on the path to a failed group - those groups still exist at
+ // source and the namespace is therefore not empty.
+ if failed_ns
+ .iter()
+ .any(|failed| failed == ns || ns.contains(failed).is_some())
+ {
+ log::warn!(
+ "skipping source namespace '{ns}' cleanup: contains groups that failed to move",
+ );
+ continue;
+ }
+ // A child of this namespace that's not in `attempted_set` either
+ // - sits below the user's max-depth, so we intentionally did not move it, or
+ // - appeared after we started (concurrent creation).
+ // Either way the source is non-empty and must stay. Warn only for the concurrent
+ // case (depth-limit skips are an explicit user choice).
+ let max_attempted_depth = source_ns.depth() + depth;
+ let mut concurrent_child: Option<BackupNamespace> = None;
+ let mut depth_limited_child = false;
+ for child in self.iter_backup_ns_ok(ns.clone())? {
+ if attempted_set.contains(&child) {
+ continue;
+ }
+ if child.depth() <= max_attempted_depth {
+ concurrent_child = Some(child);
+ break;
+ }
+ depth_limited_child = true;
+ }
+ if let Some(child) = concurrent_child {
+ log::warn!(
+ "skipping source namespace '{ns}' cleanup: child namespace '{child}' \
+ was created after the move started",
+ );
+ continue;
+ }
+ if depth_limited_child {
+ log::debug!(
+ "keeping source namespace '{ns}': contains namespaces excluded by max-depth",
+ );
+ continue;
+ }
+
+ // All groups moved and all children already cleaned up: drop the now-empty
+ // namespace. Single-level cleanup since we drive traversal ourselves.
+ match self.remove_namespace(ns, false) {
+ Ok((true, _)) => {}
+ Ok((false, _)) => {
+ log::warn!(
+ "source namespace '{ns}' was not fully removed (non-empty after move)",
+ );
+ }
+ Err(err) => {
+ log::warn!("failed to remove source namespace '{ns}': {err:#}");
+ }
+ }
+ }
+ }
+
+ if !failed_fatal.is_empty() || !failed_retryable.is_empty() {
+ let fmt = |list: &[(BackupNamespace, String)]| -> String {
+ list.iter()
+ .map(|(ns, group)| format!("'{group}' in '{ns}'"))
+ .collect::<Vec<_>>()
+ .join(", ")
+ };
+ let total = failed_fatal.len() + failed_retryable.len();
+ let mut msg = format!(
+ "namespace move partially completed; {total} group(s) could not be moved and remain at source"
+ );
+ if !failed_fatal.is_empty() {
+ msg.push_str(&format!(
+ "; {} with a fatal error (investigate before retrying): {}",
+ failed_fatal.len(),
+ fmt(&failed_fatal),
+ ));
+ }
+ if !failed_retryable.is_empty() {
+ msg.push_str(&format!(
+ "; {} still lock-conflicted (rerun move-namespace or move-group may succeed): {}",
+ failed_retryable.len(),
+ fmt(&failed_retryable),
+ ));
+ }
+ bail!("{msg}");
+ }
+
+ Ok(())
+ }
+
/// Remove a complete backup group including all snapshots.
///
/// Returns `BackupGroupDeleteStats`, containing the number of deleted snapshots
--
2.47.3
^ permalink raw reply	[flat|nested] 21+ messages in thread

* [PATCH proxmox-backup v8 08/13] docs: add section on moving namespaces and groups
2026-04-22 13:39 [PATCH proxmox-backup v8 00/13] fixes #6195: add support for moving groups and namespaces Hannes Laimer
` (6 preceding siblings ...)
2026-04-22 13:39 ` [PATCH proxmox-backup v8 07/13] datastore: add move-namespace Hannes Laimer
@ 2026-04-22 13:39 ` Hannes Laimer
2026-04-22 13:39 ` [PATCH proxmox-backup v8 09/13] api: add POST endpoint for move-group Hannes Laimer
` (6 subsequent siblings)
14 siblings, 0 replies; 21+ messages in thread
From: Hannes Laimer @ 2026-04-22 13:39 UTC (permalink / raw)
To: pbs-devel
Document the move-group and move-namespace operations including merge
behavior, max-depth and delete-source parameters. Add an online help
anchor so the UI move dialogs can link directly to this section.
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
---
docs/storage.rst | 60 ++++++++++++++++++++++++++++++++++++++++++-
www/OnlineHelpInfo.js | 4 +++
2 files changed, 63 insertions(+), 1 deletion(-)
diff --git a/docs/storage.rst b/docs/storage.rst
index 672091f8..1f6eabe8 100644
--- a/docs/storage.rst
+++ b/docs/storage.rst
@@ -527,7 +527,65 @@ For backup groups, the existing privilege rules still apply. You either need a
privileged enough permission or to be the owner of the backup group; nothing
changed here.
-.. todo:: continue
+.. _storage_move_namespaces_groups:
+
+Moving Namespaces and Groups
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Backup groups can be moved between namespaces within the same datastore.
+This is useful for reorganizing backup hierarchies without having to
+re-run backups.
+
+A single group can be moved with ``move-group``. To relocate an entire
+namespace subtree (including all child namespaces and their groups), use
+``move-namespace``.
+
+.. code-block:: console
+
+ # proxmox-backup-manager datastore move-group <store> --ns <source> --target-ns <target> --backup-type <type> --backup-id <id>
+ # proxmox-backup-manager datastore move-namespace <store> --ns <source> --target-ns <target>
+
+If the target namespace already exists, groups are moved into it. When a
+group with the same type and ID already exists in the target and
+``merge-groups`` is enabled, the snapshots are merged into the existing
+group provided:
+
+- both groups have the same owner
+- the oldest source snapshot is newer than the newest target snapshot
+
+Groups that cannot be merged or locked are skipped and reported in the
+task log. They remain at the source and can be retried individually with
+``move-group``.
+
+.. note::
+
+ With defaults, ``move-namespace`` merges into existing target groups
+ (``merge-groups=true``) and removes source namespaces once they are empty
+ (``delete-source=true``). Pass ``--merge-groups false`` or
+ ``--delete-source false`` to opt out.
+
+Optional parameters for ``move-namespace``:
+
+``merge-groups``
+ Allow merging snapshots into groups that already exist in the target
+ namespace with the same type and ID. Defaults to true.
+
+``max-depth``
+ Limits how many levels of child namespaces below the source are
+ included. When not set, the entire subtree is moved.
+
+``delete-source``
+ Controls whether the source namespace directories are removed after
+ all groups have been moved out. Defaults to true. Set to false to
+ keep the (now empty) source namespace structure.
+
+Required privileges:
+
+- ``move-group``: ``DATASTORE_PRUNE`` on the source namespace and
+ ``DATASTORE_BACKUP`` on the target namespace, plus ownership of the
+ backup group; or ``DATASTORE_MODIFY`` on both.
+- ``move-namespace``: ``DATASTORE_MODIFY`` on the parent of both the
+ source and the target namespace.
Options
diff --git a/www/OnlineHelpInfo.js b/www/OnlineHelpInfo.js
index 89650cfb..e118b0ad 100644
--- a/www/OnlineHelpInfo.js
+++ b/www/OnlineHelpInfo.js
@@ -299,6 +299,10 @@ const proxmoxOnlineHelpInfo = {
"link": "/docs/storage.html#storage-namespaces",
"title": "Backup Namespaces"
},
+ "storage-move-namespaces-groups": {
+ "link": "/docs/storage.html#storage-move-namespaces-groups",
+ "title": "Moving Namespaces and Groups"
+ },
"datastore-tuning-options": {
"link": "/docs/storage.html#datastore-tuning-options",
"title": "Tuning"
--
2.47.3
^ permalink raw reply	[flat|nested] 21+ messages in thread

* [PATCH proxmox-backup v8 09/13] api: add POST endpoint for move-group
2026-04-22 13:39 [PATCH proxmox-backup v8 00/13] fixes #6195: add support for moving groups and namespaces Hannes Laimer
` (7 preceding siblings ...)
2026-04-22 13:39 ` [PATCH proxmox-backup v8 08/13] docs: add section on moving namespaces and groups Hannes Laimer
@ 2026-04-22 13:39 ` Hannes Laimer
2026-04-22 13:39 ` [PATCH proxmox-backup v8 10/13] api: add POST endpoint for move-namespace Hannes Laimer
` (5 subsequent siblings)
14 siblings, 0 replies; 21+ messages in thread
From: Hannes Laimer @ 2026-04-22 13:39 UTC (permalink / raw)
To: pbs-devel
Add a dedicated /move-group endpoint for moving backup groups between
namespaces within the same datastore.
The permission model allows users with DATASTORE_PRUNE on the source
and DATASTORE_BACKUP on the target namespace to move groups they own,
without requiring full DATASTORE_MODIFY on both sides.
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
---
src/api2/admin/datastore.rs | 90 +++++++++++++++++++++++++++++++++++++
1 file changed, 90 insertions(+)
diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index 757b3114..b035bd9e 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -280,6 +280,95 @@ pub async fn delete_group(
.await?
}
+#[api(
+ input: {
+ properties: {
+ store: { schema: DATASTORE_SCHEMA },
+ ns: {
+ type: BackupNamespace,
+ optional: true,
+ },
+ group: {
+ type: pbs_api_types::BackupGroup,
+ flatten: true,
+ },
+ "target-ns": {
+ type: BackupNamespace,
+ optional: true,
+ },
+ "merge-group": {
+ type: bool,
+ optional: true,
+ default: true,
+ description: "If the group already exists in the target namespace, merge \
+ snapshots into it. Requires matching ownership and non-overlapping \
+ snapshot times.",
+ },
+ },
+ },
+ returns: {
+ schema: UPID_SCHEMA,
+ },
+ access: {
+ permission: &Permission::Anybody,
+ description: "Requires DATASTORE_MODIFY or DATASTORE_PRUNE (+ group ownership) on the \
+ source namespace and DATASTORE_MODIFY or DATASTORE_BACKUP (+ group ownership) on \
+ the target namespace.",
+ },
+)]
+/// Move a backup group to a different namespace within the same datastore.
+pub fn move_group(
+ store: String,
+ ns: Option<BackupNamespace>,
+ group: pbs_api_types::BackupGroup,
+ target_ns: Option<BackupNamespace>,
+ merge_group: bool,
+ rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+ let ns = ns.unwrap_or_default();
+ let target_ns = target_ns.unwrap_or_default();
+
+ let source_limited = check_ns_privs_full(
+ &store,
+ &ns,
+ &auth_id,
+ PRIV_DATASTORE_MODIFY,
+ PRIV_DATASTORE_PRUNE,
+ )?;
+ let target_limited = check_ns_privs_full(
+ &store,
+ &target_ns,
+ &auth_id,
+ PRIV_DATASTORE_MODIFY,
+ PRIV_DATASTORE_BACKUP,
+ )?;
+
+ let datastore = DataStore::lookup_datastore(lookup_with(&store, Operation::Write))?;
+
+ if source_limited || target_limited {
+ let owner = datastore.get_owner(&ns, &group)?;
+ check_backup_owner(&owner, &auth_id)?;
+ }
+
+ // Best-effort pre-checks for a fast synchronous error before spawning a worker.
+ // The worker re-runs the same check post-lock, which is the authoritative gate.
+ datastore.check_move_group(&ns, &target_ns, &group, merge_group)?;
+
+ let worker_id = format!("{store}:{ns}/{group}:{target_ns}");
+ let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
+
+ let upid_str = WorkerTask::new_thread(
+ "move-group",
+ Some(worker_id),
+ auth_id.to_string(),
+ to_stdout,
+ move |_worker| datastore.move_group(&ns, &group, &target_ns, merge_group),
+ )?;
+
+ Ok(json!(upid_str))
+}
+
#[api(
input: {
properties: {
@@ -2849,6 +2938,7 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
.delete(&API_METHOD_DELETE_GROUP),
),
("mount", &Router::new().post(&API_METHOD_MOUNT)),
+ ("move-group", &Router::new().post(&API_METHOD_MOVE_GROUP)),
(
"namespace",
// FIXME: move into datastore:: sub-module?!
--
2.47.3
^ permalink raw reply	[flat|nested] 21+ messages in thread

* [PATCH proxmox-backup v8 10/13] api: add POST endpoint for move-namespace
2026-04-22 13:39 [PATCH proxmox-backup v8 00/13] fixes #6195: add support for moving groups and namespaces Hannes Laimer
` (8 preceding siblings ...)
2026-04-22 13:39 ` [PATCH proxmox-backup v8 09/13] api: add POST endpoint for move-group Hannes Laimer
@ 2026-04-22 13:39 ` Hannes Laimer
2026-04-22 13:39 ` [PATCH proxmox-backup v8 11/13] ui: add move group action Hannes Laimer
` (4 subsequent siblings)
14 siblings, 0 replies; 21+ messages in thread
From: Hannes Laimer @ 2026-04-22 13:39 UTC (permalink / raw)
To: pbs-devel
Add a dedicated /move-namespace endpoint for moving namespaces to a
new location within the same datastore. Exposes max-depth,
delete-source, and merge-groups as optional parameters.
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
---
src/api2/admin/datastore.rs | 4 ++
src/api2/admin/namespace.rs | 85 ++++++++++++++++++++++++++++++++++++-
2 files changed, 87 insertions(+), 2 deletions(-)
diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index b035bd9e..85e828bb 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -2939,6 +2939,10 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
),
("mount", &Router::new().post(&API_METHOD_MOUNT)),
("move-group", &Router::new().post(&API_METHOD_MOVE_GROUP)),
+ (
+ "move-namespace",
+ &Router::new().post(&crate::api2::admin::namespace::API_METHOD_MOVE_NAMESPACE),
+ ),
(
"namespace",
// FIXME: move into datastore:: sub-module?!
diff --git a/src/api2/admin/namespace.rs b/src/api2/admin/namespace.rs
index c885ab54..56f42002 100644
--- a/src/api2/admin/namespace.rs
+++ b/src/api2/admin/namespace.rs
@@ -1,12 +1,16 @@
use anyhow::{bail, Error};
use pbs_config::CachedUserInfo;
-use proxmox_router::{http_bail, ApiMethod, Permission, Router, RpcEnvironment};
+use proxmox_rest_server::WorkerTask;
+use proxmox_router::{
+ http_bail, ApiMethod, Permission, Router, RpcEnvironment, RpcEnvironmentType,
+};
use proxmox_schema::*;
+use serde_json::{json, Value};
use pbs_api_types::{
Authid, BackupGroupDeleteStats, BackupNamespace, NamespaceListItem, Operation,
- DATASTORE_SCHEMA, NS_MAX_DEPTH_SCHEMA, PROXMOX_SAFE_ID_FORMAT,
+ DATASTORE_SCHEMA, NS_MAX_DEPTH_SCHEMA, PROXMOX_SAFE_ID_FORMAT, UPID_SCHEMA,
};
use pbs_datastore::DataStore;
@@ -192,6 +196,83 @@ pub fn delete_namespace(
Ok(stats)
}
+#[api(
+ input: {
+ properties: {
+ store: { schema: DATASTORE_SCHEMA },
+ ns: {
+ type: BackupNamespace,
+ },
+ "target-ns": {
+ type: BackupNamespace,
+ },
+ "max-depth": {
+ schema: NS_MAX_DEPTH_SCHEMA,
+ optional: true,
+ },
+ "delete-source": {
+ type: bool,
+ optional: true,
+ default: true,
+ description: "Remove the source namespace after moving all contents. \
+ Defaults to true.",
+ },
+ "merge-groups": {
+ type: bool,
+ optional: true,
+ default: true,
+ description: "If a group with the same name already exists in the target \
+ namespace, merge snapshots into it. Requires matching ownership and \
+ non-overlapping snapshot times.",
+ },
+ },
+ },
+ returns: {
+ schema: UPID_SCHEMA,
+ },
+ access: {
+ permission: &Permission::Anybody,
+ description: "Requires DATASTORE_MODIFY on the parent of 'ns' and on the parent of 'target-ns'.",
+ },
+)]
+/// Move a backup namespace (including all child namespaces and groups) to a new location.
+pub fn move_namespace(
+ store: String,
+ ns: BackupNamespace,
+ target_ns: BackupNamespace,
+ max_depth: Option<usize>,
+ delete_source: bool,
+ merge_groups: bool,
+ rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+
+ check_ns_modification_privs(&store, &ns, &auth_id)?;
+ check_ns_modification_privs(&store, &target_ns, &auth_id)?;
+
+ let datastore =
+ DataStore::lookup_datastore(crate::tools::lookup_with(&store, Operation::Write))?;
+
+ // Best-effort pre-checks for a fast synchronous error before spawning a worker.
+ // The worker re-runs the same check, which is the authoritative gate.
+ datastore.check_move_namespace(&ns, &target_ns)?;
+
+ let worker_id = format!("{store}:{ns}:{target_ns}");
+ let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
+
+ let upid_str = WorkerTask::new_thread(
+ "move-namespace",
+ Some(worker_id),
+ auth_id.to_string(),
+ to_stdout,
+ move |_worker| {
+ datastore.move_namespace(&ns, &target_ns, max_depth, delete_source, merge_groups)
+ },
+ )?;
+
+ Ok(json!(upid_str))
+}
+
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_NAMESPACES)
.post(&API_METHOD_CREATE_NAMESPACE)
--
2.47.3
^ permalink raw reply	[flat|nested] 21+ messages in thread

* [PATCH proxmox-backup v8 11/13] ui: add move group action
2026-04-22 13:39 [PATCH proxmox-backup v8 00/13] fixes #6195: add support for moving groups and namespaces Hannes Laimer
` (9 preceding siblings ...)
2026-04-22 13:39 ` [PATCH proxmox-backup v8 10/13] api: add POST endpoint for move-namespace Hannes Laimer
@ 2026-04-22 13:39 ` Hannes Laimer
2026-04-23 13:35 ` Michael Köppl
2026-04-22 13:39 ` [PATCH proxmox-backup v8 12/13] ui: add move namespace action Hannes Laimer
` (3 subsequent siblings)
14 siblings, 1 reply; 21+ messages in thread
From: Hannes Laimer @ 2026-04-22 13:39 UTC (permalink / raw)
To: pbs-devel
Add a "Move" action to the backup group context menu and action
column. Opens a dialog where the user selects a target namespace,
then submits a POST to the move-group API endpoint.
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
---
www/Makefile | 1 +
www/datastore/Content.js | 47 ++++++++++++++++++++++++++
www/window/GroupMove.js | 71 ++++++++++++++++++++++++++++++++++++++++
3 files changed, 119 insertions(+)
create mode 100644 www/window/GroupMove.js
diff --git a/www/Makefile b/www/Makefile
index 5a60e47e..06441c02 100644
--- a/www/Makefile
+++ b/www/Makefile
@@ -76,6 +76,7 @@ JSSRC= \
config/PruneAndGC.js \
window/ACLEdit.js \
window/BackupGroupChangeOwner.js \
+ window/GroupMove.js \
window/CreateDirectory.js \
window/DataStoreEdit.js \
window/NamespaceEdit.js \
diff --git a/www/datastore/Content.js b/www/datastore/Content.js
index dfb7787c..548f5670 100644
--- a/www/datastore/Content.js
+++ b/www/datastore/Content.js
@@ -668,6 +668,27 @@ Ext.define('PBS.DataStoreContent', {
});
},
+ moveGroup: function (data) {
+ let me = this;
+ let view = me.getView();
+ let group = `${data.backup_type}/${data.backup_id}`;
+ Ext.create('PBS.window.GroupMove', {
+ datastore: view.datastore,
+ namespace: view.namespace,
+ backupType: data.backup_type,
+ backupId: data.backup_id,
+ group,
+ taskDone: () => me.reload(),
+ }).show();
+ },
+
+ onMove: function (table, rI, cI, item, e, { data }) {
+ let me = this;
+ if (data.ty === 'group') {
+ me.moveGroup(data);
+ }
+ },
+
forgetGroup: function (data) {
let me = this;
let view = me.getView();
@@ -972,6 +993,7 @@ Ext.define('PBS.DataStoreContent', {
onVerify: createControllerCallback('onVerify'),
onChangeOwner: createControllerCallback('onChangeOwner'),
onPrune: createControllerCallback('onPrune'),
+ onMove: createControllerCallback('onMove'),
onForget: createControllerCallback('onForget'),
});
} else if (record.data.ty === 'dir') {
@@ -1086,6 +1108,20 @@ Ext.define('PBS.DataStoreContent', {
: 'pmx-hidden',
isActionDisabled: (v, r, c, i, rec) => !!rec.data.leaf,
},
+ {
+ handler: 'onMove',
+ getTip: (v, m, { data }) => {
+ if (data.ty === 'group') {
+ return Ext.String.format(gettext("Move group '{0}'"), v);
+ }
+ return '';
+ },
+ getClass: (v, m, { data }) => {
+ if (data.ty === 'group') { return 'fa fa-arrows'; }
+ return 'pmx-hidden';
+ },
+ isActionDisabled: (v, r, c, i, { data }) => data.ty !== 'group',
+ },
{
handler: 'onChangeOwner',
getClass: (v, m, { data }) =>
@@ -1438,6 +1474,7 @@ Ext.define('PBS.datastore.GroupCmdMenu', {
onVerify: undefined,
onChangeOwner: undefined,
onPrune: undefined,
+ onMove: undefined,
onForget: undefined,
items: [
@@ -1461,6 +1498,16 @@ Ext.define('PBS.datastore.GroupCmdMenu', {
hidden: '{!onVerify}',
},
},
+ {
+ text: gettext('Move'),
+ iconCls: 'fa fa-arrows',
+ handler: function () {
+ this.up('menu').onMove();
+ },
+ cbind: {
+ hidden: '{!onMove}',
+ },
+ },
{
text: gettext('Change owner'),
iconCls: 'fa fa-user',
diff --git a/www/window/GroupMove.js b/www/window/GroupMove.js
new file mode 100644
index 00000000..6fa5588a
--- /dev/null
+++ b/www/window/GroupMove.js
@@ -0,0 +1,71 @@
+Ext.define('PBS.window.GroupMove', {
+ extend: 'Proxmox.window.Edit',
+ alias: 'widget.pbsGroupMove',
+ mixins: ['Proxmox.Mixin.CBind'],
+
+ onlineHelp: 'storage-move-namespaces-groups',
+
+ isCreate: true,
+ submitText: gettext('Move'),
+ showTaskViewer: true,
+
+ cbind: {
+ url: '/api2/extjs/admin/datastore/{datastore}/move-group',
+ title: (get) => Ext.String.format(gettext("Move Backup Group '{0}'"), get('group')),
+ },
+ method: 'POST',
+
+ width: 400,
+ fieldDefaults: {
+ labelWidth: 120,
+ },
+
+ items: {
+ xtype: 'inputpanel',
+ onGetValues: function (values) {
+ let win = this.up('window');
+ let result = {
+ 'backup-type': win.backupType,
+ 'backup-id': win.backupId,
+ 'target-ns': values['target-ns'] || '',
+ };
+ if (win.namespace && win.namespace !== '') {
+ result.ns = win.namespace;
+ }
+ if (values['merge-group'] !== undefined) {
+ result['merge-group'] = values['merge-group'];
+ }
+ return result;
+ },
+ items: [
+ {
+ xtype: 'displayfield',
+ fieldLabel: gettext('Group'),
+ cbind: {
+ value: '{group}',
+ },
+ },
+ {
+ xtype: 'pbsNamespaceSelector',
+ name: 'target-ns',
+ fieldLabel: gettext('Target Namespace'),
+ allowBlank: true,
+ cbind: {
+ datastore: '{datastore}',
+ },
+ },
+ ],
+ advancedItems: [
+ {
+ xtype: 'proxmoxcheckbox',
+ name: 'merge-group',
+ fieldLabel: gettext('Merge Group'),
+ checked: true,
+ autoEl: {
+ tag: 'div',
+ 'data-qtip': gettext('If a group with the same name already exists in the target namespace, merge snapshots into it. Requires matching ownership and non-overlapping snapshot times.'),
+ },
+ },
+ ],
+ },
+});
--
2.47.3
^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH proxmox-backup v8 11/13] ui: add move group action
2026-04-22 13:39 ` [PATCH proxmox-backup v8 11/13] ui: add move group action Hannes Laimer
@ 2026-04-23 13:35 ` Michael Köppl
2026-04-23 13:47 ` Hannes Laimer
0 siblings, 1 reply; 21+ messages in thread
From: Michael Köppl @ 2026-04-23 13:35 UTC (permalink / raw)
To: Hannes Laimer, pbs-devel
On Wed Apr 22, 2026 at 3:39 PM CEST, Hannes Laimer wrote:
[snip]
> + items: {
> + xtype: 'inputpanel',
> + onGetValues: function (values) {
> + let win = this.up('window');
> + let result = {
> + 'backup-type': win.backupType,
> + 'backup-id': win.backupId,
> + 'target-ns': values['target-ns'] || '',
> + };
> + if (win.namespace && win.namespace !== '') {
> + result.ns = win.namespace;
> + }
> + if (values['merge-group'] !== undefined) {
> + result['merge-group'] = values['merge-group'];
> + }
when I uncheck the "Merge Group" checkbox, the `merge-group` param is
not set in the request. since the default value in the backend is
`true`, merging is still attempted.
> + return result;
> + },
> + items: [
> + {
> + xtype: 'displayfield',
> + fieldLabel: gettext('Group'),
> + cbind: {
> + value: '{group}',
> + },
[snip]
^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH proxmox-backup v8 11/13] ui: add move group action
2026-04-23 13:35 ` Michael Köppl
@ 2026-04-23 13:47 ` Hannes Laimer
0 siblings, 0 replies; 21+ messages in thread
From: Hannes Laimer @ 2026-04-23 13:47 UTC (permalink / raw)
To: Michael Köppl, pbs-devel
On 2026-04-23 15:34, Michael Köppl wrote:
> On Wed Apr 22, 2026 at 3:39 PM CEST, Hannes Laimer wrote:
>
> [snip]
>
>> + items: {
>> + xtype: 'inputpanel',
>> + onGetValues: function (values) {
>> + let win = this.up('window');
>> + let result = {
>> + 'backup-type': win.backupType,
>> + 'backup-id': win.backupId,
>> + 'target-ns': values['target-ns'] || '',
>> + };
>> + if (win.namespace && win.namespace !== '') {
>> + result.ns = win.namespace;
>> + }
>> + if (values['merge-group'] !== undefined) {
>> + result['merge-group'] = values['merge-group'];
>> + }
>
> when I uncheck the "Merge Group" checkbox, the `merge-group` param is
> not set in the request. since the default value in the backend is
> `true`, merging is still attempted.
>
right, good catch! should just use `delete_if_default(.., true)`
>> + return result;
>> + },
>> + items: [
>> + {
>> + xtype: 'displayfield',
>> + fieldLabel: gettext('Group'),
>> + cbind: {
>> + value: '{group}',
>> + },
>
> [snip]
^ permalink raw reply [flat|nested] 21+ messages in thread
* [PATCH proxmox-backup v8 12/13] ui: add move namespace action
2026-04-22 13:39 [PATCH proxmox-backup v8 00/13] fixes #6195: add support for moving groups and namespaces Hannes Laimer
` (10 preceding siblings ...)
2026-04-22 13:39 ` [PATCH proxmox-backup v8 11/13] ui: add move group action Hannes Laimer
@ 2026-04-22 13:39 ` Hannes Laimer
2026-04-23 14:49 ` Michael Köppl
2026-04-22 13:39 ` [PATCH proxmox-backup v8 13/13] cli: add move-namespace and move-group commands Hannes Laimer
` (2 subsequent siblings)
14 siblings, 1 reply; 21+ messages in thread
From: Hannes Laimer @ 2026-04-22 13:39 UTC (permalink / raw)
To: pbs-devel
Add a "Move" action to the namespace action column. Opens a dialog
where the user selects a new parent namespace and name, then submits
a POST to the move-namespace API endpoint.
The source namespace and its descendants are excluded from the parent
selector to prevent cycles. An advanced section exposes the max-depth
and delete-source options.
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
---
www/Makefile | 1 +
www/datastore/Content.js | 35 ++++++++-
www/form/NamespaceSelector.js | 11 +++
www/window/NamespaceMove.js | 136 ++++++++++++++++++++++++++++++++++
4 files changed, 181 insertions(+), 2 deletions(-)
create mode 100644 www/window/NamespaceMove.js
diff --git a/www/Makefile b/www/Makefile
index 06441c02..bad243cf 100644
--- a/www/Makefile
+++ b/www/Makefile
@@ -80,6 +80,7 @@ JSSRC= \
window/CreateDirectory.js \
window/DataStoreEdit.js \
window/NamespaceEdit.js \
+ window/NamespaceMove.js \
window/MaintenanceOptions.js \
window/NotesEdit.js \
window/NotificationThresholds.js \
diff --git a/www/datastore/Content.js b/www/datastore/Content.js
index 548f5670..52248756 100644
--- a/www/datastore/Content.js
+++ b/www/datastore/Content.js
@@ -668,6 +668,26 @@ Ext.define('PBS.DataStoreContent', {
});
},
+ moveNS: function () {
+ let me = this;
+ let view = me.getView();
+ if (!view.namespace || view.namespace === '') {
+ return;
+ }
+ let win = Ext.create('PBS.window.NamespaceMove', {
+ datastore: view.datastore,
+ namespace: view.namespace,
+ taskDone: (success) => {
+ if (success) {
+ let newNs = win.getNewNamespace();
+ view.down('pbsNamespaceSelector').store?.load();
+ me.nsChange(null, newNs);
+ }
+ },
+ });
+ win.show();
+ },
+
moveGroup: function (data) {
let me = this;
let view = me.getView();
@@ -686,6 +706,8 @@ Ext.define('PBS.DataStoreContent', {
let me = this;
if (data.ty === 'group') {
me.moveGroup(data);
+ } else if (data.ty === 'ns') {
+ me.moveNS();
}
},
@@ -1114,13 +1136,22 @@ Ext.define('PBS.DataStoreContent', {
if (data.ty === 'group') {
return Ext.String.format(gettext("Move group '{0}'"), v);
}
- return '';
+ return Ext.String.format(gettext("Move namespace '{0}'"), v);
},
getClass: (v, m, { data }) => {
if (data.ty === 'group') { return 'fa fa-arrows'; }
+ if (data.ty === 'ns' && !data.isRootNS && data.ns === undefined) {
+ return 'fa fa-arrows';
+ }
return 'pmx-hidden';
},
- isActionDisabled: (v, r, c, i, { data }) => data.ty !== 'group',
+ isActionDisabled: (v, r, c, i, { data }) => {
+ if (data.ty === 'group') { return false; }
+ if (data.ty === 'ns' && !data.isRootNS && data.ns === undefined) {
+ return false;
+ }
+ return true;
+ },
},
{
handler: 'onChangeOwner',
diff --git a/www/form/NamespaceSelector.js b/www/form/NamespaceSelector.js
index ddf68254..d349b568 100644
--- a/www/form/NamespaceSelector.js
+++ b/www/form/NamespaceSelector.js
@@ -90,6 +90,17 @@ Ext.define('PBS.form.NamespaceSelector', {
},
});
+ if (me.excludeNs) {
+ me.store.addFilter(
+ new Ext.util.Filter({
+ filterFn: (rec) => {
+ let ns = rec.data.ns;
+ return ns !== me.excludeNs && !ns.startsWith(`${me.excludeNs}/`);
+ },
+ }),
+ );
+ }
+
me.callParent();
},
});
diff --git a/www/window/NamespaceMove.js b/www/window/NamespaceMove.js
new file mode 100644
index 00000000..2bbc0269
--- /dev/null
+++ b/www/window/NamespaceMove.js
@@ -0,0 +1,136 @@
+Ext.define('PBS.window.NamespaceMove', {
+ extend: 'Proxmox.window.Edit',
+ alias: 'widget.pbsNamespaceMove',
+ mixins: ['Proxmox.Mixin.CBind'],
+
+ onlineHelp: 'storage-move-namespaces-groups',
+
+ submitText: gettext('Move'),
+ isCreate: true,
+ showTaskViewer: true,
+
+ cbind: {
+ url: '/api2/extjs/admin/datastore/{datastore}/move-namespace',
+ title: (get) => Ext.String.format(gettext("Move Namespace '{0}'"), get('namespace')),
+ },
+ method: 'POST',
+
+ width: 450,
+ fieldDefaults: {
+ labelWidth: 120,
+ },
+
+ cbindData: function (initialConfig) {
+ let ns = initialConfig.namespace ?? '';
+ let parts = ns.split('/');
+ let nsName = parts.pop();
+ return {
+ nsName,
+ nsParent: parts.join('/'),
+ };
+ },
+
+ // Compose the submitted target namespace from the current field values.
+ getTargetNs: function () {
+ let me = this;
+ let parent = me.down('[name=parent]').getValue() || '';
+ let name = me.down('[name=name]').getValue();
+ return parent ? `${parent}/${name}` : name;
+ },
+
+ // Returns the target-ns path that was submitted, for use by the caller after success.
+ getNewNamespace: function () {
+ return this.getTargetNs();
+ },
+
+ items: {
+ xtype: 'inputpanel',
+ onGetValues: function (values) {
+ let win = this.up('window');
+ let result = {
+ ns: win.namespace,
+ 'target-ns': win.getTargetNs(),
+ };
+ if (values['delete-source'] !== undefined) {
+ result['delete-source'] = values['delete-source'];
+ }
+ if (values['merge-groups'] !== undefined) {
+ result['merge-groups'] = values['merge-groups'];
+ }
+ if (values['max-depth'] !== undefined && values['max-depth'] !== '') {
+ result['max-depth'] = values['max-depth'];
+ }
+ return result;
+ },
+ items: [
+ {
+ xtype: 'displayfield',
+ fieldLabel: gettext('Namespace'),
+ cbind: {
+ value: '{namespace}',
+ },
+ },
+ {
+ xtype: 'pbsNamespaceSelector',
+ name: 'parent',
+ fieldLabel: gettext('New Parent'),
+ allowBlank: true,
+ cbind: {
+ datastore: '{datastore}',
+ excludeNs: '{namespace}',
+ value: '{nsParent}',
+ },
+ },
+ {
+ xtype: 'proxmoxtextfield',
+ name: 'name',
+ fieldLabel: gettext('New Name'),
+ allowBlank: false,
+ maxLength: 31,
+ regex: PBS.Utils.SAFE_ID_RE,
+ regexText: gettext("Only alpha numerical, '_' and '-' (if not at start) allowed"),
+ cbind: {
+ value: '{nsName}',
+ },
+ },
+ ],
+ advancedItems: [
+ {
+ xtype: 'pbsNamespaceMaxDepthReduced',
+ name: 'max-depth',
+ fieldLabel: gettext('Max Depth'),
+ emptyText: gettext('Unlimited'),
+ listeners: {
+ afterrender: function (field) {
+ let win = field.up('window');
+ field.setLimit(win.namespace, null);
+ },
+ },
+ autoEl: {
+ tag: 'div',
+ 'data-qtip': gettext('Limit how many levels of child namespaces to include. Leave empty to move the entire subtree.'),
+ },
+ },
+ {
+ xtype: 'proxmoxcheckbox',
+ name: 'merge-groups',
+ fieldLabel: gettext('Merge Groups'),
+ checked: true,
+ autoEl: {
+ tag: 'div',
+ 'data-qtip': gettext('Merge snapshots into existing groups with the same name in the target namespace. Requires matching ownership and non-overlapping snapshot times.'),
+ },
+ },
+ {
+ xtype: 'proxmoxcheckbox',
+ name: 'delete-source',
+ fieldLabel: gettext('Delete Source'),
+ checked: true,
+ autoEl: {
+ tag: 'div',
+ 'data-qtip': gettext('Remove the empty source namespace directories after moving all groups. Uncheck to keep the namespace structure.'),
+ },
+ },
+ ],
+ },
+});
--
2.47.3
^ permalink raw reply [flat|nested] 21+ messages in thread* Re: [PATCH proxmox-backup v8 12/13] ui: add move namespace action
2026-04-22 13:39 ` [PATCH proxmox-backup v8 12/13] ui: add move namespace action Hannes Laimer
@ 2026-04-23 14:49 ` Michael Köppl
0 siblings, 0 replies; 21+ messages in thread
From: Michael Köppl @ 2026-04-23 14:49 UTC (permalink / raw)
To: Hannes Laimer, pbs-devel
On Wed Apr 22, 2026 at 3:39 PM CEST, Hannes Laimer wrote:
[snip]
> + items: {
> + xtype: 'inputpanel',
> + onGetValues: function (values) {
> + let win = this.up('window');
> + let result = {
> + ns: win.namespace,
> + 'target-ns': win.getTargetNs(),
> + };
> + if (values['delete-source'] !== undefined) {
> + result['delete-source'] = values['delete-source'];
> + }
> + if (values['merge-groups'] !== undefined) {
> + result['merge-groups'] = values['merge-groups'];
> + }
You probably noticed this already, but wanted to mention it nonetheless:
these are also not sent to the backend if the checkbox is unchecked. So
in this case it would e.g. delete the source even if you unchecked.
> + if (values['max-depth'] !== undefined && values['max-depth'] !== '') {
> + result['max-depth'] = values['max-depth'];
> + }
> + return result;
> + },
> + items: [
> + {
[snip]
^ permalink raw reply [flat|nested] 21+ messages in thread
* [PATCH proxmox-backup v8 13/13] cli: add move-namespace and move-group commands
2026-04-22 13:39 [PATCH proxmox-backup v8 00/13] fixes #6195: add support for moving groups and namespaces Hannes Laimer
` (11 preceding siblings ...)
2026-04-22 13:39 ` [PATCH proxmox-backup v8 12/13] ui: add move namespace action Hannes Laimer
@ 2026-04-22 13:39 ` Hannes Laimer
2026-04-23 16:29 ` [PATCH proxmox-backup v8 00/13] fixes #6195: add support for moving groups and namespaces Michael Köppl
2026-04-23 22:38 ` applied: " Thomas Lamprecht
14 siblings, 0 replies; 21+ messages in thread
From: Hannes Laimer @ 2026-04-22 13:39 UTC (permalink / raw)
To: pbs-devel
Add 'move-namespace' and 'move-group' subcommands to
proxmox-backup-manager datastore. Both call the corresponding API
handler and wait for the worker task to complete.
move-namespace accepts optional --max-depth and --delete-source
flags matching the API parameters.
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
---
src/bin/proxmox_backup_manager/datastore.rs | 113 +++++++++++++++++++-
1 file changed, 112 insertions(+), 1 deletion(-)
diff --git a/src/bin/proxmox_backup_manager/datastore.rs b/src/bin/proxmox_backup_manager/datastore.rs
index 5c65c5ec..0ad97fda 100644
--- a/src/bin/proxmox_backup_manager/datastore.rs
+++ b/src/bin/proxmox_backup_manager/datastore.rs
@@ -1,5 +1,6 @@
use pbs_api_types::{
- DataStoreConfig, DataStoreConfigUpdater, DATASTORE_SCHEMA, PROXMOX_CONFIG_DIGEST_SCHEMA,
+ BackupNamespace, DataStoreConfig, DataStoreConfigUpdater, DATASTORE_SCHEMA,
+ NS_MAX_DEPTH_SCHEMA, PROXMOX_CONFIG_DIGEST_SCHEMA,
};
use pbs_client::view_task_result;
use proxmox_router::{cli::*, ApiHandler, RpcEnvironment};
@@ -323,6 +324,100 @@ async fn uuid_mount(mut param: Value, _rpcenv: &mut dyn RpcEnvironment) -> Resul
Ok(Value::Null)
}
+#[api(
+ protected: true,
+ input: {
+ properties: {
+ store: {
+ schema: DATASTORE_SCHEMA,
+ },
+ ns: {
+ type: BackupNamespace,
+ },
+ "target-ns": {
+ type: BackupNamespace,
+ },
+ "max-depth": {
+ schema: NS_MAX_DEPTH_SCHEMA,
+ optional: true,
+ },
+ "delete-source": {
+ type: bool,
+ optional: true,
+ default: true,
+ description: "Remove the source namespace after moving all contents. \
+ Defaults to true.",
+ },
+ "merge-groups": {
+ type: bool,
+ optional: true,
+ default: true,
+ description: "If a group with the same name already exists in the target \
+ namespace, merge snapshots into it. Requires matching ownership and \
+ non-overlapping snapshot times.",
+ },
+ },
+ },
+)]
+/// Move a backup namespace to a new location within the same datastore.
+async fn cli_move_namespace(
+ mut param: Value,
+ rpcenv: &mut dyn RpcEnvironment,
+) -> Result<(), Error> {
+ param["node"] = "localhost".into();
+
+ let info = &api2::admin::namespace::API_METHOD_MOVE_NAMESPACE;
+ let result = match info.handler {
+ ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
+ _ => unreachable!(),
+ };
+
+ crate::wait_for_local_worker(result.as_str().unwrap()).await?;
+ Ok(())
+}
+
+#[api(
+ protected: true,
+ input: {
+ properties: {
+ store: {
+ schema: DATASTORE_SCHEMA,
+ },
+ ns: {
+ type: BackupNamespace,
+ },
+ group: {
+ type: pbs_api_types::BackupGroup,
+ flatten: true,
+ },
+ "target-ns": {
+ type: BackupNamespace,
+ },
+ "merge-group": {
+ type: bool,
+ optional: true,
+ default: true,
+ description: "If the group already exists in the target namespace, merge \
+ snapshots into it. Requires matching ownership and non-overlapping \
+ snapshot times.",
+ },
+ },
+ },
+)]
+/// Move a backup group to a different namespace within the same datastore.
+async fn cli_move_group(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> {
+ param["node"] = "localhost".into();
+
+ let info = &api2::admin::datastore::API_METHOD_MOVE_GROUP;
+ let result = match info.handler {
+ ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
+ _ => unreachable!(),
+ };
+
+ crate::wait_for_local_worker(result.as_str().unwrap()).await?;
+ Ok(())
+}
+
#[api(
protected: true,
input: {
@@ -407,6 +502,22 @@ pub fn datastore_commands() -> CommandLineInterface {
CliCommand::new(&API_METHOD_DELETE_DATASTORE)
.arg_param(&["name"])
.completion_cb("name", pbs_config::datastore::complete_datastore_name),
+ )
+ .insert(
+ "move-namespace",
+ CliCommand::new(&API_METHOD_CLI_MOVE_NAMESPACE)
+ .arg_param(&["store"])
+ .completion_cb("store", pbs_config::datastore::complete_datastore_name)
+ .completion_cb("ns", crate::complete_sync_local_datastore_namespace)
+ .completion_cb("target-ns", crate::complete_sync_local_datastore_namespace),
+ )
+ .insert(
+ "move-group",
+ CliCommand::new(&API_METHOD_CLI_MOVE_GROUP)
+ .arg_param(&["store"])
+ .completion_cb("store", pbs_config::datastore::complete_datastore_name)
+ .completion_cb("ns", crate::complete_sync_local_datastore_namespace)
+ .completion_cb("target-ns", crate::complete_sync_local_datastore_namespace),
);
cmd_def.into()
--
2.47.3
^ permalink raw reply [flat|nested] 21+ messages in thread* Re: [PATCH proxmox-backup v8 00/13] fixes #6195: add support for moving groups and namespaces
2026-04-22 13:39 [PATCH proxmox-backup v8 00/13] fixes #6195: add support for moving groups and namespaces Hannes Laimer
` (12 preceding siblings ...)
2026-04-22 13:39 ` [PATCH proxmox-backup v8 13/13] cli: add move-namespace and move-group commands Hannes Laimer
@ 2026-04-23 16:29 ` Michael Köppl
2026-04-23 22:38 ` applied: " Thomas Lamprecht
14 siblings, 0 replies; 21+ messages in thread
From: Michael Köppl @ 2026-04-23 16:29 UTC (permalink / raw)
To: Hannes Laimer, pbs-devel
Had a closer look at this series. Tested the following:
- generally moving group as well as namespaces in various ways
- checked that the merge invariants hold by explicitly creating groups
where snapshot times overlap and that moving is not possible if
ownership does not match
- checked with an s3-backed datastore that moving notes can be repeated
in case any of the steps fail (made the rename of the notes file fail
by making the file immutable) and also that the source stays intact
- ran moves concurrently to see if I'd run into problems w.r.t. where
the group is moved or if any data is lost
Noticed the following:
- The values of unchecked checkboxes aren't sent to the backend,
resulting in them being interpreted as set to true (the default).
Left comments on the affected patches.
Using proxmox-backup-manager like this:
`proxmox-backup-manager datastore move-namespace
<datastore> --ns ns2 --target-ns ns1`
generally to cause problems. There's nothing really telling me that
anything didn't work in its output, but doing this resulted in the
following problems for me:
- When moving a namespace with groups in it as described above, I'm
afterwards unable to delete the moved group (in its new namespace) or
the new namespace containing it because the removal of the snapshot
fails because the snapshot lock seemingly cannot be acquired.
- Given namespaces ns1, ns2, ns2/a with a group in ns2/a, when using
`proxmox-backup-manager datastore move-namespace
<datastore> --ns ns2 --target-ns ns1`
the resulting ns1/a namespace did not show the group in ns1/a for me
until many seconds (~15-20s) later even though the log suggests
everything is done:
2026-04-23T17:52:50+02:00: moving namespace 'ns2' -> 'ns1': 2 namespaces
2026-04-23T17:52:50+02:00: moving group 'ct/104' from 'ns2/a' to 'ns1/a'
2026-04-23T17:52:50+02:00: removing backup group "/datastore/test/ns/ns2/ns/a/ct/104"
2026-04-23T17:52:50+02:00: moved group 1 ('ct/104' in 'ns2/a')
2026-04-23T17:52:50+02:00: TASK OK
Also tested this with `--ns ns2 --target-ns ns1/ns2` and it happens
there as well.
- It's possible to use namespaces that don't exist yet for --target-ns.
I suppose it's on purpose, but I could imagine that it's frustrating
if users want to move a namespace, have a typo in their --target-ns
and then the content of the namespace is in a different namespace than
they intended. Wanted to note that since the UI also doesn't allow
using namespaces that don't exist.
On Wed Apr 22, 2026 at 3:39 PM CEST, Hannes Laimer wrote:
> Add support for moving backup groups and entire namespace subtrees to
> a different location within the same datastore.
>
> Groups are moved with exclusive per-group and per-snapshot locking.
> For S3, objects are copied to the target prefix before deleting the
> source. Namespace moves process groups individually, deferring and
> retrying lock conflicts once, so partially failed moves can be
> completed with move-group.
>
>
> v8, thanks @Chris and @Fabian:
> - shared check_move_group/check_move_namespace
> - have BackupGroup::destroy consume the group lock so move can hand over
> cleanup instead of duplicating it
> - split remove_namespace into flat and recursive variants, and lift the
> empty-type-dir cleanup so the flat path prunes after move
> - lift check_namespace_depth_limit into pbs-datastore
> - move snapshot-move to BackupDir::move_to with typed target and
> same-store assertion
> - check_merge_invariants: take the target group directly, stream
> min/first-overlap instead of collecting, include both namespaces in
> the error
> - reuse create_locked_backup_group (new non-pub variant) for the target
> group, differentiating soft/hard errors
> - rename MoveGroupError -> BackupGroupOpError
> - move_notes_to: S3 upload first, local rename second, with upload
> failures propagated as fatal instead of warn+continue
> - source-ns cleanup: distinguish depth-limited (silent) from concurrent
> (warn, named) children
> - iter_backup_groups errors recorded per-ns instead of aborting
> - running moved-group counter in the task log, final summary splits
> fatal from lock(rerun may succeed)
> - docs: fix merge-groups default, document permission asymmetry, note
> stacked destructive defaults
> - cli: --ns, --target-ns completion
> - new patch 5/13, thanks @Chris!: fix data-loss race between
> move-group/move-namespace and a concurrent GC phase 1 with a
> per-datastore write-ahead journal at
> /run/proxmox-backup/locks/<datastore>/move-journal
>
> v7, thanks @Fabian and @Arthur!:
> - allow moving into existing target namespaces
> - merge groups with the same name when owner matches and source
> snapshots are strictly newer than target snapshots
> - lock and move snapshots in batches(512) to avoid FD exhaustion
> - assert new-style locking upfront
> - pre-create target group dir with source owner
> - use remove_dir for source cleanup, log errors instead of swallowing
> - rename 'new-ns' to 'target-ns', use dedicated POST endpoints
> /move-group and /move-namespace
> - relax move-group permissions: DATASTORE_PRUNE + ownership on
> source, DATASTORE_BACKUP + ownership on target
> - add max-depth, delete-source, and merge-group/merge-groups flags
> - add docs section and online help link for move dialogs
> - make ns and target-ns required in CLI, no need to keep the
> empty -> root thing since this is new
>
> v6, thanks @Fabian and @Dominik!:
> - drop ns locks, lock everything directly, like we do for delete
> - ui: disable prune for empty groups
> - ui: dont render verification status for empty groups
>
> v5, thanks @Chris!:
> - lock dir instead of .ns-lock file
> - improve cleanup of partially failed s3 moves
> - ui: show empty groups, re-order actions, add context menu
> - add cli commands for both ns and group moves
>
>
>
> Hannes Laimer (13):
> ui: show empty groups
> datastore: lift check_namespace_depth_limit to pbs-datastore
> datastore: have BackupGroup::destroy consume the group lock
> datastore: split remove_namespace into flat and recursive variants
> datastore: add move journal for coordinating with gc phase 1
> datastore: add move-group
> datastore: add move-namespace
> docs: add section on moving namespaces and groups
> api: add POST endpoint for move-group
> api: add POST endpoint for move-namespace
> ui: add move group action
> ui: add move namespace action
> cli: add move-namespace and move-group commands
>
> docs/storage.rst | 60 +-
> pbs-datastore/src/backup_info.rs | 269 ++++++-
> pbs-datastore/src/datastore.rs | 732 ++++++++++++++++++--
> pbs-datastore/src/lib.rs | 7 +-
> pbs-datastore/src/move_journal.rs | 149 ++++
> src/api2/admin/datastore.rs | 94 +++
> src/api2/admin/namespace.rs | 85 ++-
> src/bin/proxmox_backup_manager/datastore.rs | 113 ++-
> src/server/pull.rs | 16 +-
> src/server/push.rs | 7 +-
> src/server/sync.rs | 21 -
> www/Makefile | 2 +
> www/OnlineHelpInfo.js | 4 +
> www/datastore/Content.js | 167 ++++-
> www/form/NamespaceSelector.js | 11 +
> www/window/GroupMove.js | 71 ++
> www/window/NamespaceMove.js | 136 ++++
> 17 files changed, 1792 insertions(+), 152 deletions(-)
> create mode 100644 pbs-datastore/src/move_journal.rs
> create mode 100644 www/window/GroupMove.js
> create mode 100644 www/window/NamespaceMove.js
^ permalink raw reply [flat|nested] 21+ messages in thread* applied: [PATCH proxmox-backup v8 00/13] fixes #6195: add support for moving groups and namespaces
2026-04-22 13:39 [PATCH proxmox-backup v8 00/13] fixes #6195: add support for moving groups and namespaces Hannes Laimer
` (13 preceding siblings ...)
2026-04-23 16:29 ` [PATCH proxmox-backup v8 00/13] fixes #6195: add support for moving groups and namespaces Michael Köppl
@ 2026-04-23 22:38 ` Thomas Lamprecht
2026-04-24 8:31 ` Fabian Grünbichler
14 siblings, 1 reply; 21+ messages in thread
From: Thomas Lamprecht @ 2026-04-23 22:38 UTC (permalink / raw)
To: pbs-devel, Hannes Laimer
On Wed, 22 Apr 2026 15:39:38 +0200, Hannes Laimer wrote:
> Add support for moving backup groups and entire namespace subtrees to
> a different location within the same datastore.
>
> Groups are moved with exclusive per-group and per-snapshot locking.
> For S3, objects are copied to the target prefix before deleting the
> source. Namespace moves process groups individually, deferring and
> retrying lock conflicts once, so partially failed moves can be
> completed with move-group.
>
> [...]
Applied, thanks!
Squashed in the relatively trivial fixes for what Michael found, but I really
could not find any issue with locking after multiple looks, so either that came
from some side effect in testing or we'd need more info for a good reproducer.
[01/13] ui: show empty groups
commit: f3cc03637f70a938808ccb20edbc9def1254a16e
[02/13] datastore: lift check_namespace_depth_limit to pbs-datastore
commit: 650204594b49107578ccfb595cfa2a95ac53a02c
[03/13] datastore: have BackupGroup::destroy consume the group lock
commit: e9196f3260cb9fc1637af0cdd07a1d9dd551baf2
[04/13] datastore: split remove_namespace into flat and recursive variants
commit: 0e21312aff8caf20addc074067457d4f01e296e5
[05/13] datastore: add move journal for coordinating with gc phase 1
commit: a02cad5e602b9ac23a3e06ff4b7294bed8ac9089
[06/13] datastore: add move-group
commit: 303917ccd7f3f65491807ab34c661879f4d7f2bf
[07/13] datastore: add move-namespace
commit: e2d8f1e87e005b7a3d98d1b5d2f4222db0a9cc64
[08/13] docs: add section on moving namespaces and groups
commit: 4f8e1c9cc7437473665a283b7a4e5a2fa6e9b2ef
[09/13] api: add POST endpoint for move-group
commit: 1182f0b0976d6b23e205c89af60979ba84168e78
[10/13] api: add POST endpoint for move-namespace
commit: 2b8f5044ec74cb108180ba4f438aee9953baf337
[11/13] ui: add move group action
commit: 20ee63d174f736f0da6e4b672b14d3b5e6b9715a
[12/13] ui: add move namespace action
commit: aa3983e0685ddede540b7b64fb496c67e59a7a06
[13/13] cli: add move-namespace and move-group commands
commit: 4d74e20dc03cafa2c5978bb8d9e3edff7131872c
^ permalink raw reply [flat|nested] 21+ messages in thread* Re: applied: [PATCH proxmox-backup v8 00/13] fixes #6195: add support for moving groups and namespaces
2026-04-23 22:38 ` applied: " Thomas Lamprecht
@ 2026-04-24 8:31 ` Fabian Grünbichler
2026-04-24 8:43 ` Hannes Laimer
0 siblings, 1 reply; 21+ messages in thread
From: Fabian Grünbichler @ 2026-04-24 8:31 UTC (permalink / raw)
To: Hannes Laimer, pbs-devel, Thomas Lamprecht
On April 24, 2026 12:38 am, Thomas Lamprecht wrote:
> On Wed, 22 Apr 2026 15:39:38 +0200, Hannes Laimer wrote:
>> Add support for moving backup groups and entire namespace subtrees to
>> a different location within the same datastore.
>>
>> Groups are moved with exclusive per-group and per-snapshot locking.
>> For S3, objects are copied to the target prefix before deleting the
>> source. Namespace moves process groups individually, deferring and
>> retrying lock conflicts once, so partially failed moves can be
>> completed with move-group.
>>
>> [...]
>
> Applied, thanks!
>
> Squashed in the relatively trivial fixes for what Michael found, but I really
> could not find any issue with locking after multiple looks, so either that came
> from some side effect in testing or we'd need more info for a good reproducer.
I can reproduce this easily, and the cause is that the
proxmox-backup-manager CLI handlers directly execute the API endpoints,
instead of going via the proxy..
that causes some parent dirs in the locking hierarchy to have the wrong
ownership (755 root:root instead of 755 backup:backup), which then turns
into
failed to delete snapshot - Error { context: "while destroying snapshot \'BackupDir { store: \"test\", ns: BackupNamespace { inner: [\"foobar\"], len: 7 }, dir: BackupDir { group: BackupGroup { ty: Host, id: \"bookworm\" }, time: 1674040234 }, backup_time_string: \"2023-01-18T11:10:34Z\" }\'", source: Error { context: "unable to acquire snapshot lock \"/run/proxmox-backup/locks/test/foobar/host-bookworm-2023\\\\x2d01\\\\x2d18T11\\\\x3a10\\\\x3a34Z\"", source: "mkstemp \"/run/proxmox-backup/locks/test/foobar/host-bookworm-2023\\\\x2d01\\\\x2d18T11\\\\x3a10\\\\x3a34Z.tmp_XXXXXX\" failed: EACCES: Permission denied", }, } (400)
because it's not possible for the user backup to create new lock files
in that directory.. cleared by a reboot or recursive chown, and fixed by
either moving those commands to the client, or letting manager connect
to the API.
it's also a bit weird that
- create namespace
- list namespaces
- delete namespace
for namespaces are all in proxmox-backup-client, but moving a namespace
is only available in proxmox-backup-manager which lacks all of the
above..
similarly, interactions with groups are in the client:
- create snapshot (and thus implictly groups)
- forget snapshots by pruning a group
- forget individual snapshots
- forgetting a whole group
- listing snapshots (and thus groups)
- changing group ownership
but moving a group is only available in proxmox-backup-manager again,
which has no other group-level commands AFAICT?
^ permalink raw reply [flat|nested] 21+ messages in thread* Re: applied: [PATCH proxmox-backup v8 00/13] fixes #6195: add support for moving groups and namespaces
2026-04-24 8:31 ` Fabian Grünbichler
@ 2026-04-24 8:43 ` Hannes Laimer
0 siblings, 0 replies; 21+ messages in thread
From: Hannes Laimer @ 2026-04-24 8:43 UTC (permalink / raw)
To: Fabian Grünbichler, pbs-devel, Thomas Lamprecht
On 2026-04-24 10:30, Fabian Grünbichler wrote:
> On April 24, 2026 12:38 am, Thomas Lamprecht wrote:
>> On Wed, 22 Apr 2026 15:39:38 +0200, Hannes Laimer wrote:
>>> Add support for moving backup groups and entire namespace subtrees to
>>> a different location within the same datastore.
>>>
>>> Groups are moved with exclusive per-group and per-snapshot locking.
>>> For S3, objects are copied to the target prefix before deleting the
>>> source. Namespace moves process groups individually, deferring and
>>> retrying lock conflicts once, so partially failed moves can be
>>> completed with move-group.
>>>
>>> [...]
>>
>> Applied, thanks!
>>
>> Squashed in the relatively trivial fixes for what Michael found, but I really
>> could not find any issue with locking after multiple looks, so either that came
>> from some side effect in testing or we'd need more info for a good reproducer.
>
> I can reproduce this easily, and the cause is that the
> proxmox-backup-manager CLI handlers directly execute the API endpoints,
> instead of going via the proxy..
>
> that causes some parent dirs in the locking hiearchy to have the wrong
> ownership (755 root:root instead of 755 backup:backup), which then turns
> into
>
> failed to delete snapshot - Error { context: "while destroying snapshot \'BackupDir { store: \"test\", ns: BackupNamespace { inner: [\"foobar\"], len: 7 }, dir: BackupDir { group: BackupGroup { ty: Host, id: \"bookworm\" }, time: 1674040234 }, backup_time_string: \"2023-01-18T11:10:34Z\" }\'", source: Error { context: "unable to acquire snapshot lock \"/run/proxmox-backup/locks/test/foobar/host-bookworm-2023\\\\x2d01\\\\x2d18T11\\\\x3a10\\\\x3a34Z\"", source: "mkstemp \"/run/proxmox-backup/locks/test/foobar/host-bookworm-2023\\\\x2d01\\\\x2d18T11\\\\x3a10\\\\x3a34Z.tmp_XXXXXX\" failed: EACCES: Permission denied", }, } (400)
>
> because it's not possible for the user backup to create new lock files
> in that directory.. cleared by a reboot or recursive chown, and fixed by
> either moving those commands to the client, or letting manager connect
> to the API.
>
> it's also a bit weird that
> - create namespace
> - list namespaces
> - delete namespace
>
> for namespaces are all in proxmox-backup-client, but moving a namespace
> is only available in proxmox-backup-manager which lacks all of the
> above..
>
> similarly, interactions with groups are in the client:
> - create snapshot (and thus implictly groups)
> - forget snapshots by pruning a group
> - forget individual snapshots
> - forgetting a whole group
> - listing snapshots (and thus groups)
> - changing group ownership
>
> but moving a group is only available in proxmox-backup-manager again,
> which has no other group-level commands AFAICT?
right, these shouldn't be in the manager. I'll send a fixup moving them!
that should also fix the permissions problem, thanks!
^ permalink raw reply [flat|nested] 21+ messages in thread