From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from firstgate.proxmox.com (firstgate.proxmox.com [212.224.123.68]) by lore.proxmox.com (Postfix) with ESMTPS id E99961FF13B for ; Wed, 11 Mar 2026 16:13:42 +0100 (CET) Received: from firstgate.proxmox.com (localhost [127.0.0.1]) by firstgate.proxmox.com (Proxmox) with ESMTP id 39EDB3020B; Wed, 11 Mar 2026 16:13:38 +0100 (CET) From: Hannes Laimer To: pbs-devel@lists.proxmox.com Subject: [PATCH proxmox-backup v4 3/7] datastore: add move_namespace Date: Wed, 11 Mar 2026 16:13:11 +0100 Message-ID: <20260311151315.133637-4-h.laimer@proxmox.com> X-Mailer: git-send-email 2.47.3 In-Reply-To: <20260311151315.133637-1-h.laimer@proxmox.com> References: <20260311151315.133637-1-h.laimer@proxmox.com> MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Bm-Milter-Handled: 55990f41-d878-4baa-be0a-ee34c49e34d2 X-Bm-Transport-Timestamp: 1773241972377 X-SPAM-LEVEL: Spam detection results: 0 AWL 0.068 Adjusted score from AWL reputation of From: address BAYES_00 -1.9 Bayes spam probability is 0 to 1% DMARC_MISSING 0.1 Missing DMARC policy KAM_DMARC_STATUS 0.01 Test Rule for DKIM or SPF Failure with Strict Alignment SPF_HELO_NONE 0.001 SPF: HELO does not publish an SPF Record SPF_PASS -0.001 SPF: sender matches SPF record Message-ID-Hash: K37N5AC6BB2EPA7MUAYMPNEFDXEJU6DU X-Message-ID-Hash: K37N5AC6BB2EPA7MUAYMPNEFDXEJU6DU X-MailFrom: h.laimer@proxmox.com X-Mailman-Rule-Misses: dmarc-mitigation; no-senders; approved; loop; banned-address; emergency; member-moderation; nonmember-moderation; administrivia; implicit-dest; max-recipients; max-size; news-moderation; no-subject; digests; suspicious-header X-Mailman-Version: 3.3.10 Precedence: list List-Id: Proxmox Backup Server development discussion List-Help: List-Owner: List-Post: List-Subscribe: List-Unsubscribe: move_namespace relocates an entire namespace subtree (the given namespace, all child namespaces, and their groups) to a new location within the same datastore. 
For the filesystem backend the entire subtree is relocated with a single atomic rename. For the S3 backend groups are moved one at a time via BackupGroup::move_to(). Groups that fail are left at the source and listed as an error in the task log so they can be retried with move_group individually. Source namespaces where all groups succeeded have their S3 markers and local cache directories removed, deepest-first. Signed-off-by: Hannes Laimer --- pbs-datastore/src/datastore.rs | 216 ++++++++++++++++++++++++++++++++- 1 file changed, 215 insertions(+), 1 deletion(-) diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs index 51813acb..81066faf 100644 --- a/pbs-datastore/src/datastore.rs +++ b/pbs-datastore/src/datastore.rs @@ -31,7 +31,7 @@ use pbs_api_types::{ ArchiveType, Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, ChunkOrder, DataStoreConfig, DatastoreBackendConfig, DatastoreBackendType, DatastoreFSyncLevel, DatastoreTuning, GarbageCollectionCacheStats, GarbageCollectionStatus, MaintenanceMode, - MaintenanceType, Operation, UPID, + MaintenanceType, Operation, MAX_NAMESPACE_DEPTH, UPID, }; use pbs_config::s3::S3_CFG_TYPE_ID; use pbs_config::{BackupLockGuard, ConfigVersionCache}; @@ -1001,6 +1001,220 @@ impl DataStore { Ok((removed_all_requested, stats)) } + /// Move a backup namespace (including all child namespaces and groups) to a new location. + /// + /// The entire subtree rooted at `source_ns` is relocated to `target_ns`. Exclusive namespace + /// locks are held on both source and target namespaces for the duration to block concurrent + /// readers and writers. + /// + /// For the filesystem backend the rename is atomic. For the S3 backend groups are moved + /// one at a time. A group that fails to copy is left at the source and can be moved + /// individually with `move_group`. The operation returns an error listing any such groups. 
+ /// + /// Fails if: + /// - `source_ns` is the root namespace + /// - `source_ns` == `target_ns` + /// - `source_ns` does not exist + /// - `target_ns` already exists (to prevent silent merging) + /// - `target_ns`'s parent does not exist + /// - `source_ns` is an ancestor of `target_ns` + /// - the move would exceed the maximum namespace depth + pub fn move_namespace( + self: &Arc, + source_ns: &BackupNamespace, + target_ns: &BackupNamespace, + ) -> Result<(), Error> { + if source_ns.is_root() { + bail!("cannot move root namespace"); + } + if source_ns == target_ns { + bail!("source and target namespace must be different"); + } + + // lock_namespace also acquires shared locks on all ancestors, so a concurrent + // move_namespace on a child namespace (which also walks up to ancestors) will be blocked + // by our exclusive lock, and we will be blocked by any in-progress moves targeting one of + // our ancestors. + let _source_ns_guard = lock_namespace(self.name(), source_ns) + .with_context(|| format!("failed to lock source namespace '{source_ns}' for move"))?; + // Lock target_ns to prevent two concurrent moves from racing to create the same target. + let _target_ns_guard = lock_namespace(self.name(), target_ns) + .with_context(|| format!("failed to lock target namespace '{target_ns}' for move"))?; + + if !self.namespace_exists(source_ns) { + bail!("source namespace '{source_ns}' does not exist"); + } + if self.namespace_exists(target_ns) { + bail!("target namespace '{target_ns}' already exists"); + } + let target_parent = target_ns.parent(); + if !self.namespace_exists(&target_parent) { + bail!("target namespace parent '{target_parent}' does not exist"); + } + if source_ns.contains(target_ns).is_some() { + bail!( + "cannot move namespace '{source_ns}' into its own subtree (target: '{target_ns}')" + ); + } + + let all_source_ns: Vec = self + .recursive_iter_backup_ns(source_ns.clone())? 
+ .collect::, Error>>()?; + + let all_source_groups: Vec = all_source_ns + .iter() + .map(|ns| self.iter_backup_groups(ns.clone())) + .collect::, Error>>()? + .into_iter() + .flatten() + .collect::, Error>>()?; + + let subtree_depth = all_source_ns + .iter() + .map(BackupNamespace::depth) + .max() + .map_or(0, |d| d - source_ns.depth()); + if subtree_depth + target_ns.depth() > MAX_NAMESPACE_DEPTH { + bail!( + "move would exceed maximum namespace depth \ + ({subtree_depth}+{} > {MAX_NAMESPACE_DEPTH})", + target_ns.depth(), + ); + } + + let backend = self.backend()?; + + log::info!( + "moving namespace '{source_ns}' -> '{target_ns}': {} namespaces, {} groups", + all_source_ns.len(), + all_source_groups.len(), + ); + + match &backend { + DatastoreBackend::Filesystem => { + let src_path = self.namespace_path(source_ns); + let dst_path = self.namespace_path(target_ns); + if let Some(dst_parent) = dst_path.parent() { + std::fs::create_dir_all(dst_parent).with_context(|| { + format!("failed to create parent directory for namespace rename '{source_ns}' -> '{target_ns}'") + })?; + } + log::debug!("renaming namespace directory '{src_path:?}' -> '{dst_path:?}'"); + std::fs::rename(&src_path, &dst_path).with_context(|| { + format!("failed to rename namespace directory '{source_ns}' -> '{target_ns}'") + })?; + } + DatastoreBackend::S3(s3_client) => { + // Create target local namespace directories upfront (covers empty namespaces). + for ns in &all_source_ns { + let target_child = ns.map_prefix(source_ns, target_ns)?; + std::fs::create_dir_all(self.namespace_path(&target_child)).with_context( + || { + format!( + "failed to create local dir for target namespace '{target_child}'" + ) + }, + )?; + } + + // Create S3 namespace markers for all target namespaces. 
+ for ns in &all_source_ns { + let target_child = ns.map_prefix(source_ns, target_ns)?; + let object_key = crate::s3::object_key_from_path( + &target_child.path(), + NAMESPACE_MARKER_FILENAME, + ) + .context("invalid namespace marker object key")?; + log::debug!( + "creating S3 namespace marker for '{target_child}': {object_key:?}" + ); + proxmox_async::runtime::block_on( + s3_client.upload_no_replace_with_retry(object_key, Bytes::from("")), + ) + .context("failed to create namespace marker on S3 backend")?; + } + + // Move each group. Failed groups are skipped and remain at the source in + // both S3 and local cache. Collect the namespaces of any failed groups so we + // know which source namespaces still have content after the loop. + let mut failed_groups: Vec = Vec::new(); + let mut failed_ns: HashSet = HashSet::new(); + + for group in &all_source_groups { + let target_group_ns = group.backup_ns().map_prefix(source_ns, target_ns)?; + + // Ensure the target type directory exists before move_to renames into it. + if let Err(err) = + std::fs::create_dir_all(self.type_path(&target_group_ns, group.group().ty)) + { + warn!( + "move_namespace: failed to create type dir for '{}' in '{}': {err:#}", + group.group(), + target_group_ns + ); + failed_groups.push(group.group().to_string()); + failed_ns.insert(group.backup_ns().clone()); + continue; + } + + if let Err(err) = group.move_to(&target_group_ns, &backend) { + warn!( + "move_namespace: failed to move group '{}' from '{}' to '{}': {err:#}", + group.group(), + group.backup_ns(), + target_group_ns + ); + failed_groups.push(group.group().to_string()); + failed_ns.insert(group.backup_ns().clone()); + } + } + + // Clean up source namespaces that are now fully empty (all groups moved). + // Process deepest-first so parent directories are already empty when reached. + for ns in all_source_ns.iter().rev() { + // Skip if this namespace itself or any descendant still has groups. 
+ let has_remaining = failed_ns + .iter() + .any(|fns| fns == ns || ns.contains(fns).is_some()); + if has_remaining { + continue; + } + + // Delete the source S3 namespace marker. + let object_key = + crate::s3::object_key_from_path(&ns.path(), NAMESPACE_MARKER_FILENAME) + .context("invalid namespace marker object key")?; + log::debug!("deleting source S3 namespace marker for '{ns}': {object_key:?}"); + proxmox_async::runtime::block_on(s3_client.delete_object(object_key)) + .context("failed to delete source namespace marker on S3 backend")?; + + // Remove the source local cache directory. Try type subdirectories first + // (they should be empty after the per-group renames), then the namespace dir. + let ns_path = self.namespace_path(ns); + if let Ok(entries) = std::fs::read_dir(&ns_path) { + for entry in entries.flatten() { + let _ = std::fs::remove_dir(entry.path()); + } + } + let _ = std::fs::remove_dir(&ns_path); + } + + if !failed_groups.is_empty() { + bail!( + "namespace move partially completed; {} group(s) could not be moved \ + and remain at source '{}': {}. \ + Use move group to move them individually.", + failed_groups.len(), + source_ns, + failed_groups.join(", ") + ); + } + } + } + + Ok(()) + } + /// Remove a complete backup group including all snapshots. /// /// Returns `BackupGroupDeleteStats`, containing the number of deleted snapshots -- 2.47.3