From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from firstgate.proxmox.com (firstgate.proxmox.com [IPv6:2a01:7e0:0:424::9]) by lore.proxmox.com (Postfix) with ESMTPS id 1FE7F1FF13B for ; Wed, 08 Apr 2026 09:35:39 +0200 (CEST) Received: from firstgate.proxmox.com (localhost [127.0.0.1]) by firstgate.proxmox.com (Proxmox) with ESMTP id 9E4E92106; Wed, 8 Apr 2026 09:36:14 +0200 (CEST) Date: Wed, 08 Apr 2026 09:35:37 +0200 From: Fabian =?iso-8859-1?q?Gr=FCnbichler?= Subject: Re: [PATCH proxmox-backup v6 4/8] api: add PUT endpoint for move_group To: Hannes Laimer , pbs-devel@lists.proxmox.com References: <20260331123409.198353-1-h.laimer@proxmox.com> <20260331123409.198353-5-h.laimer@proxmox.com> In-Reply-To: <20260331123409.198353-5-h.laimer@proxmox.com> MIME-Version: 1.0 User-Agent: astroid/0.17.0 (https://github.com/astroidmail/astroid) Message-Id: <1775632797.wefshqb4o4.astroid@yuna.none> Content-Type: text/plain; charset=utf-8 Content-Transfer-Encoding: quoted-printable X-Bm-Milter-Handled: 55990f41-d878-4baa-be0a-ee34c49e34d2 X-Bm-Transport-Timestamp: 1775633673946 X-SPAM-LEVEL: Spam detection results: 0 AWL -0.097 Adjusted score from AWL reputation of From: address BAYES_00 -1.9 Bayes spam probability is 0 to 1% DMARC_MISSING 0.1 Missing DMARC policy KAM_DMARC_STATUS 0.01 Test Rule for DKIM or SPF Failure with Strict Alignment POISEN_SPAM_PILL 0.1 Meta: its spam POISEN_SPAM_PILL_1 0.1 random spam to be learned in bayes POISEN_SPAM_PILL_3 0.1 random spam to be learned in bayes RCVD_IN_VALIDITY_CERTIFIED_BLOCKED 0.001 ADMINISTRATOR NOTICE: The query to Validity was blocked. See https://knowledge.validity.com/hc/en-us/articles/20961730681243 for more information. RCVD_IN_VALIDITY_RPBL_BLOCKED 0.001 ADMINISTRATOR NOTICE: The query to Validity was blocked. See https://knowledge.validity.com/hc/en-us/articles/20961730681243 for more information. RCVD_IN_VALIDITY_SAFE_BLOCKED 0.001 ADMINISTRATOR NOTICE: The query to Validity was blocked. 
See https://knowledge.validity.com/hc/en-us/articles/20961730681243 for more information. SPF_HELO_NONE 0.001 SPF: HELO does not publish an SPF Record SPF_PASS -0.001 SPF: sender matches SPF record Message-ID-Hash: LETJ3DV7BXQ7PDCLLH6GAONY5HUORXYM X-Message-ID-Hash: LETJ3DV7BXQ7PDCLLH6GAONY5HUORXYM X-MailFrom: f.gruenbichler@proxmox.com X-Mailman-Rule-Misses: dmarc-mitigation; no-senders; approved; loop; banned-address; emergency; member-moderation; nonmember-moderation; administrivia; implicit-dest; max-recipients; max-size; news-moderation; no-subject; digests; suspicious-header X-Mailman-Version: 3.3.10 Precedence: list List-Id: Proxmox Backup Server development discussion List-Help: List-Owner: List-Post: List-Subscribe: List-Unsubscribe: On March 31, 2026 2:34 pm, Hannes Laimer wrote: > Add a PUT handler on /admin/datastore/{store}/groups to move a single > backup group to a different namespace within the same datastore. The > handler performs fast pre-checks synchronously and spawns a worker > task for the actual move. >=20 > Requires DATASTORE_MODIFY on both the source and target namespaces. 
>=20 > Signed-off-by: Hannes Laimer > --- > src/api2/admin/datastore.rs | 78 ++++++++++++++++++++++++++++++++++++- > 1 file changed, 77 insertions(+), 1 deletion(-) >=20 > diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs > index cca34055..68b1bbfc 100644 > --- a/src/api2/admin/datastore.rs > +++ b/src/api2/admin/datastore.rs > @@ -69,7 +69,9 @@ use proxmox_rest_server::{formatter, worker_is_active, = WorkerTask}; > =20 > use crate::api2::backup::optional_ns_param; > use crate::api2::node::rrd::create_value_from_rrd; > -use crate::backup::{check_ns_privs_full, ListAccessibleBackupGroups, Ver= ifyWorker, NS_PRIVS_OK}; > +use crate::backup::{ > + check_ns_privs, check_ns_privs_full, ListAccessibleBackupGroups, Ver= ifyWorker, NS_PRIVS_OK, > +}; > use crate::server::jobstate::{compute_schedule_status, Job, JobState}; > use crate::tools::{backup_info_to_snapshot_list_item, get_all_snapshot_f= iles, read_backup_index}; > =20 > @@ -278,6 +280,79 @@ pub async fn delete_group( > .await? > } > =20 > +#[api( > + input: { > + properties: { > + store: { schema: DATASTORE_SCHEMA }, > + ns: { > + type: BackupNamespace, > + optional: true, > + }, > + group: { > + type: pbs_api_types::BackupGroup, > + flatten: true, > + }, > + "new-ns": { > + type: BackupNamespace, > + optional: true, this isn't a new namespace, it's the target (like in the error messages below) or destination namespace. > + }, > + }, > + }, > + returns: { > + schema: UPID_SCHEMA, > + }, > + access: { > + permission: &Permission::Anybody, > + description: "Requires DATASTORE_MODIFY on both the source and t= arget namespace.", this is a quite high requirement, wouldn't it be enough to be allowed to delete the group in the source and create it in the target? i.e.: source: DATASTORE_MODIFY or DATASTORE_PRUNE+group ownership target: DATASTORE_MODIFY or DATASTORE_BACKUP+group ownership > + }, > +)] > +/// Move a backup group to a different namespace within the same datasto= re. 
> +pub fn move_group( > + store: String, > + ns: Option, > + group: pbs_api_types::BackupGroup, > + new_ns: Option, > + rpcenv: &mut dyn RpcEnvironment, > +) -> Result { > + let auth_id: Authid =3D rpcenv.get_auth_id().unwrap().parse()?; > + let ns =3D ns.unwrap_or_default(); > + let new_ns =3D new_ns.unwrap_or_default(); > + > + check_ns_privs(&store, &ns, &auth_id, PRIV_DATASTORE_MODIFY)?; > + check_ns_privs(&store, &new_ns, &auth_id, PRIV_DATASTORE_MODIFY)?; > + > + let datastore =3D DataStore::lookup_datastore(&store, Operation::Wri= te)?; > + > + // Best-effort pre-checks for a fast synchronous error before spawni= ng a worker. > + if ns =3D=3D new_ns { > + bail!("source and target namespace must be different"); > + } > + if !datastore.namespace_exists(&new_ns) { > + bail!("target namespace '{new_ns}' does not exist"); > + } > + let source_group =3D datastore.backup_group(ns.clone(), group.clone(= )); > + if !source_group.exists() { > + bail!("group '{group}' does not exist in namespace '{ns}'"); > + } > + let target_group =3D datastore.backup_group(new_ns.clone(), group.cl= one()); > + if target_group.exists() { > + bail!("group '{group}' already exists in target namespace '{new_= ns}'"); > + } > + > + let worker_id =3D format!("{store}:{ns}:{group}"); should we encode the target NS as well, to allow filtering on both ends? > + let to_stdout =3D rpcenv.env_type() =3D=3D RpcEnvironmentType::CLI; > + > + let upid_str =3D WorkerTask::new_thread( > + "move-group", > + Some(worker_id), > + auth_id.to_string(), > + to_stdout, > + move |_worker| datastore.move_group(&ns, &group, &new_ns), > + )?; > + > + Ok(json!(upid_str)) > +} > + > #[api( > input: { > properties: { > @@ -2828,6 +2903,7 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap =3D &[ > "groups", > &Router::new() > .get(&API_METHOD_LIST_GROUPS) > + .put(&API_METHOD_MOVE_GROUP) I am not sure this is a good fit.. why not make it a separate endpoint? either a single one for both move operations, or two endpoints? 
the whole (pre-existing) structure here is a bit weird, because it's a mix and match of datastore level, namespace level, group level and snapshot level endpoints without an actual hierarchy/tree.. > .delete(&API_METHOD_DELETE_GROUP), > ), > ("mount", &Router::new().post(&API_METHOD_MOUNT)), > --=20 > 2.47.3 >=20 >=20 >=20 >=20 >=20 >=20