From mboxrd@z Thu Jan  1 00:00:00 1970
From: Hannes Laimer <h.laimer@proxmox.com>
To: pdm-devel@lists.proxmox.com
Date: Tue, 18 Nov 2025 13:21:04 +0100
Message-ID: <20251118122105.119918-2-h.laimer@proxmox.com>
X-Mailer: git-send-email 2.47.3
In-Reply-To: <20251118122105.119918-1-h.laimer@proxmox.com>
References: <20251118122105.119918-1-h.laimer@proxmox.com>
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
Subject: [pdm-devel] [PATCH datacenter-manager 1/2] Revert "server: use types
 indead of string for migration parameters"
List-Id: Proxmox Datacenter Manager development discussion

The problem with using the flattened PVE type directly is that the
`target-endpoint` it expects differs from what PDM wants as
`target-endpoint`. Specifically, PDM expects an optional string
identifying the target node, while the PVE type carries
`target-endpoint` as a PropertyString with two mandatory fields (host
and api-token). The only way to make this work with the PVE type is to
have the UI set `target-endpoint` to something like

  `host=192.168.1.34,apitoken=dummy`

which leaves no proper way to encode the "auto-select node" case.

This reverts commit dbb8524549c335987046ebe6e742635e1357aa3d.
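To make the mismatch concrete, a rough sketch of the two shapes (the
schema snippet mirrors what this patch reintroduces below; the property
string line is only an illustration of the format the flattened PVE
type would demand instead):

    // What PDM wants to expose: an optional node name, where an
    // absent value means "auto-select the target node".
    "target-endpoint": {
        type: String,
        optional: true,
        description: "The target endpoint to use for the connection.",
    },

    // What the flattened PVE type forces into the schema: a mandatory
    // property string with two required fields, e.g.
    //   target-endpoint=host=192.168.1.34,apitoken=dummy
    // which cannot express "no endpoint given, pick a node".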
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
---
 server/src/api/pve/lxc.rs  | 133 ++++++++++++++++++++++++++++--------
 server/src/api/pve/qemu.rs | 135 +++++++++++++++++++++++++++++--------
 2 files changed, 212 insertions(+), 56 deletions(-)

diff --git a/server/src/api/pve/lxc.rs b/server/src/api/pve/lxc.rs
index 1b05a30..1ef936d 100644
--- a/server/src/api/pve/lxc.rs
+++ b/server/src/api/pve/lxc.rs
@@ -288,10 +288,29 @@ pub async fn lxc_shutdown(
             schema: NODE_SCHEMA,
             optional: true,
         },
+        target: { schema: NODE_SCHEMA },
         vmid: { schema: VMID_SCHEMA },
-        migrate: {
-            type: pve_api_types::MigrateLxc,
-            flatten: true,
+        online: {
+            type: bool,
+            description: "Attempt an online migration if the container is running.",
+            optional: true,
+        },
+        restart: {
+            type: bool,
+            description: "Perform a restart-migration if the container is running.",
+            optional: true,
+        },
+        "target-storage": {
+            description: "Mapping of source storages to target storages.",
+            optional: true,
+        },
+        bwlimit: {
+            description: "Override I/O bandwidth limit (in KiB/s).",
+            optional: true,
+        },
+        timeout: {
+            description: "Shutdown timeout for restart-migrations.",
+            optional: true,
         },
     },
 },
@@ -308,23 +327,35 @@ pub async fn lxc_migrate(
     remote: String,
     node: Option<String>,
     vmid: u32,
-    migrate: pve_api_types::MigrateLxc,
+    bwlimit: Option<u64>,
+    restart: Option<bool>,
+    online: Option<bool>,
+    target: String,
+    target_storage: Option<String>,
+    timeout: Option<i64>,
 ) -> Result<RemoteUpid, Error> {
-    log::info!(
-        "in-cluster migration requested for remote {remote:?} ct {vmid} to node {:?}",
-        migrate.target
-    );
+    let bwlimit = bwlimit.map(|n| n as f64);
+
+    log::info!("in-cluster migration requested for remote {remote:?} ct {vmid} to node {target:?}");

     let (remotes, _) = pdm_config::remotes::config()?;
     let pve = connect_to_remote(&remotes, &remote)?;

     let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;

-    if node == migrate.target {
+    if node == target {
         bail!("refusing migration to the same node");
     }

-    let upid = pve.migrate_lxc(&node, vmid, migrate).await?;
+    let params = pve_api_types::MigrateLxc {
+        bwlimit,
+        online,
+        restart,
+        target,
+        target_storage,
+        timeout,
+    };
+    let upid = pve.migrate_lxc(&node, vmid, params).await?;

     new_remote_upid(remote, upid).await
 }
@@ -339,10 +370,44 @@ pub async fn lxc_migrate(
             optional: true,
         },
         vmid: { schema: VMID_SCHEMA },
+        "target-vmid": {
+            optional: true,
+            schema: VMID_SCHEMA,
+        },
+        delete: {
+            description: "Delete the original VM and related data after successful migration.",
+            optional: true,
+            default: false,
+        },
+        online: {
+            type: bool,
+            description: "Perform an online migration if the vm is running.",
+            optional: true,
+            default: false,
+        },
+        "target-storage": {
+            description: "Mapping of source storages to target storages.",
+        },
+        "target-bridge": {
+            description: "Mapping of source bridges to remote bridges.",
+        },
+        bwlimit: {
+            description: "Override I/O bandwidth limit (in KiB/s).",
+            optional: true,
+        },
+        restart: {
+            description: "Perform a restart-migration.",
+            optional: true,
+        },
+        timeout: {
+            description: "Add a shutdown timeout for the restart-migration.",
+            optional: true,
+        },
         // TODO better to change remote migration to proxy to node?
-        remote_migrate: {
-            type: pve_api_types::RemoteMigrateLxc,
-            flatten: true,
+        "target-endpoint": {
+            type: String,
+            optional: true,
+            description: "The target endpoint to use for the connection.",
         },
     },
 },
@@ -360,7 +425,15 @@ pub async fn lxc_remote_migrate(
     target: String, // this is the destination remote name
     node: Option<String>,
     vmid: u32,
-    remote_migrate: pve_api_types::RemoteMigrateLxc,
+    target_vmid: Option<u32>,
+    delete: bool,
+    online: bool,
+    target_storage: String,
+    target_bridge: String,
+    bwlimit: Option<u64>,
+    restart: Option<bool>,
+    timeout: Option<i64>,
+    target_endpoint: Option<String>,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<RemoteUpid, Error> {
     let user_info = CachedUserInfo::new()?;
@@ -374,7 +447,7 @@ pub async fn lxc_remote_migrate(
             "resource",
             &target,
             "guest",
-            &remote_migrate.target_vmid.unwrap_or(vmid).to_string(),
+            &target_vmid.unwrap_or(vmid).to_string(),
         ],
     );
     if target_privs & PRIV_RESOURCE_MIGRATE == 0 {
@@ -383,7 +456,7 @@ pub async fn lxc_remote_migrate(
            "missing PRIV_RESOURCE_MIGRATE on target remote+vmid"
         );
     }
-    if remote_migrate.delete.unwrap_or_default() {
+    if delete {
         check_guest_delete_perms(rpcenv, &remote, vmid)?;
     }

@@ -404,17 +477,14 @@ pub async fn lxc_remote_migrate(
     // FIXME: For now we'll only try with the first node but we should probably try others, too, in
     // case some are offline?
-    // TODO: target_endpoint optional? if single node i guess
     let target_node = target
         .nodes
         .iter()
-        .find(
-            |endpoint| match Some(remote_migrate.target_endpoint.clone()).as_deref() {
-                Some(target) => target == endpoint.hostname,
-                None => true,
-            },
-        )
-        .ok_or_else(|| match Some(remote_migrate.target_endpoint.clone()) {
+        .find(|endpoint| match target_endpoint.as_deref() {
+            Some(target) => target == endpoint.hostname,
+            None => true,
+        })
+        .ok_or_else(|| match target_endpoint {
             Some(endpoint) => format_err!("{endpoint} not configured for target cluster"),
             None => format_err!("no nodes configured for target cluster"),
         })?;

@@ -434,10 +504,19 @@ pub async fn lxc_remote_migrate(
     }

     log::info!("forwarding remote migration requested");
+    let params = pve_api_types::RemoteMigrateLxc {
+        target_bridge,
+        target_storage,
+        delete: Some(delete),
+        online: Some(online),
+        target_vmid,
+        target_endpoint,
+        bwlimit: bwlimit.map(|limit| limit as f64),
+        restart,
+        timeout,
+    };
     log::info!("migrating vm {vmid} of node {node:?}");
-    let upid = source_conn
-        .remote_migrate_lxc(&node, vmid, remote_migrate)
-        .await?;
+    let upid = source_conn.remote_migrate_lxc(&node, vmid, params).await?;

     new_remote_upid(source, upid).await
 }

diff --git a/server/src/api/pve/qemu.rs b/server/src/api/pve/qemu.rs
index 05fa92c..5e66a48 100644
--- a/server/src/api/pve/qemu.rs
+++ b/server/src/api/pve/qemu.rs
@@ -10,11 +10,11 @@ use proxmox_sortable_macro::sortable;

 use pdm_api_types::remotes::REMOTE_ID_SCHEMA;
 use pdm_api_types::{
-    Authid, ConfigurationState, RemoteUpid, NODE_SCHEMA, PRIV_RESOURCE_AUDIT, PRIV_RESOURCE_MANAGE,
-    PRIV_RESOURCE_MIGRATE, SNAPSHOT_NAME_SCHEMA, VMID_SCHEMA,
+    Authid, ConfigurationState, RemoteUpid, CIDR_FORMAT, NODE_SCHEMA, PRIV_RESOURCE_AUDIT,
+    PRIV_RESOURCE_MANAGE, PRIV_RESOURCE_MIGRATE, SNAPSHOT_NAME_SCHEMA, VMID_SCHEMA,
 };

-use pve_api_types::QemuMigratePreconditions;
+use pve_api_types::{QemuMigratePreconditions, StartQemuMigrationType};

 use crate::api::pve::get_remote;
@@ -297,9 +297,37 @@ pub async fn qemu_shutdown(
         },
         target: { schema: NODE_SCHEMA },
         vmid: { schema: VMID_SCHEMA },
-        migrate: {
-            type: pve_api_types::MigrateQemu,
-            flatten: true,
+        online: {
+            type: bool,
+            description:
"Perform an online migration if the vm is running.", + optional: true, + }, + "target-storage": { + description: "Mapping of source storages to target storages.", + optional: true, + }, + bwlimit: { + description: "Override I/O bandwidth limit (in KiB/s).", + optional: true, + }, + "migration-network": { + description: "CIDR of the (sub) network that is used for migration.", + type: String, + format: &CIDR_FORMAT, + optional: true, + }, + "migration-type": { + type: StartQemuMigrationType, + optional: true, + }, + force: { + description: "Allow to migrate VMs with local devices.", + optional: true, + default: false, + }, + "with-local-disks": { + description: "Enable live storage migration for local disks.", + optional: true, }, }, }, @@ -316,23 +344,38 @@ pub async fn qemu_migrate( remote: String, node: Option, vmid: u32, - migrate: pve_api_types::MigrateQemu, + bwlimit: Option, + force: Option, + migration_network: Option, + migration_type: Option, + online: Option, + target: String, + target_storage: Option, + with_local_disks: Option, ) -> Result { - log::info!( - "in-cluster migration requested for remote {remote:?} vm {vmid} to node {:?}", - migrate.target - ); + log::info!("in-cluster migration requested for remote {remote:?} vm {vmid} to node {target:?}"); let (remotes, _) = pdm_config::remotes::config()?; let pve = connect_to_remote(&remotes, &remote)?; let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; - if node == migrate.target { + if node == target { bail!("refusing migration to the same node"); } - let upid = pve.migrate_qemu(&node, vmid, migrate).await?; + let params = pve_api_types::MigrateQemu { + bwlimit, + force, + migration_network, + migration_type, + online, + target, + targetstorage: target_storage, + with_local_disks, + with_conntrack_state: None, + }; + let upid = pve.migrate_qemu(&node, vmid, params).await?; new_remote_upid(remote, upid).await } @@ -388,9 +431,32 @@ async fn qemu_migrate_preconditions( optional: true, schema: VMID_SCHEMA, }, - remote_migrate: { - type: pve_api_types::RemoteMigrateQemu, - flatten: true, + delete: { + description: "Delete the original VM and related data after successful migration.", + optional: true, + default: false, + }, + online: { + type: bool, + description: "Perform an online migration if the vm is running.", + optional: true, + default: false, + }, + "target-storage": { + description: "Mapping of source storages to target storages.", + }, + "target-bridge": { + description: "Mapping of source bridges to remote bridges.", + }, + bwlimit: { + description: "Override I/O bandwidth limit (in KiB/s).", + optional: true, + }, + // TODO better to change remote migration to proxy to node? 
+ "target-endpoint": { + type: String, + optional: true, + description: "The target endpoint to use for the connection.", }, }, }, @@ -408,7 +474,13 @@ pub async fn qemu_remote_migrate( target: String, // this is the destination remote name node: Option, vmid: u32, - remote_migrate: pve_api_types::RemoteMigrateQemu, + target_vmid: Option, + delete: bool, + online: bool, + target_storage: String, + target_bridge: String, + bwlimit: Option, + target_endpoint: Option, rpcenv: &mut dyn RpcEnvironment, ) -> Result { let user_info = CachedUserInfo::new()?; @@ -422,7 +494,7 @@ pub async fn qemu_remote_migrate( "resource", &target, "guest", - &remote_migrate.target_vmid.unwrap_or(vmid).to_string(), + &target_vmid.unwrap_or(vmid).to_string(), ], ); if target_privs & PRIV_RESOURCE_MIGRATE == 0 { @@ -432,7 +504,7 @@ pub async fn qemu_remote_migrate( ); } - if remote_migrate.delete.unwrap_or_default() { + if delete { check_guest_delete_perms(rpcenv, &remote, vmid)?; } @@ -456,13 +528,11 @@ pub async fn qemu_remote_migrate( let target_node = target .nodes .iter() - .find( - |endpoint| match Some(remote_migrate.target_endpoint.clone()).as_deref() { - Some(target) => target == endpoint.hostname, - None => true, - }, - ) - .ok_or_else(|| match Some(remote_migrate.target_endpoint.clone()) { + .find(|endpoint| match target_endpoint.as_deref() { + Some(target) => target == endpoint.hostname, + None => true, + }) + .ok_or_else(|| match target_endpoint { Some(endpoint) => format_err!("{endpoint} not configured for target cluster"), None => format_err!("no nodes configured for target cluster"), })?; @@ -482,10 +552,17 @@ pub async fn qemu_remote_migrate( } log::info!("forwarding remote migration requested"); + let params = pve_api_types::RemoteMigrateQemu { + target_bridge, + target_storage, + delete: Some(delete), + online: Some(online), + target_vmid, + target_endpoint, + bwlimit, + }; log::info!("migrating vm {vmid} of node {node:?}"); - let upid = source_conn - .remote_migrate_qemu(&node, vmid, remote_migrate) - .await?; + let upid = source_conn.remote_migrate_qemu(&node, vmid, params).await?; new_remote_upid(source, upid).await } -- 2.47.3 _______________________________________________ pdm-devel mailing list pdm-devel@lists.proxmox.com https://lists.proxmox.com/cgi-bin/mailman/listinfo/pdm-devel