From: Hannes Laimer <h.laimer@proxmox.com>
To: pdm-devel@lists.proxmox.com
Date: Tue, 21 Oct 2025 15:50:18 +0200
Message-ID: <20251021135018.88877-5-h.laimer@proxmox.com>
In-Reply-To: <20251021135018.88877-1-h.laimer@proxmox.com>
References: <20251021135018.88877-1-h.laimer@proxmox.com>
X-Mailer: git-send-email 2.47.3
Subject: [pdm-devel] [PATCH proxmox-datacenter-manager 1/1] server: use types instead of strings for migration parameters

This is specifically about target-storage and target-bridge: these used to be strings, but are now actual Vecs with the individual mappings as items. Since pve-api-types now generates Vecs instead of Strings for list parameters, we can use the generated types directly.
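For illustration, a minimal before/after sketch of what that means for a parameter like target-storage (hypothetical: the exact item type and the "source:target" mapping syntax are assumptions here, the real definitions come from the generated pve-api-types code):

    // Hypothetical sketch, not taken from the generated code.
    fn _example() {
        // before: the whole mapping list arrived as one comma-separated string
        let _target_storage: Option<String> =
            Some("local:remote-local,fast:fast".to_string());

        // after: list parameters are generated as a Vec with one mapping per
        // item, so handlers can forward the typed value without re-parsing
        let _target_storage: Vec<String> = vec![
            "local:remote-local".to_string(),
            "fast:fast".to_string(),
        ];
    }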
Signed-off-by: Hannes Laimer --- server/src/api/pve/lxc.rs | 133 ++++++++---------------------------- server/src/api/pve/qemu.rs | 135 ++++++++----------------------------- 2 files changed, 56 insertions(+), 212 deletions(-) diff --git a/server/src/api/pve/lxc.rs b/server/src/api/pve/lxc.rs index 83f9f4a..61db8ff 100644 --- a/server/src/api/pve/lxc.rs +++ b/server/src/api/pve/lxc.rs @@ -288,29 +288,10 @@ pub async fn lxc_shutdown( schema: NODE_SCHEMA, optional: true, }, - target: { schema: NODE_SCHEMA }, vmid: { schema: VMID_SCHEMA }, - online: { - type: bool, - description: "Attempt an online migration if the container is running.", - optional: true, - }, - restart: { - type: bool, - description: "Perform a restart-migration if the container is running.", - optional: true, - }, - "target-storage": { - description: "Mapping of source storages to target storages.", - optional: true, - }, - bwlimit: { - description: "Override I/O bandwidth limit (in KiB/s).", - optional: true, - }, - timeout: { - description: "Shutdown timeout for restart-migrations.", - optional: true, + migrate: { + type: pve_api_types::MigrateLxc, + flatten: true, }, }, }, @@ -327,35 +308,23 @@ pub async fn lxc_migrate( remote: String, node: Option, vmid: u32, - bwlimit: Option, - restart: Option, - online: Option, - target: String, - target_storage: Option, - timeout: Option, + migrate: pve_api_types::MigrateLxc, ) -> Result { - let bwlimit = bwlimit.map(|n| n as f64); - - log::info!("in-cluster migration requested for remote {remote:?} ct {vmid} to node {target:?}"); + log::info!( + "in-cluster migration requested for remote {remote:?} ct {vmid} to node {:?}", + migrate.target + ); let (remotes, _) = pdm_config::remotes::config()?; let pve = connect_to_remote(&remotes, &remote)?; let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; - if node == target { + if node == migrate.target { bail!("refusing migration to the same node"); } - let params = pve_api_types::MigrateLxc { - bwlimit, - online, - restart, - target, - target_storage, - timeout, - }; - let upid = pve.migrate_lxc(&node, vmid, params).await?; + let upid = pve.migrate_lxc(&node, vmid, migrate).await?; new_remote_upid(remote, upid).await } @@ -370,44 +339,10 @@ pub async fn lxc_migrate( optional: true, }, vmid: { schema: VMID_SCHEMA }, - "target-vmid": { - optional: true, - schema: VMID_SCHEMA, - }, - delete: { - description: "Delete the original VM and related data after successful migration.", - optional: true, - default: false, - }, - online: { - type: bool, - description: "Perform an online migration if the vm is running.", - optional: true, - default: false, - }, - "target-storage": { - description: "Mapping of source storages to target storages.", - }, - "target-bridge": { - description: "Mapping of source bridges to remote bridges.", - }, - bwlimit: { - description: "Override I/O bandwidth limit (in KiB/s).", - optional: true, - }, - restart: { - description: "Perform a restart-migration.", - optional: true, - }, - timeout: { - description: "Add a shutdown timeout for the restart-migration.", - optional: true, - }, // TODO better to change remote migration to proxy to node? 
- "target-endpoint": { - type: String, - optional: true, - description: "The target endpoint to use for the connection.", + remote_migrate: { + type: pve_api_types::RemoteMigrateLxc, + flatten: true, }, }, }, @@ -425,15 +360,7 @@ pub async fn lxc_remote_migrate( target: String, // this is the destination remote name node: Option, vmid: u32, - target_vmid: Option, - delete: bool, - online: bool, - target_storage: String, - target_bridge: String, - bwlimit: Option, - restart: Option, - timeout: Option, - target_endpoint: Option, + remote_migrate: pve_api_types::RemoteMigrateLxc, rpcenv: &mut dyn RpcEnvironment, ) -> Result { let user_info = CachedUserInfo::new()?; @@ -447,7 +374,7 @@ pub async fn lxc_remote_migrate( "resource", &target, "guest", - &target_vmid.unwrap_or(vmid).to_string(), + &remote_migrate.target_vmid.unwrap_or(vmid).to_string(), ], ); if target_privs & PRIV_RESOURCE_MIGRATE == 0 { @@ -456,7 +383,7 @@ pub async fn lxc_remote_migrate( "missing PRIV_RESOURCE_MIGRATE on target remote+vmid" ); } - if delete { + if remote_migrate.delete.unwrap_or_default() { check_guest_delete_perms(rpcenv, &remote, vmid)?; } @@ -477,14 +404,17 @@ pub async fn lxc_remote_migrate( // FIXME: For now we'll only try with the first node but we should probably try others, too, in // case some are offline? + // TODO: target_endpoint optional? if single node i guess let target_node = target .nodes .iter() - .find(|endpoint| match target_endpoint.as_deref() { - Some(target) => target == endpoint.hostname, - None => true, - }) - .ok_or_else(|| match target_endpoint { + .find( + |endpoint| match Some(remote_migrate.target_endpoint.clone()).as_deref() { + Some(target) => target == endpoint.hostname, + None => true, + }, + ) + .ok_or_else(|| match Some(remote_migrate.target_endpoint.clone()) { Some(endpoint) => format_err!("{endpoint} not configured for target cluster"), None => format_err!("no nodes configured for target cluster"), })?; @@ -504,19 +434,10 @@ pub async fn lxc_remote_migrate( } log::info!("forwarding remote migration requested"); - let params = pve_api_types::RemoteMigrateLxc { - target_bridge, - target_storage, - delete: Some(delete), - online: Some(online), - target_vmid, - target_endpoint, - bwlimit: bwlimit.map(|limit| limit as f64), - restart, - timeout, - }; log::info!("migrating vm {vmid} of node {node:?}"); - let upid = source_conn.remote_migrate_lxc(&node, vmid, params).await?; + let upid = source_conn + .remote_migrate_lxc(&node, vmid, remote_migrate) + .await?; new_remote_upid(source, upid).await } diff --git a/server/src/api/pve/qemu.rs b/server/src/api/pve/qemu.rs index 54ede11..6158bef 100644 --- a/server/src/api/pve/qemu.rs +++ b/server/src/api/pve/qemu.rs @@ -10,11 +10,11 @@ use proxmox_sortable_macro::sortable; use pdm_api_types::remotes::REMOTE_ID_SCHEMA; use pdm_api_types::{ - Authid, ConfigurationState, RemoteUpid, CIDR_FORMAT, NODE_SCHEMA, PRIV_RESOURCE_AUDIT, - PRIV_RESOURCE_MANAGE, PRIV_RESOURCE_MIGRATE, SNAPSHOT_NAME_SCHEMA, VMID_SCHEMA, + Authid, ConfigurationState, RemoteUpid, NODE_SCHEMA, PRIV_RESOURCE_AUDIT, PRIV_RESOURCE_MANAGE, + PRIV_RESOURCE_MIGRATE, SNAPSHOT_NAME_SCHEMA, VMID_SCHEMA, }; -use pve_api_types::{QemuMigratePreconditions, StartQemuMigrationType}; +use pve_api_types::QemuMigratePreconditions; use crate::api::pve::get_remote; @@ -297,37 +297,9 @@ pub async fn qemu_shutdown( }, target: { schema: NODE_SCHEMA }, vmid: { schema: VMID_SCHEMA }, - online: { - type: bool, - description: "Perform an online migration if the vm is running.", - optional: true, - 
}, - "target-storage": { - description: "Mapping of source storages to target storages.", - optional: true, - }, - bwlimit: { - description: "Override I/O bandwidth limit (in KiB/s).", - optional: true, - }, - "migration-network": { - description: "CIDR of the (sub) network that is used for migration.", - type: String, - format: &CIDR_FORMAT, - optional: true, - }, - "migration-type": { - type: StartQemuMigrationType, - optional: true, - }, - force: { - description: "Allow to migrate VMs with local devices.", - optional: true, - default: false, - }, - "with-local-disks": { - description: "Enable live storage migration for local disks.", - optional: true, + migrate: { + type: pve_api_types::MigrateQemu, + flatten: true, }, }, }, @@ -344,38 +316,23 @@ pub async fn qemu_migrate( remote: String, node: Option, vmid: u32, - bwlimit: Option, - force: Option, - migration_network: Option, - migration_type: Option, - online: Option, - target: String, - target_storage: Option, - with_local_disks: Option, + migrate: pve_api_types::MigrateQemu, ) -> Result { - log::info!("in-cluster migration requested for remote {remote:?} vm {vmid} to node {target:?}"); + log::info!( + "in-cluster migration requested for remote {remote:?} vm {vmid} to node {:?}", + migrate.target + ); let (remotes, _) = pdm_config::remotes::config()?; let pve = connect_to_remote(&remotes, &remote)?; let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; - if node == target { + if node == migrate.target { bail!("refusing migration to the same node"); } - let params = pve_api_types::MigrateQemu { - bwlimit, - force, - migration_network, - migration_type, - online, - target, - targetstorage: target_storage, - with_local_disks, - with_conntrack_state: None, - }; - let upid = pve.migrate_qemu(&node, vmid, params).await?; + let upid = pve.migrate_qemu(&node, vmid, migrate).await?; new_remote_upid(remote, upid).await } @@ -431,32 +388,9 @@ async fn qemu_migrate_preconditions( optional: true, schema: VMID_SCHEMA, }, - delete: { - description: "Delete the original VM and related data after successful migration.", - optional: true, - default: false, - }, - online: { - type: bool, - description: "Perform an online migration if the vm is running.", - optional: true, - default: false, - }, - "target-storage": { - description: "Mapping of source storages to target storages.", - }, - "target-bridge": { - description: "Mapping of source bridges to remote bridges.", - }, - bwlimit: { - description: "Override I/O bandwidth limit (in KiB/s).", - optional: true, - }, - // TODO better to change remote migration to proxy to node? 
- "target-endpoint": { - type: String, - optional: true, - description: "The target endpoint to use for the connection.", + remote_migrate: { + type: pve_api_types::RemoteMigrateQemu, + flatten: true, }, }, }, @@ -474,13 +408,7 @@ pub async fn qemu_remote_migrate( target: String, // this is the destination remote name node: Option, vmid: u32, - target_vmid: Option, - delete: bool, - online: bool, - target_storage: String, - target_bridge: String, - bwlimit: Option, - target_endpoint: Option, + remote_migrate: pve_api_types::RemoteMigrateQemu, rpcenv: &mut dyn RpcEnvironment, ) -> Result { let user_info = CachedUserInfo::new()?; @@ -494,7 +422,7 @@ pub async fn qemu_remote_migrate( "resource", &target, "guest", - &target_vmid.unwrap_or(vmid).to_string(), + &remote_migrate.target_vmid.unwrap_or(vmid).to_string(), ], ); if target_privs & PRIV_RESOURCE_MIGRATE == 0 { @@ -504,7 +432,7 @@ pub async fn qemu_remote_migrate( ); } - if delete { + if remote_migrate.delete.unwrap_or_default() { check_guest_delete_perms(rpcenv, &remote, vmid)?; } @@ -528,11 +456,13 @@ pub async fn qemu_remote_migrate( let target_node = target .nodes .iter() - .find(|endpoint| match target_endpoint.as_deref() { - Some(target) => target == endpoint.hostname, - None => true, - }) - .ok_or_else(|| match target_endpoint { + .find( + |endpoint| match Some(remote_migrate.target_endpoint.clone()).as_deref() { + Some(target) => target == endpoint.hostname, + None => true, + }, + ) + .ok_or_else(|| match Some(remote_migrate.target_endpoint.clone()) { Some(endpoint) => format_err!("{endpoint} not configured for target cluster"), None => format_err!("no nodes configured for target cluster"), })?; @@ -552,17 +482,10 @@ pub async fn qemu_remote_migrate( } log::info!("forwarding remote migration requested"); - let params = pve_api_types::RemoteMigrateQemu { - target_bridge, - target_storage, - delete: Some(delete), - online: Some(online), - target_vmid, - target_endpoint, - bwlimit, - }; log::info!("migrating vm {vmid} of node {node:?}"); - let upid = source_conn.remote_migrate_qemu(&node, vmid, params).await?; + let upid = source_conn + .remote_migrate_qemu(&node, vmid, remote_migrate) + .await?; new_remote_upid(source, upid).await } -- 2.47.3 _______________________________________________ pdm-devel mailing list pdm-devel@lists.proxmox.com https://lists.proxmox.com/cgi-bin/mailman/listinfo/pdm-devel