From: Hannes Laimer <h.laimer@proxmox.com>
To: pdm-devel@lists.proxmox.com
Subject: [pdm-devel] [PATCH datacenter-manager 1/2] Revert "server: use types indead of string for migration parameters"
Date: Tue, 18 Nov 2025 13:21:04 +0100
Message-ID: <20251118122105.119918-2-h.laimer@proxmox.com>
In-Reply-To: <20251118122105.119918-1-h.laimer@proxmox.com>

The problem with using the flattened PVE type directly is that the
`target-endpoint` it expects differs from what PDM wants as
`target-endpoint`. Specifically, PDM expects it to be an optional
string identifying the target node. The PVE type, however, uses a
PropertyString<ProxmoxRemote> as `target-endpoint`, which has two
mandatory fields (host and api-token). The only way to make this work
with the PVE type is to have the UI set `target-endpoint` to something
like:
`host=192.168.1.34,apitoken=dummy`
which does not allow the auto-select-node case to be encoded properly.

This reverts commit dbb8524549c335987046ebe6e742635e1357aa3d.

Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
---
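For illustration, a minimal standalone Rust sketch (all names below are
made up for this note, not the actual PDM types) of what a plain,
optional `target-endpoint` string buys us: an explicit value selects a
specific configured node, while omitting it falls back to the first
configured node (auto-select), mirroring the find/ok_or_else logic in
the handlers further down:

    struct NodeEndpoint {
        hostname: String,
    }

    fn pick_target_node<'a>(
        nodes: &'a [NodeEndpoint],
        target_endpoint: Option<&str>,
    ) -> Result<&'a NodeEndpoint, String> {
        nodes
            .iter()
            .find(|endpoint| match target_endpoint {
                // explicit endpoint: must match a configured hostname
                Some(target) => target == endpoint.hostname,
                // auto-select: take the first configured node
                None => true,
            })
            .ok_or_else(|| match target_endpoint {
                Some(endpoint) => format!("{endpoint} not configured for target cluster"),
                None => "no nodes configured for target cluster".to_string(),
            })
    }

    fn main() {
        let nodes = vec![
            NodeEndpoint { hostname: "pve-a".into() },
            NodeEndpoint { hostname: "pve-b".into() },
        ];
        // explicit selection
        assert_eq!(pick_target_node(&nodes, Some("pve-b")).unwrap().hostname, "pve-b");
        // auto-select picks the first configured node when no target-endpoint is given
        assert_eq!(pick_target_node(&nodes, None).unwrap().hostname, "pve-a");
    }
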
 server/src/api/pve/lxc.rs  | 133 ++++++++++++++++++++++++++++--------
 server/src/api/pve/qemu.rs | 135 +++++++++++++++++++++++++++++--------
 2 files changed, 212 insertions(+), 56 deletions(-)

diff --git a/server/src/api/pve/lxc.rs b/server/src/api/pve/lxc.rs
index 1b05a30..1ef936d 100644
--- a/server/src/api/pve/lxc.rs
+++ b/server/src/api/pve/lxc.rs
@@ -288,10 +288,29 @@ pub async fn lxc_shutdown(
                 schema: NODE_SCHEMA,
                 optional: true,
             },
+            target: { schema: NODE_SCHEMA },
             vmid: { schema: VMID_SCHEMA },
-            migrate: {
-                type: pve_api_types::MigrateLxc,
-                flatten: true,
+            online: {
+                type: bool,
+                description: "Attempt an online migration if the container is running.",
+                optional: true,
+            },
+            restart: {
+                type: bool,
+                description: "Perform a restart-migration if the container is running.",
+                optional: true,
+            },
+            "target-storage": {
+                description: "Mapping of source storages to target storages.",
+                optional: true,
+            },
+            bwlimit: {
+                description: "Override I/O bandwidth limit (in KiB/s).",
+                optional: true,
+            },
+            timeout: {
+                description: "Shutdown timeout for restart-migrations.",
+                optional: true,
             },
         },
     },
@@ -308,23 +327,35 @@ pub async fn lxc_migrate(
     remote: String,
     node: Option<String>,
     vmid: u32,
-    migrate: pve_api_types::MigrateLxc,
+    bwlimit: Option<u64>,
+    restart: Option<bool>,
+    online: Option<bool>,
+    target: String,
+    target_storage: Option<String>,
+    timeout: Option<i64>,
 ) -> Result<RemoteUpid, Error> {
-    log::info!(
-        "in-cluster migration requested for remote {remote:?} ct {vmid} to node {:?}",
-        migrate.target
-    );
+    let bwlimit = bwlimit.map(|n| n as f64);
+
+    log::info!("in-cluster migration requested for remote {remote:?} ct {vmid} to node {target:?}");
 
     let (remotes, _) = pdm_config::remotes::config()?;
     let pve = connect_to_remote(&remotes, &remote)?;
 
     let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
 
-    if node == migrate.target {
+    if node == target {
         bail!("refusing migration to the same node");
     }
 
-    let upid = pve.migrate_lxc(&node, vmid, migrate).await?;
+    let params = pve_api_types::MigrateLxc {
+        bwlimit,
+        online,
+        restart,
+        target,
+        target_storage,
+        timeout,
+    };
+    let upid = pve.migrate_lxc(&node, vmid, params).await?;
 
     new_remote_upid(remote, upid).await
 }
@@ -339,10 +370,44 @@ pub async fn lxc_migrate(
                 optional: true,
             },
             vmid: { schema: VMID_SCHEMA },
+            "target-vmid": {
+                optional: true,
+                schema: VMID_SCHEMA,
+            },
+            delete: {
+                description: "Delete the original VM and related data after successful migration.",
+                optional: true,
+                default: false,
+            },
+            online: {
+                type: bool,
+                description: "Perform an online migration if the vm is running.",
+                optional: true,
+                default: false,
+            },
+            "target-storage": {
+                description: "Mapping of source storages to target storages.",
+            },
+            "target-bridge": {
+                description: "Mapping of source bridges to remote bridges.",
+            },
+            bwlimit: {
+                description: "Override I/O bandwidth limit (in KiB/s).",
+                optional: true,
+            },
+            restart: {
+                description: "Perform a restart-migration.",
+                optional: true,
+            },
+            timeout: {
+                description: "Add a shutdown timeout for the restart-migration.",
+                optional: true,
+            },
             // TODO better to change remote migration to proxy to node?
-            remote_migrate: {
-                type: pve_api_types::RemoteMigrateLxc,
-                flatten: true,
+            "target-endpoint": {
+                type: String,
+                optional: true,
+                description: "The target endpoint to use for the connection.",
             },
         },
     },
@@ -360,7 +425,15 @@ pub async fn lxc_remote_migrate(
     target: String, // this is the destination remote name
     node: Option<String>,
     vmid: u32,
-    remote_migrate: pve_api_types::RemoteMigrateLxc,
+    target_vmid: Option<u32>,
+    delete: bool,
+    online: bool,
+    target_storage: String,
+    target_bridge: String,
+    bwlimit: Option<u64>,
+    restart: Option<bool>,
+    timeout: Option<i64>,
+    target_endpoint: Option<String>,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<RemoteUpid, Error> {
     let user_info = CachedUserInfo::new()?;
@@ -374,7 +447,7 @@ pub async fn lxc_remote_migrate(
             "resource",
             &target,
             "guest",
-            &remote_migrate.target_vmid.unwrap_or(vmid).to_string(),
+            &target_vmid.unwrap_or(vmid).to_string(),
         ],
     );
     if target_privs & PRIV_RESOURCE_MIGRATE == 0 {
@@ -383,7 +456,7 @@ pub async fn lxc_remote_migrate(
             "missing PRIV_RESOURCE_MIGRATE on target remote+vmid"
         );
     }
-    if remote_migrate.delete.unwrap_or_default() {
+    if delete {
         check_guest_delete_perms(rpcenv, &remote, vmid)?;
     }
 
@@ -404,17 +477,14 @@ pub async fn lxc_remote_migrate(
     // FIXME: For now we'll only try with the first node but we should probably try others, too, in
     // case some are offline?
 
-    // TODO: target_endpoint optional? if single node i guess
     let target_node = target
         .nodes
         .iter()
-        .find(
-            |endpoint| match Some(remote_migrate.target_endpoint.clone()).as_deref() {
-                Some(target) => target == endpoint.hostname,
-                None => true,
-            },
-        )
-        .ok_or_else(|| match Some(remote_migrate.target_endpoint.clone()) {
+        .find(|endpoint| match target_endpoint.as_deref() {
+            Some(target) => target == endpoint.hostname,
+            None => true,
+        })
+        .ok_or_else(|| match target_endpoint {
             Some(endpoint) => format_err!("{endpoint} not configured for target cluster"),
             None => format_err!("no nodes configured for target cluster"),
         })?;
@@ -434,10 +504,19 @@ pub async fn lxc_remote_migrate(
     }
 
     log::info!("forwarding remote migration requested");
+    let params = pve_api_types::RemoteMigrateLxc {
+        target_bridge,
+        target_storage,
+        delete: Some(delete),
+        online: Some(online),
+        target_vmid,
+        target_endpoint,
+        bwlimit: bwlimit.map(|limit| limit as f64),
+        restart,
+        timeout,
+    };
     log::info!("migrating vm {vmid} of node {node:?}");
-    let upid = source_conn
-        .remote_migrate_lxc(&node, vmid, remote_migrate)
-        .await?;
+    let upid = source_conn.remote_migrate_lxc(&node, vmid, params).await?;
 
     new_remote_upid(source, upid).await
 }
diff --git a/server/src/api/pve/qemu.rs b/server/src/api/pve/qemu.rs
index 05fa92c..5e66a48 100644
--- a/server/src/api/pve/qemu.rs
+++ b/server/src/api/pve/qemu.rs
@@ -10,11 +10,11 @@ use proxmox_sortable_macro::sortable;
 
 use pdm_api_types::remotes::REMOTE_ID_SCHEMA;
 use pdm_api_types::{
-    Authid, ConfigurationState, RemoteUpid, NODE_SCHEMA, PRIV_RESOURCE_AUDIT, PRIV_RESOURCE_MANAGE,
-    PRIV_RESOURCE_MIGRATE, SNAPSHOT_NAME_SCHEMA, VMID_SCHEMA,
+    Authid, ConfigurationState, RemoteUpid, CIDR_FORMAT, NODE_SCHEMA, PRIV_RESOURCE_AUDIT,
+    PRIV_RESOURCE_MANAGE, PRIV_RESOURCE_MIGRATE, SNAPSHOT_NAME_SCHEMA, VMID_SCHEMA,
 };
 
-use pve_api_types::QemuMigratePreconditions;
+use pve_api_types::{QemuMigratePreconditions, StartQemuMigrationType};
 
 use crate::api::pve::get_remote;
 
@@ -297,9 +297,37 @@ pub async fn qemu_shutdown(
             },
             target: { schema: NODE_SCHEMA },
             vmid: { schema: VMID_SCHEMA },
-            migrate: {
-                type: pve_api_types::MigrateQemu,
-                flatten: true,
+            online: {
+                type: bool,
+                description: "Perform an online migration if the vm is running.",
+                optional: true,
+            },
+            "target-storage": {
+                description: "Mapping of source storages to target storages.",
+                optional: true,
+            },
+            bwlimit: {
+                description: "Override I/O bandwidth limit (in KiB/s).",
+                optional: true,
+            },
+            "migration-network": {
+                description: "CIDR of the (sub) network that is used for migration.",
+                type: String,
+                format: &CIDR_FORMAT,
+                optional: true,
+            },
+            "migration-type": {
+                type: StartQemuMigrationType,
+                optional: true,
+            },
+            force: {
+                description: "Allow to migrate VMs with local devices.",
+                optional: true,
+                default: false,
+            },
+            "with-local-disks": {
+                description: "Enable live storage migration for local disks.",
+                optional: true,
             },
         },
     },
@@ -316,23 +344,38 @@ pub async fn qemu_migrate(
     remote: String,
     node: Option<String>,
     vmid: u32,
-    migrate: pve_api_types::MigrateQemu,
+    bwlimit: Option<u64>,
+    force: Option<bool>,
+    migration_network: Option<String>,
+    migration_type: Option<StartQemuMigrationType>,
+    online: Option<bool>,
+    target: String,
+    target_storage: Option<String>,
+    with_local_disks: Option<bool>,
 ) -> Result<RemoteUpid, Error> {
-    log::info!(
-        "in-cluster migration requested for remote {remote:?} vm {vmid} to node {:?}",
-        migrate.target
-    );
+    log::info!("in-cluster migration requested for remote {remote:?} vm {vmid} to node {target:?}");
 
     let (remotes, _) = pdm_config::remotes::config()?;
     let pve = connect_to_remote(&remotes, &remote)?;
 
     let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
 
-    if node == migrate.target {
+    if node == target {
         bail!("refusing migration to the same node");
     }
 
-    let upid = pve.migrate_qemu(&node, vmid, migrate).await?;
+    let params = pve_api_types::MigrateQemu {
+        bwlimit,
+        force,
+        migration_network,
+        migration_type,
+        online,
+        target,
+        targetstorage: target_storage,
+        with_local_disks,
+        with_conntrack_state: None,
+    };
+    let upid = pve.migrate_qemu(&node, vmid, params).await?;
 
     new_remote_upid(remote, upid).await
 }
@@ -388,9 +431,32 @@ async fn qemu_migrate_preconditions(
                 optional: true,
                 schema: VMID_SCHEMA,
             },
-            remote_migrate: {
-                type: pve_api_types::RemoteMigrateQemu,
-                flatten: true,
+            delete: {
+                description: "Delete the original VM and related data after successful migration.",
+                optional: true,
+                default: false,
+            },
+            online: {
+                type: bool,
+                description: "Perform an online migration if the vm is running.",
+                optional: true,
+                default: false,
+            },
+            "target-storage": {
+                description: "Mapping of source storages to target storages.",
+            },
+            "target-bridge": {
+                description: "Mapping of source bridges to remote bridges.",
+            },
+            bwlimit: {
+                description: "Override I/O bandwidth limit (in KiB/s).",
+                optional: true,
+            },
+            // TODO better to change remote migration to proxy to node?
+            "target-endpoint": {
+                type: String,
+                optional: true,
+                description: "The target endpoint to use for the connection.",
             },
         },
     },
@@ -408,7 +474,13 @@ pub async fn qemu_remote_migrate(
     target: String, // this is the destination remote name
     node: Option<String>,
     vmid: u32,
-    remote_migrate: pve_api_types::RemoteMigrateQemu,
+    target_vmid: Option<u32>,
+    delete: bool,
+    online: bool,
+    target_storage: String,
+    target_bridge: String,
+    bwlimit: Option<u64>,
+    target_endpoint: Option<String>,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<RemoteUpid, Error> {
     let user_info = CachedUserInfo::new()?;
@@ -422,7 +494,7 @@ pub async fn qemu_remote_migrate(
             "resource",
             &target,
             "guest",
-            &remote_migrate.target_vmid.unwrap_or(vmid).to_string(),
+            &target_vmid.unwrap_or(vmid).to_string(),
         ],
     );
     if target_privs & PRIV_RESOURCE_MIGRATE == 0 {
@@ -432,7 +504,7 @@ pub async fn qemu_remote_migrate(
         );
     }
 
-    if remote_migrate.delete.unwrap_or_default() {
+    if delete {
         check_guest_delete_perms(rpcenv, &remote, vmid)?;
     }
 
@@ -456,13 +528,11 @@ pub async fn qemu_remote_migrate(
     let target_node = target
         .nodes
         .iter()
-        .find(
-            |endpoint| match Some(remote_migrate.target_endpoint.clone()).as_deref() {
-                Some(target) => target == endpoint.hostname,
-                None => true,
-            },
-        )
-        .ok_or_else(|| match Some(remote_migrate.target_endpoint.clone()) {
+        .find(|endpoint| match target_endpoint.as_deref() {
+            Some(target) => target == endpoint.hostname,
+            None => true,
+        })
+        .ok_or_else(|| match target_endpoint {
             Some(endpoint) => format_err!("{endpoint} not configured for target cluster"),
             None => format_err!("no nodes configured for target cluster"),
         })?;
@@ -482,10 +552,17 @@ pub async fn qemu_remote_migrate(
     }
 
     log::info!("forwarding remote migration requested");
+    let params = pve_api_types::RemoteMigrateQemu {
+        target_bridge,
+        target_storage,
+        delete: Some(delete),
+        online: Some(online),
+        target_vmid,
+        target_endpoint,
+        bwlimit,
+    };
     log::info!("migrating vm {vmid} of node {node:?}");
-    let upid = source_conn
-        .remote_migrate_qemu(&node, vmid, remote_migrate)
-        .await?;
+    let upid = source_conn.remote_migrate_qemu(&node, vmid, params).await?;
 
     new_remote_upid(source, upid).await
 }
-- 
2.47.3


