public inbox for pdm-devel@lists.proxmox.com
 help / color / mirror / Atom feed
From: Hannes Laimer <h.laimer@proxmox.com>
To: pdm-devel@lists.proxmox.com
Subject: [pdm-devel] [PATCH proxmox-datacenter-manager 1/1] server: use types instead of string for migration parameters
Date: Tue, 21 Oct 2025 15:50:18 +0200	[thread overview]
Message-ID: <20251021135018.88877-5-h.laimer@proxmox.com> (raw)
In-Reply-To: <20251021135018.88877-1-h.laimer@proxmox.com>

This is specifically about target-storage and target-bridge, these were
strings, but are now actual Vec's with mappings as items. Since
pve-api-types now generates Vec's instead of Strings for list parameters
we can directly use the type.

Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
---
 server/src/api/pve/lxc.rs  | 133 ++++++++----------------------------
 server/src/api/pve/qemu.rs | 135 ++++++++-----------------------------
 2 files changed, 56 insertions(+), 212 deletions(-)

diff --git a/server/src/api/pve/lxc.rs b/server/src/api/pve/lxc.rs
index 83f9f4a..61db8ff 100644
--- a/server/src/api/pve/lxc.rs
+++ b/server/src/api/pve/lxc.rs
@@ -288,29 +288,10 @@ pub async fn lxc_shutdown(
                 schema: NODE_SCHEMA,
                 optional: true,
             },
-            target: { schema: NODE_SCHEMA },
             vmid: { schema: VMID_SCHEMA },
-            online: {
-                type: bool,
-                description: "Attempt an online migration if the container is running.",
-                optional: true,
-            },
-            restart: {
-                type: bool,
-                description: "Perform a restart-migration if the container is running.",
-                optional: true,
-            },
-            "target-storage": {
-                description: "Mapping of source storages to target storages.",
-                optional: true,
-            },
-            bwlimit: {
-                description: "Override I/O bandwidth limit (in KiB/s).",
-                optional: true,
-            },
-            timeout: {
-                description: "Shutdown timeout for restart-migrations.",
-                optional: true,
+            migrate: {
+                type: pve_api_types::MigrateLxc,
+                flatten: true,
             },
         },
     },
@@ -327,35 +308,23 @@ pub async fn lxc_migrate(
     remote: String,
     node: Option<String>,
     vmid: u32,
-    bwlimit: Option<u64>,
-    restart: Option<bool>,
-    online: Option<bool>,
-    target: String,
-    target_storage: Option<String>,
-    timeout: Option<i64>,
+    migrate: pve_api_types::MigrateLxc,
 ) -> Result<RemoteUpid, Error> {
-    let bwlimit = bwlimit.map(|n| n as f64);
-
-    log::info!("in-cluster migration requested for remote {remote:?} ct {vmid} to node {target:?}");
+    log::info!(
+        "in-cluster migration requested for remote {remote:?} ct {vmid} to node {:?}",
+        migrate.target
+    );
 
     let (remotes, _) = pdm_config::remotes::config()?;
     let pve = connect_to_remote(&remotes, &remote)?;
 
     let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
 
-    if node == target {
+    if node == migrate.target {
         bail!("refusing migration to the same node");
     }
 
-    let params = pve_api_types::MigrateLxc {
-        bwlimit,
-        online,
-        restart,
-        target,
-        target_storage,
-        timeout,
-    };
-    let upid = pve.migrate_lxc(&node, vmid, params).await?;
+    let upid = pve.migrate_lxc(&node, vmid, migrate).await?;
 
     new_remote_upid(remote, upid).await
 }
@@ -370,44 +339,10 @@ pub async fn lxc_migrate(
                 optional: true,
             },
             vmid: { schema: VMID_SCHEMA },
-            "target-vmid": {
-                optional: true,
-                schema: VMID_SCHEMA,
-            },
-            delete: {
-                description: "Delete the original VM and related data after successful migration.",
-                optional: true,
-                default: false,
-            },
-            online: {
-                type: bool,
-                description: "Perform an online migration if the vm is running.",
-                optional: true,
-                default: false,
-            },
-            "target-storage": {
-                description: "Mapping of source storages to target storages.",
-            },
-            "target-bridge": {
-                description: "Mapping of source bridges to remote bridges.",
-            },
-            bwlimit: {
-                description: "Override I/O bandwidth limit (in KiB/s).",
-                optional: true,
-            },
-            restart: {
-                description: "Perform a restart-migration.",
-                optional: true,
-            },
-            timeout: {
-                description: "Add a shutdown timeout for the restart-migration.",
-                optional: true,
-            },
             // TODO better to change remote migration to proxy to node?
-            "target-endpoint": {
-                type: String,
-                optional: true,
-                description: "The target endpoint to use for the connection.",
+            remote_migrate: {
+                type: pve_api_types::RemoteMigrateLxc,
+                flatten: true,
             },
         },
     },
@@ -425,15 +360,7 @@ pub async fn lxc_remote_migrate(
     target: String, // this is the destination remote name
     node: Option<String>,
     vmid: u32,
-    target_vmid: Option<u32>,
-    delete: bool,
-    online: bool,
-    target_storage: String,
-    target_bridge: String,
-    bwlimit: Option<u64>,
-    restart: Option<bool>,
-    timeout: Option<i64>,
-    target_endpoint: Option<String>,
+    remote_migrate: pve_api_types::RemoteMigrateLxc,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<RemoteUpid, Error> {
     let user_info = CachedUserInfo::new()?;
@@ -447,7 +374,7 @@ pub async fn lxc_remote_migrate(
             "resource",
             &target,
             "guest",
-            &target_vmid.unwrap_or(vmid).to_string(),
+            &remote_migrate.target_vmid.unwrap_or(vmid).to_string(),
         ],
     );
     if target_privs & PRIV_RESOURCE_MIGRATE == 0 {
@@ -456,7 +383,7 @@ pub async fn lxc_remote_migrate(
             "missing PRIV_RESOURCE_MIGRATE on target remote+vmid"
         );
     }
-    if delete {
+    if remote_migrate.delete.unwrap_or_default() {
         check_guest_delete_perms(rpcenv, &remote, vmid)?;
     }
 
@@ -477,14 +404,17 @@ pub async fn lxc_remote_migrate(
     // FIXME: For now we'll only try with the first node but we should probably try others, too, in
     // case some are offline?
 
+    // TODO: target_endpoint optional? if single node i guess
     let target_node = target
         .nodes
         .iter()
-        .find(|endpoint| match target_endpoint.as_deref() {
-            Some(target) => target == endpoint.hostname,
-            None => true,
-        })
-        .ok_or_else(|| match target_endpoint {
+        .find(
+            |endpoint| match Some(remote_migrate.target_endpoint.clone()).as_deref() {
+                Some(target) => target == endpoint.hostname,
+                None => true,
+            },
+        )
+        .ok_or_else(|| match Some(remote_migrate.target_endpoint.clone()) {
             Some(endpoint) => format_err!("{endpoint} not configured for target cluster"),
             None => format_err!("no nodes configured for target cluster"),
         })?;
@@ -504,19 +434,10 @@ pub async fn lxc_remote_migrate(
     }
 
     log::info!("forwarding remote migration requested");
-    let params = pve_api_types::RemoteMigrateLxc {
-        target_bridge,
-        target_storage,
-        delete: Some(delete),
-        online: Some(online),
-        target_vmid,
-        target_endpoint,
-        bwlimit: bwlimit.map(|limit| limit as f64),
-        restart,
-        timeout,
-    };
     log::info!("migrating vm {vmid} of node {node:?}");
-    let upid = source_conn.remote_migrate_lxc(&node, vmid, params).await?;
+    let upid = source_conn
+        .remote_migrate_lxc(&node, vmid, remote_migrate)
+        .await?;
 
     new_remote_upid(source, upid).await
 }
diff --git a/server/src/api/pve/qemu.rs b/server/src/api/pve/qemu.rs
index 54ede11..6158bef 100644
--- a/server/src/api/pve/qemu.rs
+++ b/server/src/api/pve/qemu.rs
@@ -10,11 +10,11 @@ use proxmox_sortable_macro::sortable;
 
 use pdm_api_types::remotes::REMOTE_ID_SCHEMA;
 use pdm_api_types::{
-    Authid, ConfigurationState, RemoteUpid, CIDR_FORMAT, NODE_SCHEMA, PRIV_RESOURCE_AUDIT,
-    PRIV_RESOURCE_MANAGE, PRIV_RESOURCE_MIGRATE, SNAPSHOT_NAME_SCHEMA, VMID_SCHEMA,
+    Authid, ConfigurationState, RemoteUpid, NODE_SCHEMA, PRIV_RESOURCE_AUDIT, PRIV_RESOURCE_MANAGE,
+    PRIV_RESOURCE_MIGRATE, SNAPSHOT_NAME_SCHEMA, VMID_SCHEMA,
 };
 
-use pve_api_types::{QemuMigratePreconditions, StartQemuMigrationType};
+use pve_api_types::QemuMigratePreconditions;
 
 use crate::api::pve::get_remote;
 
@@ -297,37 +297,9 @@ pub async fn qemu_shutdown(
             },
             target: { schema: NODE_SCHEMA },
             vmid: { schema: VMID_SCHEMA },
-            online: {
-                type: bool,
-                description: "Perform an online migration if the vm is running.",
-                optional: true,
-            },
-            "target-storage": {
-                description: "Mapping of source storages to target storages.",
-                optional: true,
-            },
-            bwlimit: {
-                description: "Override I/O bandwidth limit (in KiB/s).",
-                optional: true,
-            },
-            "migration-network": {
-                description: "CIDR of the (sub) network that is used for migration.",
-                type: String,
-                format: &CIDR_FORMAT,
-                optional: true,
-            },
-            "migration-type": {
-                type: StartQemuMigrationType,
-                optional: true,
-            },
-            force: {
-                description: "Allow to migrate VMs with local devices.",
-                optional: true,
-                default: false,
-            },
-            "with-local-disks": {
-                description: "Enable live storage migration for local disks.",
-                optional: true,
+            migrate: {
+                type: pve_api_types::MigrateQemu,
+                flatten: true,
             },
         },
     },
@@ -344,38 +316,23 @@ pub async fn qemu_migrate(
     remote: String,
     node: Option<String>,
     vmid: u32,
-    bwlimit: Option<u64>,
-    force: Option<bool>,
-    migration_network: Option<String>,
-    migration_type: Option<StartQemuMigrationType>,
-    online: Option<bool>,
-    target: String,
-    target_storage: Option<String>,
-    with_local_disks: Option<bool>,
+    migrate: pve_api_types::MigrateQemu,
 ) -> Result<RemoteUpid, Error> {
-    log::info!("in-cluster migration requested for remote {remote:?} vm {vmid} to node {target:?}");
+    log::info!(
+        "in-cluster migration requested for remote {remote:?} vm {vmid} to node {:?}",
+        migrate.target
+    );
 
     let (remotes, _) = pdm_config::remotes::config()?;
     let pve = connect_to_remote(&remotes, &remote)?;
 
     let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
 
-    if node == target {
+    if node == migrate.target {
         bail!("refusing migration to the same node");
     }
 
-    let params = pve_api_types::MigrateQemu {
-        bwlimit,
-        force,
-        migration_network,
-        migration_type,
-        online,
-        target,
-        targetstorage: target_storage,
-        with_local_disks,
-        with_conntrack_state: None,
-    };
-    let upid = pve.migrate_qemu(&node, vmid, params).await?;
+    let upid = pve.migrate_qemu(&node, vmid, migrate).await?;
 
     new_remote_upid(remote, upid).await
 }
@@ -431,32 +388,9 @@ async fn qemu_migrate_preconditions(
                 optional: true,
                 schema: VMID_SCHEMA,
             },
-            delete: {
-                description: "Delete the original VM and related data after successful migration.",
-                optional: true,
-                default: false,
-            },
-            online: {
-                type: bool,
-                description: "Perform an online migration if the vm is running.",
-                optional: true,
-                default: false,
-            },
-            "target-storage": {
-                description: "Mapping of source storages to target storages.",
-            },
-            "target-bridge": {
-                description: "Mapping of source bridges to remote bridges.",
-            },
-            bwlimit: {
-                description: "Override I/O bandwidth limit (in KiB/s).",
-                optional: true,
-            },
-            // TODO better to change remote migration to proxy to node?
-            "target-endpoint": {
-                type: String,
-                optional: true,
-                description: "The target endpoint to use for the connection.",
+            remote_migrate: {
+                type: pve_api_types::RemoteMigrateQemu,
+                flatten: true,
             },
         },
     },
@@ -474,13 +408,7 @@ pub async fn qemu_remote_migrate(
     target: String, // this is the destination remote name
     node: Option<String>,
     vmid: u32,
-    target_vmid: Option<u32>,
-    delete: bool,
-    online: bool,
-    target_storage: String,
-    target_bridge: String,
-    bwlimit: Option<u64>,
-    target_endpoint: Option<String>,
+    remote_migrate: pve_api_types::RemoteMigrateQemu,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<RemoteUpid, Error> {
     let user_info = CachedUserInfo::new()?;
@@ -494,7 +422,7 @@ pub async fn qemu_remote_migrate(
             "resource",
             &target,
             "guest",
-            &target_vmid.unwrap_or(vmid).to_string(),
+            &remote_migrate.target_vmid.unwrap_or(vmid).to_string(),
         ],
     );
     if target_privs & PRIV_RESOURCE_MIGRATE == 0 {
@@ -504,7 +432,7 @@ pub async fn qemu_remote_migrate(
         );
     }
 
-    if delete {
+    if remote_migrate.delete.unwrap_or_default() {
         check_guest_delete_perms(rpcenv, &remote, vmid)?;
     }
 
@@ -528,11 +456,13 @@ pub async fn qemu_remote_migrate(
     let target_node = target
         .nodes
         .iter()
-        .find(|endpoint| match target_endpoint.as_deref() {
-            Some(target) => target == endpoint.hostname,
-            None => true,
-        })
-        .ok_or_else(|| match target_endpoint {
+        .find(
+            |endpoint| match Some(remote_migrate.target_endpoint.clone()).as_deref() {
+                Some(target) => target == endpoint.hostname,
+                None => true,
+            },
+        )
+        .ok_or_else(|| match Some(remote_migrate.target_endpoint.clone()) {
             Some(endpoint) => format_err!("{endpoint} not configured for target cluster"),
             None => format_err!("no nodes configured for target cluster"),
         })?;
@@ -552,17 +482,10 @@ pub async fn qemu_remote_migrate(
     }
 
     log::info!("forwarding remote migration requested");
-    let params = pve_api_types::RemoteMigrateQemu {
-        target_bridge,
-        target_storage,
-        delete: Some(delete),
-        online: Some(online),
-        target_vmid,
-        target_endpoint,
-        bwlimit,
-    };
     log::info!("migrating vm {vmid} of node {node:?}");
-    let upid = source_conn.remote_migrate_qemu(&node, vmid, params).await?;
+    let upid = source_conn
+        .remote_migrate_qemu(&node, vmid, remote_migrate)
+        .await?;
 
     new_remote_upid(source, upid).await
 }
-- 
2.47.3



_______________________________________________
pdm-devel mailing list
pdm-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pdm-devel


      parent reply	other threads:[~2025-10-21 13:50 UTC|newest]

Thread overview: 5+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-10-21 13:50 [pdm-devel] [PATCH proxmox{, -datacenter-manager} 0/4] generate Vec's for string-lists Hannes Laimer
2025-10-21 13:50 ` [pdm-devel] [PATCH proxmox 1/3] pve-api-types: schema2rust: generate arrays for types with format `-list` Hannes Laimer
2025-10-21 13:50 ` [pdm-devel] [PATCH proxmox 2/3] pve-api-types: add regex for both storage- and bridge-pair Hannes Laimer
2025-10-21 13:50 ` [pdm-devel] [PATCH proxmox 3/3] pve-api-types: regenerate Hannes Laimer
2025-10-21 13:50 ` Hannes Laimer [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20251021135018.88877-5-h.laimer@proxmox.com \
    --to=h.laimer@proxmox.com \
    --cc=pdm-devel@lists.proxmox.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox
Service provided by Proxmox Server Solutions GmbH | Privacy | Legal