From: Dominik Csapak
To: pdm-devel@lists.proxmox.com
Date: Mon, 13 Jan 2025 16:45:42 +0100
Message-Id: <20250113154550.3462139-4-d.csapak@proxmox.com>
X-Mailer: git-send-email 2.39.5
In-Reply-To: <20250113154550.3462139-1-d.csapak@proxmox.com>
References: <20250113154550.3462139-1-d.csapak@proxmox.com>
Subject: [pdm-devel] [PATCH datacenter-manager 1/9] server: factor qemu/lxc code into own modules

so the modules don't get overly big

Signed-off-by: Dominik Csapak
---
 server/src/api/pve/lxc.rs  |  507 ++++++++++++++++++
 server/src/api/pve/mod.rs  | 1029 +-----------------------------------
 server/src/api/pve/qemu.rs |  552 +++++++++++++++++++
 3 files changed, 1066 insertions(+), 1022 deletions(-)
 create mode 100644 server/src/api/pve/lxc.rs
 create mode 100644 server/src/api/pve/qemu.rs

diff --git a/server/src/api/pve/lxc.rs b/server/src/api/pve/lxc.rs
new file mode 100644
index 0000000..b16d268
--- /dev/null
+++ b/server/src/api/pve/lxc.rs
@@ -0,0 +1,507 @@
+use anyhow::{bail, format_err, Error};
+use http::uri::Authority;
+
+use proxmox_access_control::CachedUserInfo;
+use proxmox_router::{
+    http_bail, list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap,
+};
+use proxmox_schema::api;
+use proxmox_sortable_macro::sortable;
+
+use pdm_api_types::remotes::REMOTE_ID_SCHEMA;
+use pdm_api_types::{
+    Authid, ConfigurationState, RemoteUpid, NODE_SCHEMA, PRIV_RESOURCE_AUDIT, PRIV_RESOURCE_MANAGE,
+    PRIV_RESOURCE_MIGRATE, SNAPSHOT_NAME_SCHEMA, VMID_SCHEMA,
+};
+
+use crate::api::pve::get_remote;
+
+use super::{
+    check_guest_delete_perms, check_guest_list_permissions, check_guest_permissions,
+    connect_to_remote, new_remote_upid,
+};
+
+use super::find_node_for_vm;
+
+pub const ROUTER: Router = Router::new()
+    .get(&API_METHOD_LIST_LXC)
+    .match_all("vmid", &LXC_VM_ROUTER);
+
+const LXC_VM_ROUTER: Router = Router::new()
+    .get(&list_subdirs_api_method!(LXC_VM_SUBDIRS))
+    .subdirs(LXC_VM_SUBDIRS);
+#[sortable]
+const LXC_VM_SUBDIRS: SubdirMap = &sorted!([
+    ("config", &Router::new().get(&API_METHOD_LXC_GET_CONFIG)),
+    ("rrddata", &super::rrddata::LXC_RRD_ROUTER),
+    ("start", &Router::new().post(&API_METHOD_LXC_START)),
+    ("status",
&Router::new().get(&API_METHOD_LXC_GET_STATUS)), + ("stop", &Router::new().post(&API_METHOD_LXC_STOP)), + ("shutdown", &Router::new().post(&API_METHOD_LXC_SHUTDOWN)), + ("migrate", &Router::new().post(&API_METHOD_LXC_MIGRATE)), + ( + "remote-migrate", + &Router::new().post(&API_METHOD_LXC_REMOTE_MIGRATE) + ), +]); + +#[api( + input: { + properties: { + remote: { schema: REMOTE_ID_SCHEMA }, + node: { + schema: NODE_SCHEMA, + optional: true, + }, + }, + }, + returns: { + type: Array, + description: "Get a list of containers.", + items: { type: pve_api_types::VmEntry }, + }, + access: { + permission: &Permission::Privilege(&["resource", "{remote}"], PRIV_RESOURCE_AUDIT, false), + }, +)] +/// Query the remote's list of lxc containers. If no node is provided, the all nodes are queried. +pub async fn list_lxc( + remote: String, + node: Option, + rpcenv: &mut dyn RpcEnvironment, +) -> Result, Error> { + // FIXME: top_level_allowed is always true because of schema check above, replace with Anybody + // and fine-grained checks once those are implemented for all API calls.. + let (auth_id, user_info, top_level_allowed) = check_guest_list_permissions(&remote, rpcenv)?; + + let (remotes, _) = pdm_config::remotes::config()?; + + let pve = connect_to_remote(&remotes, &remote)?; + + let list = if let Some(node) = node { + pve.list_lxc(&node).await? + } else { + let mut list = Vec::new(); + for node in pve.list_nodes().await? { + list.extend(pve.list_lxc(&node.node).await?); + } + list + }; + + if top_level_allowed { + return Ok(list); + } + + Ok(list + .into_iter() + .filter(|entry| { + check_guest_permissions( + &auth_id, + &user_info, + &remote, + PRIV_RESOURCE_AUDIT, + entry.vmid, + ) + }) + .collect()) +} + +#[api( + input: { + properties: { + remote: { schema: REMOTE_ID_SCHEMA }, + node: { + schema: NODE_SCHEMA, + optional: true, + }, + vmid: { schema: VMID_SCHEMA }, + state: { type: ConfigurationState }, + snapshot: { + schema: SNAPSHOT_NAME_SCHEMA, + optional: true, + }, + }, + }, + returns: { type: pve_api_types::LxcConfig }, + access: { + permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_AUDIT, false), + }, +)] +/// Get the configuration of an lxc container from a remote. If a node is provided, the container +/// must be on that node, otherwise the node is determined automatically. +pub async fn lxc_get_config( + remote: String, + node: Option, + vmid: u32, + state: ConfigurationState, + snapshot: Option, +) -> Result { + let (remotes, _) = pdm_config::remotes::config()?; + + let pve = connect_to_remote(&remotes, &remote)?; + + let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; + + Ok(pve + .lxc_get_config(&node, vmid, state.current(), snapshot) + .await?) +} + +#[api( + input: { + properties: { + remote: { schema: REMOTE_ID_SCHEMA }, + node: { + schema: NODE_SCHEMA, + optional: true, + }, + vmid: { schema: VMID_SCHEMA }, + }, + }, + returns: { type: pve_api_types::QemuStatus }, + access: { + permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_AUDIT, false), + }, +)] +/// Get the status of an LXC guest from a remote. If a node is provided, the guest must be on that +/// node, otherwise the node is determined automatically. 
+pub async fn lxc_get_status( + remote: String, + node: Option, + vmid: u32, +) -> Result { + let (remotes, _) = pdm_config::remotes::config()?; + + let pve = connect_to_remote(&remotes, &remote)?; + + let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; + + Ok(pve.lxc_get_status(&node, vmid).await?) +} + +#[api( + input: { + properties: { + remote: { schema: REMOTE_ID_SCHEMA }, + node: { + schema: NODE_SCHEMA, + optional: true, + }, + vmid: { schema: VMID_SCHEMA }, + }, + }, + returns: { type: RemoteUpid }, + access: { + permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MANAGE, false), + }, +)] +/// Start a remote lxc container. +pub async fn lxc_start( + remote: String, + node: Option, + vmid: u32, +) -> Result { + let (remotes, _) = pdm_config::remotes::config()?; + + let pve = connect_to_remote(&remotes, &remote)?; + + let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; + + let upid = pve.start_lxc_async(&node, vmid, Default::default()).await?; + + new_remote_upid(remote, upid) +} + +#[api( + input: { + properties: { + remote: { schema: REMOTE_ID_SCHEMA }, + node: { + schema: NODE_SCHEMA, + optional: true, + }, + vmid: { schema: VMID_SCHEMA }, + }, + }, + returns: { type: RemoteUpid }, + access: { + permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MANAGE, false), + }, +)] +/// Stop a remote lxc container. +pub async fn lxc_stop( + remote: String, + node: Option, + vmid: u32, +) -> Result { + let (remotes, _) = pdm_config::remotes::config()?; + + let pve = connect_to_remote(&remotes, &remote)?; + + let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; + + let upid = pve.stop_lxc_async(&node, vmid, Default::default()).await?; + + new_remote_upid(remote, upid) +} + +#[api( + input: { + properties: { + remote: { schema: REMOTE_ID_SCHEMA }, + node: { + schema: NODE_SCHEMA, + optional: true, + }, + vmid: { schema: VMID_SCHEMA }, + }, + }, + returns: { type: RemoteUpid }, + access: { + permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MANAGE, false), + }, +)] +/// Perform a shutdown of a remote lxc container. 
+pub async fn lxc_shutdown( + remote: String, + node: Option, + vmid: u32, +) -> Result { + let (remotes, _) = pdm_config::remotes::config()?; + + let pve = connect_to_remote(&remotes, &remote)?; + + let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; + + let upid = pve + .shutdown_lxc_async(&node, vmid, Default::default()) + .await?; + + new_remote_upid(remote, upid) +} + +#[api( + input: { + properties: { + remote: { schema: REMOTE_ID_SCHEMA }, + node: { + schema: NODE_SCHEMA, + optional: true, + }, + target: { schema: NODE_SCHEMA }, + vmid: { schema: VMID_SCHEMA }, + online: { + type: bool, + description: "Attempt an online migration if the container is running.", + optional: true, + }, + restart: { + type: bool, + description: "Perform a restart-migration if the container is running.", + optional: true, + }, + "target-storage": { + description: "Mapping of source storages to target storages.", + optional: true, + }, + bwlimit: { + description: "Override I/O bandwidth limit (in KiB/s).", + optional: true, + }, + timeout: { + description: "Shutdown timeout for restart-migrations.", + optional: true, + }, + }, + }, + returns: { type: RemoteUpid }, + access: { + permission: &Permission::And(&[ + &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MIGRATE, false), + ]), + }, +)] +/// Perform an in-cluster migration of a VM. +#[allow(clippy::too_many_arguments)] +pub async fn lxc_migrate( + remote: String, + node: Option, + vmid: u32, + bwlimit: Option, + restart: Option, + online: Option, + target: String, + target_storage: Option, + timeout: Option, +) -> Result { + let bwlimit = bwlimit.map(|n| n as f64); + + log::info!("in-cluster migration requested for remote {remote:?} ct {vmid} to node {target:?}"); + + let (remotes, _) = pdm_config::remotes::config()?; + let pve = connect_to_remote(&remotes, &remote)?; + + let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; + + if node == target { + bail!("refusing migration to the same node"); + } + + let params = pve_api_types::MigrateLxc { + bwlimit, + online, + restart, + target, + target_storage, + timeout, + }; + let upid = pve.migrate_lxc(&node, vmid, params).await?; + + new_remote_upid(remote, upid) +} + +#[api( + input: { + properties: { + remote: { schema: REMOTE_ID_SCHEMA }, + target: { schema: REMOTE_ID_SCHEMA }, + node: { + schema: NODE_SCHEMA, + optional: true, + }, + vmid: { schema: VMID_SCHEMA }, + "target-vmid": { + optional: true, + schema: VMID_SCHEMA, + }, + delete: { + description: "Delete the original VM and related data after successful migration.", + optional: true, + default: false, + }, + online: { + type: bool, + description: "Perform an online migration if the vm is running.", + optional: true, + default: false, + }, + "target-storage": { + description: "Mapping of source storages to target storages.", + }, + "target-bridge": { + description: "Mapping of source bridges to remote bridges.", + }, + bwlimit: { + description: "Override I/O bandwidth limit (in KiB/s).", + optional: true, + }, + restart: { + description: "Perform a restart-migration.", + optional: true, + }, + timeout: { + description: "Add a shutdown timeout for the restart-migration.", + optional: true, + }, + }, + }, + returns: { type: RemoteUpid }, + access: { + permission: + &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MIGRATE, false), + description: "requires PRIV_RESOURCE_MIGRATE on /resource/{remote}/guest/{vmid} for source and target remove and vmid", + }, +)] +/// 
Perform a remote migration of an lxc container. +#[allow(clippy::too_many_arguments)] +pub async fn lxc_remote_migrate( + remote: String, // this is the source + target: String, // this is the destination remote name + node: Option, + vmid: u32, + target_vmid: Option, + delete: bool, + online: bool, + target_storage: String, + target_bridge: String, + bwlimit: Option, + restart: Option, + timeout: Option, + rpcenv: &mut dyn RpcEnvironment, +) -> Result { + let user_info = CachedUserInfo::new()?; + let auth_id: Authid = rpcenv + .get_auth_id() + .ok_or_else(|| format_err!("no authid available"))? + .parse()?; + let target_privs = user_info.lookup_privs( + &auth_id, + &[ + "resource", + &target, + "guest", + &target_vmid.unwrap_or(vmid).to_string(), + ], + ); + if target_privs & PRIV_RESOURCE_MIGRATE == 0 { + http_bail!( + UNAUTHORIZED, + "missing PRIV_RESOURCE_MIGRATE on target remote+vmid" + ); + } + if delete { + check_guest_delete_perms(rpcenv, &remote, vmid)?; + } + + let source = remote; // let's stick to "source" and "target" naming + + log::info!("remote migration requested"); + + if source == target { + bail!("source and destination clusters must be different"); + } + + let (remotes, _) = pdm_config::remotes::config()?; + let target = get_remote(&remotes, &target)?; + let source_conn = connect_to_remote(&remotes, &source)?; + + let node = find_node_for_vm(node, vmid, source_conn.as_ref()).await?; + + // FIXME: For now we'll only try with the first node but we should probably try others, too, in + // case some are offline? + + let target_node = target + .nodes + .first() + .ok_or_else(|| format_err!("no nodes configured for target cluster"))?; + let target_host_port: Authority = target_node.hostname.parse()?; + let mut target_endpoint = format!( + "host={host},port={port},apitoken=PVEAPIToken={authid}={secret}", + host = target_host_port.host(), + authid = target.authid, + secret = target.token, + port = target_host_port.port_u16().unwrap_or(8006), + ); + if let Some(fp) = target_node.fingerprint.as_deref() { + target_endpoint.reserve(fp.len() + ",fingerprint=".len()); + target_endpoint.push_str(",fingerprint="); + target_endpoint.push_str(fp); + } + + log::info!("forwarding remote migration requested"); + let params = pve_api_types::RemoteMigrateLxc { + target_bridge, + target_storage, + delete: Some(delete), + online: Some(online), + target_vmid, + target_endpoint, + bwlimit: bwlimit.map(|limit| limit as f64), + restart, + timeout, + }; + log::info!("migrating vm {vmid} of node {node:?}"); + let upid = source_conn.remote_migrate_lxc(&node, vmid, params).await?; + + new_remote_upid(source, upid) +} diff --git a/server/src/api/pve/mod.rs b/server/src/api/pve/mod.rs index ae44722..48e16b2 100644 --- a/server/src/api/pve/mod.rs +++ b/server/src/api/pve/mod.rs @@ -3,7 +3,6 @@ use std::sync::Arc; use anyhow::{bail, format_err, Error}; -use http::uri::Authority; use proxmox_access_control::CachedUserInfo; use proxmox_router::{ @@ -17,22 +16,20 @@ use proxmox_sortable_macro::sortable; use pdm_api_types::remotes::{NodeUrl, Remote, RemoteType, REMOTE_ID_SCHEMA}; use pdm_api_types::resource::PveResource; use pdm_api_types::{ - Authid, ConfigurationState, RemoteUpid, CIDR_FORMAT, HOST_OPTIONAL_PORT_FORMAT, NODE_SCHEMA, - PRIV_RESOURCE_AUDIT, PRIV_RESOURCE_DELETE, PRIV_RESOURCE_MANAGE, PRIV_RESOURCE_MIGRATE, - PRIV_SYS_MODIFY, SNAPSHOT_NAME_SCHEMA, VMID_SCHEMA, + Authid, RemoteUpid, HOST_OPTIONAL_PORT_FORMAT, PRIV_RESOURCE_AUDIT, PRIV_RESOURCE_DELETE, + PRIV_SYS_MODIFY, }; use 
pve_api_types::client::PveClient; -use pve_api_types::{ - ClusterResourceKind, ClusterResourceType, ListRealm, PveUpid, QemuMigratePreconditions, - StartQemuMigrationType, -}; +use pve_api_types::{ClusterResourceKind, ClusterResourceType, ListRealm, PveUpid}; use super::resources::{map_pve_lxc, map_pve_node, map_pve_qemu, map_pve_storage}; use crate::{connection, task_cache}; +mod lxc; mod node; +mod qemu; mod rrddata; pub mod tasks; @@ -58,66 +55,17 @@ const MAIN_ROUTER: Router = Router::new() #[sortable] const REMOTE_SUBDIRS: SubdirMap = &sorted!([ - ("lxc", &LXC_ROUTER), + ("lxc", &lxc::ROUTER), ("nodes", &NODES_ROUTER), - ("qemu", &QEMU_ROUTER), + ("qemu", &qemu::ROUTER), ("resources", &RESOURCES_ROUTER), ("tasks", &tasks::ROUTER), ]); -const LXC_ROUTER: Router = Router::new() - .get(&API_METHOD_LIST_LXC) - .match_all("vmid", &LXC_VM_ROUTER); - -const LXC_VM_ROUTER: Router = Router::new() - .get(&list_subdirs_api_method!(LXC_VM_SUBDIRS)) - .subdirs(LXC_VM_SUBDIRS); -#[sortable] -const LXC_VM_SUBDIRS: SubdirMap = &sorted!([ - ("config", &Router::new().get(&API_METHOD_LXC_GET_CONFIG)), - ("rrddata", &rrddata::LXC_RRD_ROUTER), - ("start", &Router::new().post(&API_METHOD_LXC_START)), - ("status", &Router::new().get(&API_METHOD_LXC_GET_STATUS)), - ("stop", &Router::new().post(&API_METHOD_LXC_STOP)), - ("shutdown", &Router::new().post(&API_METHOD_LXC_SHUTDOWN)), - ("migrate", &Router::new().post(&API_METHOD_LXC_MIGRATE)), - ( - "remote-migrate", - &Router::new().post(&API_METHOD_LXC_REMOTE_MIGRATE) - ), -]); - const NODES_ROUTER: Router = Router::new() .get(&API_METHOD_LIST_NODES) .match_all("node", &node::ROUTER); -const QEMU_ROUTER: Router = Router::new() - .get(&API_METHOD_LIST_QEMU) - .match_all("vmid", &QEMU_VM_ROUTER); - -const QEMU_VM_ROUTER: Router = Router::new() - .get(&list_subdirs_api_method!(QEMU_VM_SUBDIRS)) - .subdirs(QEMU_VM_SUBDIRS); -#[sortable] -const QEMU_VM_SUBDIRS: SubdirMap = &sorted!([ - ("config", &Router::new().get(&API_METHOD_QEMU_GET_CONFIG)), - ("rrddata", &rrddata::QEMU_RRD_ROUTER), - ("start", &Router::new().post(&API_METHOD_QEMU_START)), - ("status", &Router::new().get(&API_METHOD_QEMU_GET_STATUS)), - ("stop", &Router::new().post(&API_METHOD_QEMU_STOP)), - ("shutdown", &Router::new().post(&API_METHOD_QEMU_SHUTDOWN)), - ( - "migrate", - &Router::new() - .get(&API_METHOD_QEMU_MIGRATE_PRECONDITIONS) - .post(&API_METHOD_QEMU_MIGRATE) - ), - ( - "remote-migrate", - &Router::new().post(&API_METHOD_QEMU_REMOTE_MIGRATE) - ), -]); - const RESOURCES_ROUTER: Router = Router::new().get(&API_METHOD_CLUSTER_RESOURCES); // converts a remote + PveUpid into a RemoteUpid and starts tracking it @@ -274,128 +222,6 @@ fn check_guest_permissions( auth_privs & privilege != 0 } -#[api( - input: { - properties: { - remote: { schema: REMOTE_ID_SCHEMA }, - node: { - schema: NODE_SCHEMA, - optional: true, - }, - }, - }, - returns: { - type: Array, - description: "Get a list of VMs", - items: { type: pve_api_types::VmEntry }, - }, - access: { - permission: &Permission::Privilege(&["resource", "{remote}"], PRIV_RESOURCE_AUDIT, false), - }, -)] -/// Query the remote's list of qemu VMs. If no node is provided, the all nodes are queried. -pub async fn list_qemu( - remote: String, - node: Option, - rpcenv: &mut dyn RpcEnvironment, -) -> Result, Error> { - // FIXME: top_level_allowed is always true because of schema check above, replace with Anybody - // and fine-grained checks once those are implemented for all API calls.. 
- let (auth_id, user_info, top_level_allowed) = check_guest_list_permissions(&remote, rpcenv)?; - - let (remotes, _) = pdm_config::remotes::config()?; - - let pve = connect_to_remote(&remotes, &remote)?; - - let list = if let Some(node) = node { - pve.list_qemu(&node, None).await? - } else { - let mut list = Vec::new(); - for node in pve.list_nodes().await? { - list.extend(pve.list_qemu(&node.node, None).await?); - } - list - }; - - if top_level_allowed { - return Ok(list); - } - - Ok(list - .into_iter() - .filter(|entry| { - check_guest_permissions( - &auth_id, - &user_info, - &remote, - PRIV_RESOURCE_AUDIT, - entry.vmid, - ) - }) - .collect()) -} - -#[api( - input: { - properties: { - remote: { schema: REMOTE_ID_SCHEMA }, - node: { - schema: NODE_SCHEMA, - optional: true, - }, - }, - }, - returns: { - type: Array, - description: "Get a list of containers.", - items: { type: pve_api_types::VmEntry }, - }, - access: { - permission: &Permission::Privilege(&["resource", "{remote}"], PRIV_RESOURCE_AUDIT, false), - }, -)] -/// Query the remote's list of lxc containers. If no node is provided, the all nodes are queried. -pub async fn list_lxc( - remote: String, - node: Option, - rpcenv: &mut dyn RpcEnvironment, -) -> Result, Error> { - // FIXME: top_level_allowed is always true because of schema check above, replace with Anybody - // and fine-grained checks once those are implemented for all API calls.. - let (auth_id, user_info, top_level_allowed) = check_guest_list_permissions(&remote, rpcenv)?; - - let (remotes, _) = pdm_config::remotes::config()?; - - let pve = connect_to_remote(&remotes, &remote)?; - - let list = if let Some(node) = node { - pve.list_lxc(&node).await? - } else { - let mut list = Vec::new(); - for node in pve.list_nodes().await? { - list.extend(pve.list_lxc(&node.node).await?); - } - list - }; - - if top_level_allowed { - return Ok(list); - } - - Ok(list - .into_iter() - .filter(|entry| { - check_guest_permissions( - &auth_id, - &user_info, - &remote, - PRIV_RESOURCE_AUDIT, - entry.vmid, - ) - }) - .collect()) -} - async fn find_node_for_vm( node: Option, vmid: u32, @@ -414,183 +240,6 @@ async fn find_node_for_vm( }) } -#[api( - input: { - properties: { - remote: { schema: REMOTE_ID_SCHEMA }, - node: { - schema: NODE_SCHEMA, - optional: true, - }, - vmid: { schema: VMID_SCHEMA }, - state: { type: ConfigurationState }, - snapshot: { - schema: SNAPSHOT_NAME_SCHEMA, - optional: true, - }, - }, - }, - returns: { type: pve_api_types::QemuConfig }, - access: { - permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_AUDIT, false), - }, -)] -/// Get the configuration of a qemu VM from a remote. If a node is provided, the VM must be on that -/// node, otherwise the node is determined automatically. -pub async fn qemu_get_config( - remote: String, - node: Option, - vmid: u32, - state: ConfigurationState, - snapshot: Option, -) -> Result { - let (remotes, _) = pdm_config::remotes::config()?; - - let pve = connect_to_remote(&remotes, &remote)?; - - let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; - - Ok(pve - .qemu_get_config(&node, vmid, state.current(), snapshot) - .await?) 
-} - -#[api( - input: { - properties: { - remote: { schema: REMOTE_ID_SCHEMA }, - node: { - schema: NODE_SCHEMA, - optional: true, - }, - vmid: { schema: VMID_SCHEMA }, - }, - }, - returns: { type: pve_api_types::QemuStatus }, - access: { - permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_AUDIT, false), - }, -)] -/// Get the status of a qemu VM from a remote. If a node is provided, the VM must be on that -/// node, otherwise the node is determined automatically. -pub async fn qemu_get_status( - remote: String, - node: Option, - vmid: u32, -) -> Result { - let (remotes, _) = pdm_config::remotes::config()?; - - let pve = connect_to_remote(&remotes, &remote)?; - - let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; - - Ok(pve.qemu_get_status(&node, vmid).await?) -} - -#[api( - input: { - properties: { - remote: { schema: REMOTE_ID_SCHEMA }, - node: { - schema: NODE_SCHEMA, - optional: true, - }, - vmid: { schema: VMID_SCHEMA }, - }, - }, - returns: { type: RemoteUpid }, - access: { - permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MANAGE, false), - }, -)] -/// Start a remote qemu vm. -pub async fn qemu_start( - remote: String, - node: Option, - vmid: u32, -) -> Result { - let (remotes, _) = pdm_config::remotes::config()?; - - let pve = connect_to_remote(&remotes, &remote)?; - - let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; - - let upid = pve - .start_qemu_async(&node, vmid, Default::default()) - .await?; - - new_remote_upid(remote, upid) -} - -#[api( - input: { - properties: { - remote: { schema: REMOTE_ID_SCHEMA }, - node: { - schema: NODE_SCHEMA, - optional: true, - }, - vmid: { schema: VMID_SCHEMA }, - }, - }, - returns: { type: RemoteUpid }, - access: { - permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MANAGE, false), - }, -)] -/// Stop a remote qemu vm. -pub async fn qemu_stop( - remote: String, - node: Option, - vmid: u32, -) -> Result { - let (remotes, _) = pdm_config::remotes::config()?; - - let pve = connect_to_remote(&remotes, &remote)?; - - let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; - - let upid = pve.stop_qemu_async(&node, vmid, Default::default()).await?; - - new_remote_upid(remote, upid) -} - -#[api( - input: { - properties: { - remote: { schema: REMOTE_ID_SCHEMA }, - node: { - schema: NODE_SCHEMA, - optional: true, - }, - vmid: { schema: VMID_SCHEMA }, - }, - }, - returns: { type: RemoteUpid }, - access: { - permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MANAGE, false), - }, -)] -/// Perform a shutdown of a remote qemu vm. 
-pub async fn qemu_shutdown( - remote: String, - node: Option, - vmid: u32, -) -> Result { - let (remotes, _) = pdm_config::remotes::config()?; - - let pve = connect_to_remote(&remotes, &remote)?; - - let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; - - let upid = pve - .shutdown_qemu_async(&node, vmid, Default::default()) - .await?; - - //(remote, upid.to_string()).try_into() - new_remote_upid(remote, upid) -} - fn check_guest_delete_perms( rpcenv: &mut dyn RpcEnvironment, remote: &str, @@ -609,670 +258,6 @@ fn check_guest_delete_perms( ) } -#[api( - input: { - properties: { - remote: { schema: REMOTE_ID_SCHEMA }, - node: { - schema: NODE_SCHEMA, - optional: true, - }, - target: { schema: NODE_SCHEMA }, - vmid: { schema: VMID_SCHEMA }, - online: { - type: bool, - description: "Perform an online migration if the vm is running.", - optional: true, - }, - "target-storage": { - description: "Mapping of source storages to target storages.", - optional: true, - }, - bwlimit: { - description: "Override I/O bandwidth limit (in KiB/s).", - optional: true, - }, - "migration-network": { - description: "CIDR of the (sub) network that is used for migration.", - type: String, - format: &CIDR_FORMAT, - optional: true, - }, - "migration-type": { - type: StartQemuMigrationType, - optional: true, - }, - force: { - description: "Allow to migrate VMs with local devices.", - optional: true, - default: false, - }, - "with-local-disks": { - description: "Enable live storage migration for local disks.", - optional: true, - }, - }, - }, - returns: { type: RemoteUpid }, - access: { - permission: &Permission::And(&[ - &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MIGRATE, false), - ]), - }, -)] -/// Perform an in-cluster migration of a VM. 
-#[allow(clippy::too_many_arguments)] -pub async fn qemu_migrate( - remote: String, - node: Option, - vmid: u32, - bwlimit: Option, - force: Option, - migration_network: Option, - migration_type: Option, - online: Option, - target: String, - target_storage: Option, - with_local_disks: Option, -) -> Result { - log::info!("in-cluster migration requested for remote {remote:?} vm {vmid} to node {target:?}"); - - let (remotes, _) = pdm_config::remotes::config()?; - let pve = connect_to_remote(&remotes, &remote)?; - - let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; - - if node == target { - bail!("refusing migration to the same node"); - } - - let params = pve_api_types::MigrateQemu { - bwlimit, - force, - migration_network, - migration_type, - online, - target, - targetstorage: target_storage, - with_local_disks, - }; - let upid = pve.migrate_qemu(&node, vmid, params).await?; - //(remote, upid.to_string()).try_into() - new_remote_upid(remote, upid) -} - -#[api( - input: { - properties: { - remote: { schema: REMOTE_ID_SCHEMA }, - node: { - schema: NODE_SCHEMA, - optional: true, - }, - target: { - schema: NODE_SCHEMA, - optional: true, - }, - vmid: { schema: VMID_SCHEMA }, - } - }, - access: { - permission: &Permission::And(&[ - &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MIGRATE, false), - ]), - }, -)] -/// Qemu (local) migrate preconditions -async fn qemu_migrate_preconditions( - remote: String, - node: Option, - target: Option, - vmid: u32, -) -> Result { - let (remotes, _) = pdm_config::remotes::config()?; - let pve = connect_to_remote(&remotes, &remote)?; - - let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; - - let res = pve.qemu_migrate_preconditions(&node, vmid, target).await?; - Ok(res) -} - -#[api( - input: { - properties: { - remote: { schema: REMOTE_ID_SCHEMA }, - target: { schema: REMOTE_ID_SCHEMA }, - node: { - schema: NODE_SCHEMA, - optional: true, - }, - vmid: { schema: VMID_SCHEMA }, - "target-vmid": { - optional: true, - schema: VMID_SCHEMA, - }, - delete: { - description: "Delete the original VM and related data after successful migration.", - optional: true, - default: false, - }, - online: { - type: bool, - description: "Perform an online migration if the vm is running.", - optional: true, - default: false, - }, - "target-storage": { - description: "Mapping of source storages to target storages.", - }, - "target-bridge": { - description: "Mapping of source bridges to remote bridges.", - }, - bwlimit: { - description: "Override I/O bandwidth limit (in KiB/s).", - optional: true, - } - }, - }, - returns: { type: RemoteUpid }, - access: { - permission: - &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MIGRATE, false), - description: "requires PRIV_RESOURCE_MIGRATE on /resource/{remote}/guest/{vmid} for source and target remove and vmid", - }, -)] -/// Perform a remote migration of a VM. -#[allow(clippy::too_many_arguments)] -pub async fn qemu_remote_migrate( - remote: String, // this is the source - target: String, // this is the destination remote name - node: Option, - vmid: u32, - target_vmid: Option, - delete: bool, - online: bool, - target_storage: String, - target_bridge: String, - bwlimit: Option, - rpcenv: &mut dyn RpcEnvironment, -) -> Result { - let user_info = CachedUserInfo::new()?; - let auth_id: Authid = rpcenv - .get_auth_id() - .ok_or_else(|| format_err!("no authid available"))? 
- .parse()?; - let target_privs = user_info.lookup_privs( - &auth_id, - &[ - "resource", - &target, - "guest", - &target_vmid.unwrap_or(vmid).to_string(), - ], - ); - if target_privs & PRIV_RESOURCE_MIGRATE == 0 { - http_bail!( - UNAUTHORIZED, - "missing PRIV_RESOURCE_MIGRATE on target remote+vmid" - ); - } - - if delete { - check_guest_delete_perms(rpcenv, &remote, vmid)?; - } - - let source = remote; // let's stick to "source" and "target" naming - - log::info!("remote migration requested"); - - if source == target { - bail!("source and destination clusters must be different"); - } - - let (remotes, _) = pdm_config::remotes::config()?; - let target = get_remote(&remotes, &target)?; - let source_conn = connect_to_remote(&remotes, &source)?; - - let node = find_node_for_vm(node, vmid, source_conn.as_ref()).await?; - - // FIXME: For now we'll only try with the first node but we should probably try others, too, in - // case some are offline? - - let target_node = target - .nodes - .first() - .ok_or_else(|| format_err!("no nodes configured for target cluster"))?; - let target_host_port: Authority = target_node.hostname.parse()?; - let mut target_endpoint = format!( - "host={host},port={port},apitoken=PVEAPIToken={authid}={secret}", - host = target_host_port.host(), - authid = target.authid, - secret = target.token, - port = target_host_port.port_u16().unwrap_or(8006), - ); - if let Some(fp) = target_node.fingerprint.as_deref() { - target_endpoint.reserve(fp.len() + ",fingerprint=".len()); - target_endpoint.push_str(",fingerprint="); - target_endpoint.push_str(fp); - } - - log::info!("forwarding remote migration requested"); - let params = pve_api_types::RemoteMigrateQemu { - target_bridge, - target_storage, - delete: Some(delete), - online: Some(online), - target_vmid, - target_endpoint, - bwlimit, - }; - log::info!("migrating vm {vmid} of node {node:?}"); - let upid = source_conn.remote_migrate_qemu(&node, vmid, params).await?; - - (source, upid.to_string()).try_into() -} - -#[api( - input: { - properties: { - remote: { schema: REMOTE_ID_SCHEMA }, - node: { - schema: NODE_SCHEMA, - optional: true, - }, - vmid: { schema: VMID_SCHEMA }, - state: { type: ConfigurationState }, - snapshot: { - schema: SNAPSHOT_NAME_SCHEMA, - optional: true, - }, - }, - }, - returns: { type: pve_api_types::LxcConfig }, - access: { - permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_AUDIT, false), - }, -)] -/// Get the configuration of an lxc container from a remote. If a node is provided, the container -/// must be on that node, otherwise the node is determined automatically. -pub async fn lxc_get_config( - remote: String, - node: Option, - vmid: u32, - state: ConfigurationState, - snapshot: Option, -) -> Result { - let (remotes, _) = pdm_config::remotes::config()?; - - let pve = connect_to_remote(&remotes, &remote)?; - - let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; - - Ok(pve - .lxc_get_config(&node, vmid, state.current(), snapshot) - .await?) -} - -#[api( - input: { - properties: { - remote: { schema: REMOTE_ID_SCHEMA }, - node: { - schema: NODE_SCHEMA, - optional: true, - }, - vmid: { schema: VMID_SCHEMA }, - }, - }, - returns: { type: pve_api_types::QemuStatus }, - access: { - permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_AUDIT, false), - }, -)] -/// Get the status of an LXC guest from a remote. If a node is provided, the guest must be on that -/// node, otherwise the node is determined automatically. 
-pub async fn lxc_get_status( - remote: String, - node: Option, - vmid: u32, -) -> Result { - let (remotes, _) = pdm_config::remotes::config()?; - - let pve = connect_to_remote(&remotes, &remote)?; - - let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; - - Ok(pve.lxc_get_status(&node, vmid).await?) -} - -#[api( - input: { - properties: { - remote: { schema: REMOTE_ID_SCHEMA }, - node: { - schema: NODE_SCHEMA, - optional: true, - }, - vmid: { schema: VMID_SCHEMA }, - }, - }, - returns: { type: RemoteUpid }, - access: { - permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MANAGE, false), - }, -)] -/// Start a remote lxc container. -pub async fn lxc_start( - remote: String, - node: Option, - vmid: u32, -) -> Result { - let (remotes, _) = pdm_config::remotes::config()?; - - let pve = connect_to_remote(&remotes, &remote)?; - - let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; - - let upid = pve.start_lxc_async(&node, vmid, Default::default()).await?; - - new_remote_upid(remote, upid) -} - -#[api( - input: { - properties: { - remote: { schema: REMOTE_ID_SCHEMA }, - node: { - schema: NODE_SCHEMA, - optional: true, - }, - vmid: { schema: VMID_SCHEMA }, - }, - }, - returns: { type: RemoteUpid }, - access: { - permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MANAGE, false), - }, -)] -/// Stop a remote lxc container. -pub async fn lxc_stop( - remote: String, - node: Option, - vmid: u32, -) -> Result { - let (remotes, _) = pdm_config::remotes::config()?; - - let pve = connect_to_remote(&remotes, &remote)?; - - let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; - - let upid = pve.stop_lxc_async(&node, vmid, Default::default()).await?; - - new_remote_upid(remote, upid) -} - -#[api( - input: { - properties: { - remote: { schema: REMOTE_ID_SCHEMA }, - node: { - schema: NODE_SCHEMA, - optional: true, - }, - vmid: { schema: VMID_SCHEMA }, - }, - }, - returns: { type: RemoteUpid }, - access: { - permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MANAGE, false), - }, -)] -/// Perform a shutdown of a remote lxc container. 
-pub async fn lxc_shutdown( - remote: String, - node: Option, - vmid: u32, -) -> Result { - let (remotes, _) = pdm_config::remotes::config()?; - - let pve = connect_to_remote(&remotes, &remote)?; - - let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; - - let upid = pve - .shutdown_lxc_async(&node, vmid, Default::default()) - .await?; - - new_remote_upid(remote, upid) -} - -#[api( - input: { - properties: { - remote: { schema: REMOTE_ID_SCHEMA }, - node: { - schema: NODE_SCHEMA, - optional: true, - }, - target: { schema: NODE_SCHEMA }, - vmid: { schema: VMID_SCHEMA }, - online: { - type: bool, - description: "Attempt an online migration if the container is running.", - optional: true, - }, - restart: { - type: bool, - description: "Perform a restart-migration if the container is running.", - optional: true, - }, - "target-storage": { - description: "Mapping of source storages to target storages.", - optional: true, - }, - bwlimit: { - description: "Override I/O bandwidth limit (in KiB/s).", - optional: true, - }, - timeout: { - description: "Shutdown timeout for restart-migrations.", - optional: true, - }, - }, - }, - returns: { type: RemoteUpid }, - access: { - permission: &Permission::And(&[ - &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MIGRATE, false), - ]), - }, -)] -/// Perform an in-cluster migration of a VM. -#[allow(clippy::too_many_arguments)] -pub async fn lxc_migrate( - remote: String, - node: Option, - vmid: u32, - bwlimit: Option, - restart: Option, - online: Option, - target: String, - target_storage: Option, - timeout: Option, -) -> Result { - let bwlimit = bwlimit.map(|n| n as f64); - - log::info!("in-cluster migration requested for remote {remote:?} ct {vmid} to node {target:?}"); - - let (remotes, _) = pdm_config::remotes::config()?; - let pve = connect_to_remote(&remotes, &remote)?; - - let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; - - if node == target { - bail!("refusing migration to the same node"); - } - - let params = pve_api_types::MigrateLxc { - bwlimit, - online, - restart, - target, - target_storage, - timeout, - }; - let upid = pve.migrate_lxc(&node, vmid, params).await?; - - new_remote_upid(remote, upid) -} - -#[api( - input: { - properties: { - remote: { schema: REMOTE_ID_SCHEMA }, - target: { schema: REMOTE_ID_SCHEMA }, - node: { - schema: NODE_SCHEMA, - optional: true, - }, - vmid: { schema: VMID_SCHEMA }, - "target-vmid": { - optional: true, - schema: VMID_SCHEMA, - }, - delete: { - description: "Delete the original VM and related data after successful migration.", - optional: true, - default: false, - }, - online: { - type: bool, - description: "Perform an online migration if the vm is running.", - optional: true, - default: false, - }, - "target-storage": { - description: "Mapping of source storages to target storages.", - }, - "target-bridge": { - description: "Mapping of source bridges to remote bridges.", - }, - bwlimit: { - description: "Override I/O bandwidth limit (in KiB/s).", - optional: true, - }, - restart: { - description: "Perform a restart-migration.", - optional: true, - }, - timeout: { - description: "Add a shutdown timeout for the restart-migration.", - optional: true, - }, - }, - }, - returns: { type: RemoteUpid }, - access: { - permission: - &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MIGRATE, false), - description: "requires PRIV_RESOURCE_MIGRATE on /resource/{remote}/guest/{vmid} for source and target remove and vmid", - }, -)] -/// 
Perform a remote migration of an lxc container. -#[allow(clippy::too_many_arguments)] -pub async fn lxc_remote_migrate( - remote: String, // this is the source - target: String, // this is the destination remote name - node: Option, - vmid: u32, - target_vmid: Option, - delete: bool, - online: bool, - target_storage: String, - target_bridge: String, - bwlimit: Option, - restart: Option, - timeout: Option, - rpcenv: &mut dyn RpcEnvironment, -) -> Result { - let user_info = CachedUserInfo::new()?; - let auth_id: Authid = rpcenv - .get_auth_id() - .ok_or_else(|| format_err!("no authid available"))? - .parse()?; - let target_privs = user_info.lookup_privs( - &auth_id, - &[ - "resource", - &target, - "guest", - &target_vmid.unwrap_or(vmid).to_string(), - ], - ); - if target_privs & PRIV_RESOURCE_MIGRATE == 0 { - http_bail!( - UNAUTHORIZED, - "missing PRIV_RESOURCE_MIGRATE on target remote+vmid" - ); - } - if delete { - check_guest_delete_perms(rpcenv, &remote, vmid)?; - } - - let source = remote; // let's stick to "source" and "target" naming - - log::info!("remote migration requested"); - - if source == target { - bail!("source and destination clusters must be different"); - } - - let (remotes, _) = pdm_config::remotes::config()?; - let target = get_remote(&remotes, &target)?; - let source_conn = connect_to_remote(&remotes, &source)?; - - let node = find_node_for_vm(node, vmid, source_conn.as_ref()).await?; - - // FIXME: For now we'll only try with the first node but we should probably try others, too, in - // case some are offline? - - let target_node = target - .nodes - .first() - .ok_or_else(|| format_err!("no nodes configured for target cluster"))?; - let target_host_port: Authority = target_node.hostname.parse()?; - let mut target_endpoint = format!( - "host={host},port={port},apitoken=PVEAPIToken={authid}={secret}", - host = target_host_port.host(), - authid = target.authid, - secret = target.token, - port = target_host_port.port_u16().unwrap_or(8006), - ); - if let Some(fp) = target_node.fingerprint.as_deref() { - target_endpoint.reserve(fp.len() + ",fingerprint=".len()); - target_endpoint.push_str(",fingerprint="); - target_endpoint.push_str(fp); - } - - log::info!("forwarding remote migration requested"); - let params = pve_api_types::RemoteMigrateLxc { - target_bridge, - target_storage, - delete: Some(delete), - online: Some(online), - target_vmid, - target_endpoint, - bwlimit: bwlimit.map(|limit| limit as f64), - restart, - timeout, - }; - log::info!("migrating vm {vmid} of node {node:?}"); - let upid = source_conn.remote_migrate_lxc(&node, vmid, params).await?; - - new_remote_upid(source, upid) -} - #[api( input: { properties: { diff --git a/server/src/api/pve/qemu.rs b/server/src/api/pve/qemu.rs new file mode 100644 index 0000000..9a67c10 --- /dev/null +++ b/server/src/api/pve/qemu.rs @@ -0,0 +1,552 @@ +use anyhow::{bail, format_err, Error}; +use http::uri::Authority; + +use proxmox_access_control::CachedUserInfo; +use proxmox_router::{ + http_bail, list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap, +}; +use proxmox_schema::api; +use proxmox_sortable_macro::sortable; + +use pdm_api_types::remotes::REMOTE_ID_SCHEMA; +use pdm_api_types::{ + Authid, ConfigurationState, RemoteUpid, CIDR_FORMAT, NODE_SCHEMA, PRIV_RESOURCE_AUDIT, + PRIV_RESOURCE_MANAGE, PRIV_RESOURCE_MIGRATE, SNAPSHOT_NAME_SCHEMA, VMID_SCHEMA, +}; + +use pve_api_types::{QemuMigratePreconditions, StartQemuMigrationType}; + +use crate::api::pve::get_remote; + +use super::{ + check_guest_delete_perms, 
check_guest_list_permissions, check_guest_permissions, + connect_to_remote, find_node_for_vm, new_remote_upid, +}; + +pub const ROUTER: Router = Router::new() + .get(&API_METHOD_LIST_QEMU) + .match_all("vmid", &QEMU_VM_ROUTER); + +const QEMU_VM_ROUTER: Router = Router::new() + .get(&list_subdirs_api_method!(QEMU_VM_SUBDIRS)) + .subdirs(QEMU_VM_SUBDIRS); +#[sortable] +const QEMU_VM_SUBDIRS: SubdirMap = &sorted!([ + ("config", &Router::new().get(&API_METHOD_QEMU_GET_CONFIG)), + ("rrddata", &super::rrddata::QEMU_RRD_ROUTER), + ("start", &Router::new().post(&API_METHOD_QEMU_START)), + ("status", &Router::new().get(&API_METHOD_QEMU_GET_STATUS)), + ("stop", &Router::new().post(&API_METHOD_QEMU_STOP)), + ("shutdown", &Router::new().post(&API_METHOD_QEMU_SHUTDOWN)), + ( + "migrate", + &Router::new() + .get(&API_METHOD_QEMU_MIGRATE_PRECONDITIONS) + .post(&API_METHOD_QEMU_MIGRATE) + ), + ( + "remote-migrate", + &Router::new().post(&API_METHOD_QEMU_REMOTE_MIGRATE) + ), +]); + +#[api( + input: { + properties: { + remote: { schema: REMOTE_ID_SCHEMA }, + node: { + schema: NODE_SCHEMA, + optional: true, + }, + }, + }, + returns: { + type: Array, + description: "Get a list of VMs", + items: { type: pve_api_types::VmEntry }, + }, + access: { + permission: &Permission::Privilege(&["resource", "{remote}"], PRIV_RESOURCE_AUDIT, false), + }, +)] +/// Query the remote's list of qemu VMs. If no node is provided, the all nodes are queried. +pub async fn list_qemu( + remote: String, + node: Option, + rpcenv: &mut dyn RpcEnvironment, +) -> Result, Error> { + // FIXME: top_level_allowed is always true because of schema check above, replace with Anybody + // and fine-grained checks once those are implemented for all API calls.. + let (auth_id, user_info, top_level_allowed) = check_guest_list_permissions(&remote, rpcenv)?; + + let (remotes, _) = pdm_config::remotes::config()?; + + let pve = connect_to_remote(&remotes, &remote)?; + + let list = if let Some(node) = node { + pve.list_qemu(&node, None).await? + } else { + let mut list = Vec::new(); + for node in pve.list_nodes().await? { + list.extend(pve.list_qemu(&node.node, None).await?); + } + list + }; + + if top_level_allowed { + return Ok(list); + } + + Ok(list + .into_iter() + .filter(|entry| { + check_guest_permissions( + &auth_id, + &user_info, + &remote, + PRIV_RESOURCE_AUDIT, + entry.vmid, + ) + }) + .collect()) +} + +#[api( + input: { + properties: { + remote: { schema: REMOTE_ID_SCHEMA }, + node: { + schema: NODE_SCHEMA, + optional: true, + }, + vmid: { schema: VMID_SCHEMA }, + state: { type: ConfigurationState }, + snapshot: { + schema: SNAPSHOT_NAME_SCHEMA, + optional: true, + }, + }, + }, + returns: { type: pve_api_types::QemuConfig }, + access: { + permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_AUDIT, false), + }, +)] +/// Get the configuration of a qemu VM from a remote. If a node is provided, the VM must be on that +/// node, otherwise the node is determined automatically. +pub async fn qemu_get_config( + remote: String, + node: Option, + vmid: u32, + state: ConfigurationState, + snapshot: Option, +) -> Result { + let (remotes, _) = pdm_config::remotes::config()?; + + let pve = connect_to_remote(&remotes, &remote)?; + + let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; + + Ok(pve + .qemu_get_config(&node, vmid, state.current(), snapshot) + .await?) 
+} + +#[api( + input: { + properties: { + remote: { schema: REMOTE_ID_SCHEMA }, + node: { + schema: NODE_SCHEMA, + optional: true, + }, + vmid: { schema: VMID_SCHEMA }, + }, + }, + returns: { type: pve_api_types::QemuStatus }, + access: { + permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_AUDIT, false), + }, +)] +/// Get the status of a qemu VM from a remote. If a node is provided, the VM must be on that +/// node, otherwise the node is determined automatically. +pub async fn qemu_get_status( + remote: String, + node: Option, + vmid: u32, +) -> Result { + let (remotes, _) = pdm_config::remotes::config()?; + + let pve = connect_to_remote(&remotes, &remote)?; + + let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; + + Ok(pve.qemu_get_status(&node, vmid).await?) +} + +#[api( + input: { + properties: { + remote: { schema: REMOTE_ID_SCHEMA }, + node: { + schema: NODE_SCHEMA, + optional: true, + }, + vmid: { schema: VMID_SCHEMA }, + }, + }, + returns: { type: RemoteUpid }, + access: { + permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MANAGE, false), + }, +)] +/// Start a remote qemu vm. +pub async fn qemu_start( + remote: String, + node: Option, + vmid: u32, +) -> Result { + let (remotes, _) = pdm_config::remotes::config()?; + + let pve = connect_to_remote(&remotes, &remote)?; + + let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; + + let upid = pve + .start_qemu_async(&node, vmid, Default::default()) + .await?; + + new_remote_upid(remote, upid) +} + +#[api( + input: { + properties: { + remote: { schema: REMOTE_ID_SCHEMA }, + node: { + schema: NODE_SCHEMA, + optional: true, + }, + vmid: { schema: VMID_SCHEMA }, + }, + }, + returns: { type: RemoteUpid }, + access: { + permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MANAGE, false), + }, +)] +/// Stop a remote qemu vm. +pub async fn qemu_stop( + remote: String, + node: Option, + vmid: u32, +) -> Result { + let (remotes, _) = pdm_config::remotes::config()?; + + let pve = connect_to_remote(&remotes, &remote)?; + + let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; + + let upid = pve.stop_qemu_async(&node, vmid, Default::default()).await?; + + (remote, upid.to_string()).try_into() +} + +#[api( + input: { + properties: { + remote: { schema: REMOTE_ID_SCHEMA }, + node: { + schema: NODE_SCHEMA, + optional: true, + }, + vmid: { schema: VMID_SCHEMA }, + }, + }, + returns: { type: RemoteUpid }, + access: { + permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MANAGE, false), + }, +)] +/// Perform a shutdown of a remote qemu vm. 
+pub async fn qemu_shutdown( + remote: String, + node: Option, + vmid: u32, +) -> Result { + let (remotes, _) = pdm_config::remotes::config()?; + + let pve = connect_to_remote(&remotes, &remote)?; + + let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; + + let upid = pve + .shutdown_qemu_async(&node, vmid, Default::default()) + .await?; + + (remote, upid.to_string()).try_into() +} + +#[api( + input: { + properties: { + remote: { schema: REMOTE_ID_SCHEMA }, + node: { + schema: NODE_SCHEMA, + optional: true, + }, + target: { schema: NODE_SCHEMA }, + vmid: { schema: VMID_SCHEMA }, + online: { + type: bool, + description: "Perform an online migration if the vm is running.", + optional: true, + }, + "target-storage": { + description: "Mapping of source storages to target storages.", + optional: true, + }, + bwlimit: { + description: "Override I/O bandwidth limit (in KiB/s).", + optional: true, + }, + "migration-network": { + description: "CIDR of the (sub) network that is used for migration.", + type: String, + format: &CIDR_FORMAT, + optional: true, + }, + "migration-type": { + type: StartQemuMigrationType, + optional: true, + }, + force: { + description: "Allow to migrate VMs with local devices.", + optional: true, + default: false, + }, + "with-local-disks": { + description: "Enable live storage migration for local disks.", + optional: true, + }, + }, + }, + returns: { type: RemoteUpid }, + access: { + permission: &Permission::And(&[ + &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MIGRATE, false), + ]), + }, +)] +/// Perform an in-cluster migration of a VM. +#[allow(clippy::too_many_arguments)] +pub async fn qemu_migrate( + remote: String, + node: Option, + vmid: u32, + bwlimit: Option, + force: Option, + migration_network: Option, + migration_type: Option, + online: Option, + target: String, + target_storage: Option, + with_local_disks: Option, +) -> Result { + log::info!("in-cluster migration requested for remote {remote:?} vm {vmid} to node {target:?}"); + + let (remotes, _) = pdm_config::remotes::config()?; + let pve = connect_to_remote(&remotes, &remote)?; + + let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; + + if node == target { + bail!("refusing migration to the same node"); + } + + let params = pve_api_types::MigrateQemu { + bwlimit, + force, + migration_network, + migration_type, + online, + target, + targetstorage: target_storage, + with_local_disks, + }; + let upid = pve.migrate_qemu(&node, vmid, params).await?; + //(remote, upid.to_string()).try_into() + new_remote_upid(remote, upid) +} + +#[api( + input: { + properties: { + remote: { schema: REMOTE_ID_SCHEMA }, + node: { + schema: NODE_SCHEMA, + optional: true, + }, + target: { + schema: NODE_SCHEMA, + optional: true, + }, + vmid: { schema: VMID_SCHEMA }, + } + }, + access: { + permission: &Permission::And(&[ + &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MIGRATE, false), + ]), + }, +)] +/// Qemu (local) migrate preconditions +async fn qemu_migrate_preconditions( + remote: String, + node: Option, + target: Option, + vmid: u32, +) -> Result { + let (remotes, _) = pdm_config::remotes::config()?; + let pve = connect_to_remote(&remotes, &remote)?; + + let node = find_node_for_vm(node, vmid, pve.as_ref()).await?; + + let res = pve.qemu_migrate_preconditions(&node, vmid, target).await?; + Ok(res) +} + +#[api( + input: { + properties: { + remote: { schema: REMOTE_ID_SCHEMA }, + target: { schema: REMOTE_ID_SCHEMA }, + node: { + schema: 
NODE_SCHEMA, + optional: true, + }, + vmid: { schema: VMID_SCHEMA }, + "target-vmid": { + optional: true, + schema: VMID_SCHEMA, + }, + delete: { + description: "Delete the original VM and related data after successful migration.", + optional: true, + default: false, + }, + online: { + type: bool, + description: "Perform an online migration if the vm is running.", + optional: true, + default: false, + }, + "target-storage": { + description: "Mapping of source storages to target storages.", + }, + "target-bridge": { + description: "Mapping of source bridges to remote bridges.", + }, + bwlimit: { + description: "Override I/O bandwidth limit (in KiB/s).", + optional: true, + } + }, + }, + returns: { type: RemoteUpid }, + access: { + permission: + &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MIGRATE, false), + description: "requires PRIV_RESOURCE_MIGRATE on /resource/{remote}/guest/{vmid} for source and target remove and vmid", + }, +)] +/// Perform a remote migration of a VM. +#[allow(clippy::too_many_arguments)] +pub async fn qemu_remote_migrate( + remote: String, // this is the source + target: String, // this is the destination remote name + node: Option, + vmid: u32, + target_vmid: Option, + delete: bool, + online: bool, + target_storage: String, + target_bridge: String, + bwlimit: Option, + rpcenv: &mut dyn RpcEnvironment, +) -> Result { + let user_info = CachedUserInfo::new()?; + let auth_id: Authid = rpcenv + .get_auth_id() + .ok_or_else(|| format_err!("no authid available"))? + .parse()?; + let target_privs = user_info.lookup_privs( + &auth_id, + &[ + "resource", + &target, + "guest", + &target_vmid.unwrap_or(vmid).to_string(), + ], + ); + if target_privs & PRIV_RESOURCE_MIGRATE == 0 { + http_bail!( + UNAUTHORIZED, + "missing PRIV_RESOURCE_MIGRATE on target remote+vmid" + ); + } + + if delete { + check_guest_delete_perms(rpcenv, &remote, vmid)?; + } + + let source = remote; // let's stick to "source" and "target" naming + + log::info!("remote migration requested"); + + if source == target { + bail!("source and destination clusters must be different"); + } + + let (remotes, _) = pdm_config::remotes::config()?; + let target = get_remote(&remotes, &target)?; + let source_conn = connect_to_remote(&remotes, &source)?; + + let node = find_node_for_vm(node, vmid, source_conn.as_ref()).await?; + + // FIXME: For now we'll only try with the first node but we should probably try others, too, in + // case some are offline? 
+
+    let target_node = target
+        .nodes
+        .first()
+        .ok_or_else(|| format_err!("no nodes configured for target cluster"))?;
+    let target_host_port: Authority = target_node.hostname.parse()?;
+    let mut target_endpoint = format!(
+        "host={host},port={port},apitoken=PVEAPIToken={authid}={secret}",
+        host = target_host_port.host(),
+        authid = target.authid,
+        secret = target.token,
+        port = target_host_port.port_u16().unwrap_or(8006),
+    );
+    if let Some(fp) = target_node.fingerprint.as_deref() {
+        target_endpoint.reserve(fp.len() + ",fingerprint=".len());
+        target_endpoint.push_str(",fingerprint=");
+        target_endpoint.push_str(fp);
+    }
+
+    log::info!("forwarding remote migration requested");
+    let params = pve_api_types::RemoteMigrateQemu {
+        target_bridge,
+        target_storage,
+        delete: Some(delete),
+        online: Some(online),
+        target_vmid,
+        target_endpoint,
+        bwlimit,
+    };
+    log::info!("migrating vm {vmid} of node {node:?}");
+    let upid = source_conn.remote_migrate_qemu(&node, vmid, params).await?;
+
+    (source, upid.to_string()).try_into()
+}
-- 
2.39.5
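
For readers skimming the series: after this patch, mod.rs keeps only the shared plumbing and delegates the guest endpoints to the two new modules. A condensed sketch of the resulting wiring in server/src/api/pve/mod.rs, distilled from the hunks above (illustrative summary only, not an additional hunk):

mod lxc;
mod node;
mod qemu;
mod rrddata;
pub mod tasks;

#[sortable]
const REMOTE_SUBDIRS: SubdirMap = &sorted!([
    ("lxc", &lxc::ROUTER),     // was the file-local LXC_ROUTER
    ("nodes", &NODES_ROUTER),
    ("qemu", &qemu::ROUTER),   // was the file-local QEMU_ROUTER
    ("resources", &RESOURCES_ROUTER),
    ("tasks", &tasks::ROUTER),
]);

The extracted modules reach the shared helpers (connect_to_remote, find_node_for_vm, new_remote_upid, and the guest permission checks) through their use super::{...} imports, so no helper code is duplicated between lxc.rs and qemu.rs.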