From: Fabian Grünbichler <f.gruenbichler@proxmox.com>
To: pve-devel@lists.proxmox.com
Date: Thu, 3 Feb 2022 13:41:38 +0100
Message-Id: <20220203124143.1931377-18-f.gruenbichler@proxmox.com>
In-Reply-To: <20220203124143.1931377-1-f.gruenbichler@proxmox.com>
References: <20220203124143.1931377-1-f.gruenbichler@proxmox.com>
Subject: [pve-devel] [PATCH v4 qemu-server 10/11] api: add remote migrate endpoint

entry point for the remote migration on the source side, mainly preparing
the API client that gets passed to the actual migration code and doing some
parameter parsing.

querying of the remote side's resources (like available storages, free
VMIDs, lookup of endpoint details for specific nodes, ...) should be done by
the client - see next commit with CLI example.
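As a rough illustration of that client-side querying (not part of this
patch; host, token and fingerprint below are made-up placeholders), the same
PVE::APIClient::LWP module used by the endpoint can be driven directly:

use strict;
use warnings;

use PVE::APIClient::LWP;

# connection parameters mirror the $conn_args hash the endpoint builds itself
my $remote = PVE::APIClient::LWP->new(
    protocol => 'https',
    host => '203.0.113.10',                               # placeholder remote address
    port => 8006,
    apitoken => 'PVEAPIToken=root@pam!migrate=<secret>',  # placeholder token
    cached_fingerprints => { 'AA:BB:CC:...' => 1 },       # placeholder fingerprint
);

# a free VMID on the remote cluster
my $free_vmid = $remote->get('/cluster/nextid');

# storages available on the remote node we are talking to
my $storages = $remote->get('/nodes/localhost/storage', { enabled => 1 });

# node names / endpoint details of the remote cluster
my $status = $remote->get('/cluster/status');
my @nodes = grep { $_->{type} eq 'node' } @$status;

print "next free remote VMID: $free_vmid\n";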
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---

Notes:
    v4:
    - removed target_node parameter, now determined by querying
      /cluster/status on the remote
    - moved checks to CLI

    requires libpve-common-perl with proxmox-remote (already bumped in
    d/control)

 PVE/API2/Qemu.pm | 213 ++++++++++++++++++++++++++++++++++++++++++++++-
 debian/control   |   6 +-
 2 files changed, 215 insertions(+), 4 deletions(-)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index f6ce7c3f..dd76c276 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -12,6 +12,7 @@ use URI::Escape;
 use Crypt::OpenSSL::Random;
 use Socket qw(SOCK_STREAM);
 
+use PVE::APIClient::LWP;
 use PVE::Cluster qw (cfs_read_file cfs_write_file);;
 use PVE::RRD;
 use PVE::SafeSyslog;
@@ -51,8 +52,6 @@ BEGIN {
     }
 }
 
-use Data::Dumper; # fixme: remove
-
 use base qw(PVE::RESTHandler);
 
 my $opt_force_description = "Force physical removal. Without this, we simple remove the disk from the config file and create an additional configuration entry called 'unused[n]', which contains the volume ID. Unlink of unused[n] always cause physical removal.";
@@ -4000,6 +3999,205 @@ __PACKAGE__->register_method({
     }});
 
+__PACKAGE__->register_method({
+    name => 'remote_migrate_vm',
+    path => '{vmid}/remote_migrate',
+    method => 'POST',
+    protected => 1,
+    proxyto => 'node',
+    description => "Migrate virtual machine to a remote cluster. Creates a new migration task.",
+    permissions => {
+        check => ['perm', '/vms/{vmid}', [ 'VM.Migrate' ]],
+    },
+    parameters => {
+        additionalProperties => 0,
+        properties => {
+            node => get_standard_option('pve-node'),
+            vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
+            'target-vmid' => get_standard_option('pve-vmid', { optional => 1 }),
+            'target-endpoint' => get_standard_option('proxmox-remote', {
+                description => "Remote target endpoint",
+            }),
+            online => {
+                type => 'boolean',
+                description => "Use online/live migration if VM is running. Ignored if VM is stopped.",
+                optional => 1,
+            },
+            'with-local-disks' => {
+                type => 'boolean',
+                description => "Enable live storage migration for local disk",
+                optional => 1,
+            },
+            delete => {
+                type => 'boolean',
+                description => "Delete the original VM and related data after successful migration. By default the original VM is kept on the source cluster in a stopped state.",
+                optional => 1,
+                default => 0,
+            },
+            'target-storage' => get_standard_option('pve-targetstorage', {
+                completion => \&PVE::QemuServer::complete_migration_storage,
+                optional => 0,
+            }),
+            'target-bridge' => {
+                type => 'string',
+                description => "Mapping from source to target bridges. Providing only a single bridge ID maps all source bridges to that bridge. Providing the special value '1' will map each source bridge to itself.",
+                format => 'bridge-pair-list',
+            },
+            bwlimit => {
+                description => "Override I/O bandwidth limit (in KiB/s).",
+                optional => 1,
+                type => 'integer',
+                minimum => '0',
+                default => 'migrate limit from datacenter or storage config',
+            },
+        },
+    },
+    returns => {
+        type => 'string',
+        description => "the task ID.",
+    },
+    code => sub {
+        my ($param) = @_;
+
+        my $rpcenv = PVE::RPCEnvironment::get();
+        my $authuser = $rpcenv->get_user();
+
+        my $source_vmid = extract_param($param, 'vmid');
+        my $target_endpoint = extract_param($param, 'target-endpoint');
+        my $target_vmid = extract_param($param, 'target-vmid') // $source_vmid;
+
+        my $delete = extract_param($param, 'delete') // 0;
+
+        PVE::Cluster::check_cfs_quorum();
+
+        # test if VM exists
+        my $conf = PVE::QemuConfig->load_config($source_vmid);
+
+        PVE::QemuConfig->check_lock($conf);
+
+        raise_param_exc({ vmid => "cannot migrate HA-managed VM to remote cluster" })
+            if PVE::HA::Config::vm_is_ha_managed($source_vmid);
+
+        my $remote = PVE::JSONSchema::parse_property_string('proxmox-remote', $target_endpoint);
+
+        # TODO: move this as helper somewhere appropriate?
+        my $conn_args = {
+            protocol => 'https',
+            host => $remote->{host},
+            port => $remote->{port} // 8006,
+            apitoken => $remote->{apitoken},
+        };
+
+        my $fp;
+        if ($fp = $remote->{fingerprint}) {
+            $conn_args->{cached_fingerprints} = { uc($fp) => 1 };
+        }
+
+        print "Establishing API connection with remote at '$remote->{host}'\n";
+
+        my $api_client = PVE::APIClient::LWP->new(%$conn_args);
+
+        if (!defined($fp)) {
+            my $cert_info = $api_client->get("/nodes/localhost/certificates/info");
+            foreach my $cert (@$cert_info) {
+                my $filename = $cert->{filename};
+                next if $filename ne 'pveproxy-ssl.pem' && $filename ne 'pve-ssl.pem';
+                $fp = $cert->{fingerprint} if !$fp || $filename eq 'pveproxy-ssl.pem';
+            }
+            $conn_args->{cached_fingerprints} = { uc($fp) => 1 }
+                if defined($fp);
+        }
+
+        if (PVE::QemuServer::check_running($source_vmid)) {
+            die "can't migrate running VM without --online\n" if !$param->{online};
+
+            my $repl_conf = PVE::ReplicationConfig->new();
+            my $is_replicated = $repl_conf->check_for_existing_jobs($source_vmid, 1);
+            die "cannot remote-migrate replicated VM\n" if $is_replicated;
+        } else {
+            warn "VM isn't running. Doing offline migration instead.\n" if $param->{online};
+            $param->{online} = 0;
+        }
+
+        # FIXME: fork worker here to avoid timeout? or poll these periodically
+        # in pvestatd and access cached info here? all of the below is actually
+        # checked at the remote end anyway once we call the mtunnel endpoint,
+        # we could also punt it to the client and not do it here at all..
+        my $resources = $api_client->get("/cluster/resources", { type => 'vm' });
+        if (grep { defined($_->{vmid}) && $_->{vmid} eq $target_vmid } @$resources) {
+            raise_param_exc({ target_vmid => "Guest with ID '$target_vmid' already exists on remote cluster" });
+        }
+
+        my $storages = $api_client->get("/nodes/localhost/storage", { enabled => 1 });
+
+        my $storecfg = PVE::Storage::config();
+        my $target_storage = extract_param($param, 'target-storage');
+        my $storagemap = eval { PVE::JSONSchema::parse_idmap($target_storage, 'pve-storage-id') };
+        raise_param_exc({ 'target-storage' => "failed to parse storage map: $@" })
+            if $@;
+
+        my $target_bridge = extract_param($param, 'target-bridge');
+        my $bridgemap = eval { PVE::JSONSchema::parse_idmap($target_bridge, 'pve-bridge-id') };
+        raise_param_exc({ 'target-bridge' => "failed to parse bridge map: $@" })
+            if $@;
+
+        my $check_remote_storage = sub {
+            my ($storage) = @_;
+            my $found = [ grep { $_->{storage} eq $storage } @$storages ];
+            die "remote: storage '$storage' does not exist!\n"
+                if !@$found;
+
+            $found = @$found[0];
+
+            my $content_types = [ PVE::Tools::split_list($found->{content}) ];
+            die "remote: storage '$storage' cannot store images\n"
+                if !grep { $_ eq 'images' } @$content_types;
+        };
+
+        foreach my $target_sid (values %{$storagemap->{entries}}) {
+            $check_remote_storage->($target_sid);
+        }
+
+        $check_remote_storage->($storagemap->{default})
+            if $storagemap->{default};
+
+        die "remote migration requires explicit storage mapping!\n"
+            if $storagemap->{identity};
+
+        $param->{storagemap} = $storagemap;
+        $param->{bridgemap} = $bridgemap;
+        $param->{remote} = {
+            conn => $conn_args, # re-use fingerprint for tunnel
+            client => $api_client,
+            vmid => $target_vmid,
+        };
+        $param->{migration_type} = 'websocket';
+        $param->{delete} = $delete if $delete;
+
+        my $cluster_status = $api_client->get("/cluster/status");
+        my $target_node;
+        foreach my $entry (@$cluster_status) {
+            next if $entry->{type} ne 'node';
+            if ($entry->{local}) {
+                $target_node = $entry->{name};
+                last;
+            }
+        }
+
+        die "couldn't determine endpoint's node name\n"
+            if !defined($target_node);
+
+        my $realcmd = sub {
+            PVE::QemuMigrate->migrate($target_node, $remote->{host}, $source_vmid, $param);
+        };
+
+        my $worker = sub {
+            return PVE::GuestHelpers::guest_migration_lock($source_vmid, 10, $realcmd);
+        };
+
+        return $rpcenv->fork_worker('qmigrate', $source_vmid, $authuser, $worker);
+    }});
+
 __PACKAGE__->register_method({
     name => 'monitor',
     path => '{vmid}/monitor',
     method => 'POST',
@@ -4677,6 +4875,12 @@ __PACKAGE__->register_method({
                 optional => 1,
                 description => 'List of storages to check permission and availability. Will be checked again for all actually used storages during migration.',
             },
+            bridges => {
+                type => 'string',
+                format => 'pve-bridge-id-list',
+                optional => 1,
+                description => 'List of network bridges to check availability. Will be checked again for actually used bridges during migration.',
+            },
         },
     },
     returns => {
@@ -4697,6 +4901,7 @@ __PACKAGE__->register_method({
 
         my $vmid = extract_param($param, 'vmid');
         my $storages = extract_param($param, 'storages');
+        my $bridges = extract_param($param, 'bridges');
 
         my $nodename = PVE::INotify::nodename();
 
@@ -4710,6 +4915,10 @@ __PACKAGE__->register_method({
             $check_storage_access_migrate->($rpcenv, $authuser, $storecfg, $storeid, $node);
         }
 
+        foreach my $bridge (PVE::Tools::split_list($bridges)) {
+            PVE::Network::read_bridge_mtu($bridge);
+        }
+
         PVE::Cluster::check_cfs_quorum();
 
         my $socket_addr = "/run/qemu-server/$vmid.mtunnel";
diff --git a/debian/control b/debian/control
index 8d0b856e..c2395ec6 100644
--- a/debian/control
+++ b/debian/control
@@ -6,8 +6,9 @@ Build-Depends: debhelper (>= 12~),
                libglib2.0-dev,
                libio-multiplex-perl,
                libjson-c-dev,
+               libpve-apiclient-perl,
                libpve-cluster-perl,
-               libpve-common-perl (>= 7.0-14),
+               libpve-common-perl (>= 7.0-19),
                libpve-guest-common-perl (>= 3.1-3),
                libpve-storage-perl (>= 6.1-7),
                libtest-mockmodule-perl,
@@ -34,8 +35,9 @@ Depends: dbus,
         libjson-xs-perl,
         libnet-ssleay-perl,
         libpve-access-control (>= 7.0-7),
+        libpve-apiclient-perl,
         libpve-cluster-perl,
-        libpve-common-perl (>= 7.0-14),
+        libpve-common-perl (>= 7.0-19),
         libpve-guest-common-perl (>= 3.1-3),
         libpve-storage-perl (>= 6.3-8),
         libterm-readline-gnu-perl,
-- 
2.30.2
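
For reference, a rough sketch of how the new endpoint could be invoked
through the API on the source cluster (the real CLI integration follows in
the next patch; node name, VMID, token and mappings are made-up placeholders,
and the post() helper of PVE::APIClient::LWP is assumed here):

use strict;
use warnings;

use PVE::APIClient::LWP;

# client pointed at the *source* cluster (placeholder host/token)
my $source = PVE::APIClient::LWP->new(
    protocol => 'https',
    host => 'pve-source.example.com',
    port => 8006,
    apitoken => 'PVEAPIToken=root@pam!migrate=<secret>',
);

# 'target-endpoint' is a 'proxmox-remote' property string describing the
# remote cluster; 'target-storage' and 'target-bridge' use the idmap formats
# parsed with PVE::JSONSchema::parse_idmap in the endpoint above
my $upid = $source->post('/nodes/sourcenode/qemu/100/remote_migrate', {
    'target-endpoint' => 'host=pve-target.example.com,apitoken=<token>,fingerprint=<fp>',
    'target-storage' => 'local-lvm=other-lvm',  # map storage local-lvm to other-lvm
    'target-bridge' => 'vmbr0',                 # map all source bridges to vmbr0
    'online' => 1,
    'with-local-disks' => 1,
});

print "started remote migration task: $upid\n";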