From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from firstgate.proxmox.com (firstgate.proxmox.com [212.224.123.68]) (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits) key-exchange X25519 server-signature RSA-PSS (2048 bits)) (No client certificate requested) by lists.proxmox.com (Postfix) with ESMTPS id 9284A72AC2 for ; Tue, 13 Apr 2021 14:17:05 +0200 (CEST) Received: from firstgate.proxmox.com (localhost [127.0.0.1]) by firstgate.proxmox.com (Proxmox) with ESMTP id 87D5C2AA8D for ; Tue, 13 Apr 2021 14:17:05 +0200 (CEST) Received: from proxmox-new.maurer-it.com (proxmox-new.maurer-it.com [212.186.127.180]) (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits) key-exchange X25519 server-signature RSA-PSS (2048 bits)) (No client certificate requested) by firstgate.proxmox.com (Proxmox) with ESMTPS id 9982F2AA78 for ; Tue, 13 Apr 2021 14:17:02 +0200 (CEST) Received: from proxmox-new.maurer-it.com (localhost.localdomain [127.0.0.1]) by proxmox-new.maurer-it.com (Proxmox) with ESMTP id 64C6F45A6F for ; Tue, 13 Apr 2021 14:17:02 +0200 (CEST) From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= To: pve-devel@lists.proxmox.com Date: Tue, 13 Apr 2021 14:16:40 +0200 Message-Id: <20210413121640.3602975-23-f.gruenbichler@proxmox.com> X-Mailer: git-send-email 2.20.1 In-Reply-To: <20210413121640.3602975-1-f.gruenbichler@proxmox.com> References: <20210413121640.3602975-1-f.gruenbichler@proxmox.com> MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit X-SPAM-LEVEL: Spam detection results: 0 AWL 0.026 Adjusted score from AWL reputation of From: address KAM_DMARC_STATUS 0.01 Test Rule for DKIM or SPF Failure with Strict Alignment RCVD_IN_DNSWL_MED -2.3 Sender listed at https://www.dnswl.org/, medium trust SPF_HELO_NONE 0.001 SPF: HELO does not publish an SPF Record SPF_PASS -0.001 SPF: sender matches SPF record URIBL_BLOCKED 0.001 ADMINISTRATOR NOTICE: The query to URIBL was blocked. 
See http://wiki.apache.org/spamassassin/DnsBlocklists#dnsbl-block for more information. [qemu.pm] Subject: [pve-devel] [PATCH qemu-server 7/7] api: add remote migrate endpoint X-BeenThere: pve-devel@lists.proxmox.com X-Mailman-Version: 2.1.29 Precedence: list List-Id: Proxmox VE development discussion List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Tue, 13 Apr 2021 12:17:05 -0000 Signed-off-by: Fabian Grünbichler --- Notes: requires - pve-manager with 'addr' API endpoint on target node - pve-cluster with RemoteConfig support on local node - pve-common with bridgepair format - pve-guest-common with AbstractMigrate handling remote migration PVE/API2/Qemu.pm | 196 ++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 194 insertions(+), 2 deletions(-) diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm index bf5ca14..28dd323 100644 --- a/PVE/API2/Qemu.pm +++ b/PVE/API2/Qemu.pm @@ -39,6 +39,7 @@ use PVE::API2::Firewall::VM; use PVE::API2::Qemu::Agent; use PVE::VZDump::Plugin; use PVE::DataCenterConfig; +use PVE::RemoteConfig; use PVE::SSHInfo; BEGIN { @@ -50,8 +51,6 @@ BEGIN { } } -use Data::Dumper; # fixme: remove - use base qw(PVE::RESTHandler); my $opt_force_description = "Force physical removal. Without this, we simple remove the disk from the config file and create an additional configuration entry called 'unused[n]', which contains the volume ID. Unlink of unused[n] always cause physical removal."; @@ -3754,6 +3753,199 @@ __PACKAGE__->register_method({ }}); +__PACKAGE__->register_method({ + name => 'remote_migrate_vm', + path => '{vmid}/remote_migrate', + method => 'POST', + protected => 1, + proxyto => 'node', + description => "Migrate virtual machine to a remote cluster. 
Creates a new migration task.", + permissions => { + check => ['perm', '/vms/{vmid}', [ 'VM.Migrate' ]], + }, + parameters => { + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }), + 'target-vmid' => get_standard_option('pve-vmid', { optional => 1 }), + 'target-node' => get_standard_option('pve-node', { + description => "Target node on remote cluster.", + completion => \&PVE::RemoteConfig::complete_remote_node, + }), + 'target-cluster' => get_standard_option('pve-remote-cluster', { + description => "Remote target cluster", + completion => \&PVE::RemoteConfig::complete_remote_cluster, + }), + online => { + type => 'boolean', + description => "Use online/live migration if VM is running. Ignored if VM is stopped.", + optional => 1, + }, + 'migration-network' => { + type => 'string', format => 'CIDR', + description => "CIDR of the (sub) network that is used for migration.", + optional => 1, + }, + 'with-local-disks' => { + type => 'boolean', + description => "Enable live storage migration for local disk", + optional => 1, + }, + targetstorage => get_standard_option('pve-targetstorage', { + completion => \&PVE::QemuServer::complete_migration_storage, + optional => 0, + }), + targetbridge => { + type => 'string', + description => "Mapping from source to target bridges. Providing only a single bridge ID maps all source bridges to that bridge. 
Providing the special value '1' will map each source bridge to itself.", + format => 'bridgepair-list', + }, + bwlimit => { + description => "Override I/O bandwidth limit (in KiB/s).", + optional => 1, + type => 'integer', + minimum => '0', + default => 'migrate limit from datacenter or storage config', + }, + }, + }, + returns => { + type => 'string', + description => "the task ID.", + }, + code => sub { + my ($param) = @_; + + my $rpcenv = PVE::RPCEnvironment::get(); + my $authuser = $rpcenv->get_user(); + + my $source_vmid = extract_param($param, 'vmid'); + my $target_cluster = extract_param($param, 'target-cluster'); + my $target_node = extract_param($param, 'target-node'); + my $target_vmid = extract_param($param, 'target-vmid') // $source_vmid; + + my $localnode = PVE::INotify::nodename(); + my $network = extract_param($param, 'migration-network'); + + PVE::Cluster::check_cfs_quorum(); + + raise_param_exc({ 'migration-network' => "Only root may use this option." }) + if $network && $authuser ne 'root@pam'; + + # test if VM exists + my $conf = PVE::QemuConfig->load_config($source_vmid); + + PVE::QemuConfig->check_lock($conf); + + raise_param_exc({ vmid => "cannot migrate HA-managed VM to remote cluster" }) + if PVE::HA::Config::vm_is_ha_managed($source_vmid); + + my $remote_conf = PVE::RemoteConfig->new(); + + # TODO: check remote ACLs + my ($ip_info, $fp, $conn) = $remote_conf->get_remote_info($target_cluster, $target_node, $network); + + die "Unable to determine remote IP\n" + if !defined($ip_info) || !defined($ip_info->{default}); + + my $extra_ips = $ip_info->{extra} // []; + die "Unable to determine remote IP in migration network '$network'\n" + if defined($network) && !@$extra_ips; + + my $target_ip; + if (@$extra_ips) { + $target_ip = $ip_info->{extra}[0]; + print "remote: selected IP '$target_ip' within '$network'.\n"; + } else { + $target_ip = $ip_info->{default}; + print "remote: selected default IP '$target_ip'.\n"; + } + + $conn->{host} = 
$target_ip; + $conn->{cached_fingerprints}->{$fp} = 1 if defined($fp); + + my $api_client = PVE::APIClient::LWP->new(%$conn); + my $version = $api_client->get("/version"); + print "remote: version '$version->{version}'\n"; + + if (PVE::QemuServer::check_running($source_vmid)) { + die "can't migrate running VM without --online\n" if !$param->{online}; + + my $repl_conf = PVE::ReplicationConfig->new(); + my $is_replicated = $repl_conf->check_for_existing_jobs($source_vmid, 1); + die "cannot remote-migrate replicated VM\n" if $is_replicated; + } else { + warn "VM isn't running. Doing offline migration instead.\n" if $param->{online}; + $param->{online} = 0; + } + + # FIXME: fork worker here to avoid timeout? or poll these periodically + # in pvestatd and access cached info here? all of the below is actually + # checked at the remote end anyway once we call the mtunnel endpoint, + # we could also punt it to the client and not do it here at all.. + my $resources = $api_client->get("/cluster/resources"); + if (grep { defined($_->{vmid}) && $_->{vmid} eq $target_vmid } @$resources) { + raise_param_exc({ 'target-vmid' => "Guest with ID '$target_vmid' already exists on remote cluster" }); + } + + my $storages = [ grep { $_->{type} eq 'storage' && $_->{node} eq $target_node } @$resources ]; + my $storecfg = PVE::Storage::config(); + my $targetstorage = extract_param($param, 'targetstorage'); + my $storagemap = eval { PVE::JSONSchema::parse_idmap($targetstorage, 'pve-storage-id') }; + raise_param_exc({ targetstorage => "failed to parse storage map: $@" }) + if $@; + + my $targetbridge = extract_param($param, 'targetbridge'); + my $bridgemap = eval { PVE::JSONSchema::parse_idmap($targetbridge, 'pve-bridge-id') }; + raise_param_exc({ targetbridge => "failed to parse bridge map: $@" }) + if $@; + + my $check_remote_storage = sub { + my ($storage) = @_; + my $found = [ grep { $_->{storage} eq $storage } @$storages ]; + die "remote: storage '$storage' does not exist!\n" + if 
!@$found; + + $found = $found->[0]; + + my $content_types = [ PVE::Tools::split_list($found->{content}) ]; + die "remote: storage '$storage' cannot store images\n" + if !grep { $_ eq 'images' } @$content_types; + }; + + foreach my $target_sid (values %{$storagemap->{entries}}) { + $check_remote_storage->($target_sid); + } + + $check_remote_storage->($storagemap->{default}) + if $storagemap->{default}; + + # TODO: or check all referenced storages? + die "remote migration requires explicit storage mapping!\n" + if $storagemap->{identity}; + + $param->{storagemap} = $storagemap; + $param->{bridgemap} = $bridgemap; + $param->{remote} = { + conn => $conn, + client => $api_client, + vmid => $target_vmid, + }; + $param->{migration_type} = 'websocket'; + $param->{migration_network} = $network if $network; + + my $realcmd = sub { + PVE::QemuMigrate->migrate($target_node, $target_ip, $source_vmid, $param); + }; + + my $worker = sub { + return PVE::GuestHelpers::guest_migration_lock($source_vmid, 10, $realcmd); + }; + + return $rpcenv->fork_worker('qmigrate', $source_vmid, $authuser, $worker); + }}); + __PACKAGE__->register_method({ name => 'monitor', path => '{vmid}/monitor', -- 2.20.1