From: "Fabian Grünbichler" <f.gruenbichler@proxmox.com>
To: pve-devel@lists.proxmox.com
Subject: [pve-devel] [PATCH PoC v5 container 3/3] migration: add remote migration
Date: Wed, 9 Feb 2022 14:07:33 +0100
Message-ID: <20220209130750.902245-5-f.gruenbichler@proxmox.com>
In-Reply-To: <20220209130750.902245-1-f.gruenbichler@proxmox.com>
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
Notes:
new in v5 - PoC to ensure helpers and abstractions are re-usable
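
An illustrative usage sketch (not part of the patch - host names, tokens
and fingerprints below are made up): the new endpoint on the source
cluster could be driven via PVE::APIClient::LWP, analogous to how the
handler itself connects to the target cluster:

    use PVE::APIClient::LWP;

    # the calling token needs VM.Migrate on /vms/100 on the source; the
    # token embedded in 'target-endpoint' needs VM.Allocate on the target
    my $client = PVE::APIClient::LWP->new(
        protocol => 'https',
        host => 'source.example.com',
        port => 8006,
        apitoken => 'PVEAPIToken=root@pam!migrate=<secret>',
        cached_fingerprints => { '<UPPERCASE-SHA256-FINGERPRINT>' => 1 },
    );

    # storage and bridge maps use the 'source:target' pair-list format
    my $upid = $client->post('/nodes/srcnode/lxc/100/remote_migrate', {
        'target-endpoint' => 'host=target.example.com,apitoken=PVEAPIToken=root@pam!incoming=<secret>,fingerprint=<FP>',
        'target-storage' => 'local-zfs:tank',
        'target-bridge' => 'vmbr0:vmbr1',
        restart => 1,
    });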
src/PVE/API2/LXC.pm | 630 +++++++++++++++++++++++++++++++++++++++++
src/PVE/LXC/Migrate.pm | 237 +++++++++++++---
2 files changed, 828 insertions(+), 39 deletions(-)
diff --git a/src/PVE/API2/LXC.pm b/src/PVE/API2/LXC.pm
index 61eaaf7..32ae465 100644
--- a/src/PVE/API2/LXC.pm
+++ b/src/PVE/API2/LXC.pm
@@ -3,6 +3,8 @@ package PVE::API2::LXC;
use strict;
use warnings;
+use IO::Socket::UNIX;
+use Socket qw(SOCK_STREAM);
+
use PVE::SafeSyslog;
use PVE::Tools qw(extract_param run_command);
use PVE::Exception qw(raise raise_param_exc raise_perm_exc);
@@ -1084,6 +1086,174 @@ __PACKAGE__->register_method ({
}});
+__PACKAGE__->register_method({
+ name => 'remote_migrate_vm',
+ path => '{vmid}/remote_migrate',
+ method => 'POST',
+ protected => 1,
+ proxyto => 'node',
+ description => "Migrate the container to another cluster. Creates a new migration task.",
+ permissions => {
+ check => ['perm', '/vms/{vmid}', [ 'VM.Migrate' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid }),
+ 'target-vmid' => get_standard_option('pve-vmid', { optional => 1 }),
+ 'target-endpoint' => get_standard_option('proxmox-remote', {
+ description => "Remote target endpoint",
+ }),
+ online => {
+ type => 'boolean',
+ description => "Use online/live migration.",
+ optional => 1,
+ },
+ restart => {
+ type => 'boolean',
+ description => "Use restart migration",
+ optional => 1,
+ },
+ timeout => {
+ type => 'integer',
+ description => "Timeout in seconds for shutdown for restart migration",
+ optional => 1,
+ default => 180,
+ },
+ delete => {
+ type => 'boolean',
+ description => "Delete the original VM and related data after successful migration. By default the original VM is kept on the source cluster in a stopped state.",
+ optional => 1,
+ default => 0,
+ },
+ 'target-storage' => get_standard_option('pve-targetstorage', {
+ optional => 0,
+ }),
+ 'target-bridge' => {
+ type => 'string',
+ description => "Mapping from source to target bridges. Providing only a single bridge ID maps all source bridges to that bridge. Providing the special value '1' will map each source bridge to itself.",
+ format => 'bridge-pair-list',
+ },
+ bwlimit => {
+ description => "Override I/O bandwidth limit (in KiB/s).",
+ optional => 1,
+ type => 'number',
+ minimum => '0',
+ default => 'migrate limit from datacenter or storage config',
+ },
+ },
+ },
+ returns => {
+ type => 'string',
+ description => "the task ID.",
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $authuser = $rpcenv->get_user();
+
+ my $source_vmid = extract_param($param, 'vmid');
+ my $target_endpoint = extract_param($param, 'target-endpoint');
+ my $target_vmid = extract_param($param, 'target-vmid') // $source_vmid;
+
+ my $delete = extract_param($param, 'delete') // 0;
+
+ PVE::Cluster::check_cfs_quorum();
+
+ # test if CT exists
+ my $conf = PVE::LXC::Config->load_config($source_vmid);
+ PVE::LXC::Config->check_lock($conf);
+
+ # try to detect errors early
+ if (PVE::LXC::check_running($source_vmid)) {
+ die "can't migrate running container without --online or --restart\n"
+ if !$param->{online} && !$param->{restart};
+ }
+
+ raise_param_exc({ vmid => "cannot migrate HA-managed CT to remote cluster" })
+ if PVE::HA::Config::vm_is_ha_managed($source_vmid);
+
+ my $remote = PVE::JSONSchema::parse_property_string('proxmox-remote', $target_endpoint);
+
+ # TODO: move this into a helper somewhere appropriate?
+ my $conn_args = {
+ protocol => 'https',
+ host => $remote->{host},
+ port => $remote->{port} // 8006,
+ apitoken => $remote->{apitoken},
+ };
+
+ my $fp;
+ if ($fp = $remote->{fingerprint}) {
+ $conn_args->{cached_fingerprints} = { uc($fp) => 1 };
+ }
+
+ print "Establishing API connection with remote at '$remote->{host}'\n";
+
+ my $api_client = PVE::APIClient::LWP->new(%$conn_args);
+
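+ # no explicit fingerprint given - pin the certificate the remote reports
+ # for itself, preferring the custom pveproxy-ssl.pem over pve-ssl.pem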
+ if (!defined($fp)) {
+ my $cert_info = $api_client->get("/nodes/localhost/certificates/info");
+ foreach my $cert (@$cert_info) {
+ my $filename = $cert->{filename};
+ next if $filename ne 'pveproxy-ssl.pem' && $filename ne 'pve-ssl.pem';
+ $fp = $cert->{fingerprint} if !$fp || $filename eq 'pveproxy-ssl.pem';
+ }
+ $conn_args->{cached_fingerprints} = { uc($fp) => 1 }
+ if defined($fp);
+ }
+
+ my $storecfg = PVE::Storage::config();
+ my $target_storage = extract_param($param, 'target-storage');
+ my $storagemap = eval { PVE::JSONSchema::parse_idmap($target_storage, 'pve-storage-id') };
+ raise_param_exc({ 'target-storage' => "failed to parse storage map: $@" })
+ if $@;
+
+ my $target_bridge = extract_param($param, 'target-bridge');
+ my $bridgemap = eval { PVE::JSONSchema::parse_idmap($target_bridge, 'pve-bridge-id') };
+ raise_param_exc({ 'target-bridge' => "failed to parse bridge map: $@" })
+ if $@;
+
+ die "remote migration requires explicit storage mapping!\n"
+ if $storagemap->{identity};
+
+ $param->{storagemap} = $storagemap;
+ $param->{bridgemap} = $bridgemap;
+ $param->{remote} = {
+ conn => $conn_args, # re-use fingerprint for tunnel
+ client => $api_client,
+ vmid => $target_vmid,
+ };
+ $param->{migration_type} = 'websocket';
+ $param->{delete} = $delete if $delete;
+
+ my $cluster_status = $api_client->get("/cluster/status");
+ my $target_node;
+ foreach my $entry (@$cluster_status) {
+ next if $entry->{type} ne 'node';
+ if ($entry->{local}) {
+ $target_node = $entry->{name};
+ last;
+ }
+ }
+
+ die "couldn't determine endpoint's node name\n"
+ if !defined($target_node);
+
+ my $realcmd = sub {
+ PVE::LXC::Migrate->migrate($target_node, $remote->{host}, $source_vmid, $param);
+ };
+
+ my $worker = sub {
+ return PVE::GuestHelpers::guest_migration_lock($source_vmid, 10, $realcmd);
+ };
+
+ return $rpcenv->fork_worker('vzmigrate', $source_vmid, $authuser, $worker);
+ }});
+
+
__PACKAGE__->register_method({
name => 'migrate_vm',
path => '{vmid}/migrate',
@@ -2308,4 +2478,464 @@ __PACKAGE__->register_method({
return PVE::GuestHelpers::config_with_pending_array($conf, $pending_delete_hash);
}});
+__PACKAGE__->register_method({
+ name => 'mtunnel',
+ path => '{vmid}/mtunnel',
+ method => 'POST',
+ protected => 1,
+ description => 'Migration tunnel endpoint - only for internal use by CT migration.',
+ permissions => {
+ check => ['perm', '/vms/{vmid}', [ 'VM.Allocate' ]],
+ description => "You need 'VM.Allocate' permissions on /vms/{vmid}. Further permission checks happen during the actual migration.",
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid'),
+ storages => {
+ type => 'string',
+ format => 'pve-storage-id-list',
+ optional => 1,
+ description => 'List of storages to check permission and availability. Will be checked again for all actually used storages during migration.',
+ },
+ bridges => {
+ type => 'string',
+ format => 'pve-bridge-id-list',
+ optional => 1,
+ description => 'List of network bridges to check availability. Will be checked again for actually used bridges during migration.',
+ },
+ },
+ },
+ returns => {
+ additionalProperties => 0,
+ properties => {
+ upid => { type => 'string' },
+ ticket => { type => 'string' },
+ socket => { type => 'string' },
+ },
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $authuser = $rpcenv->get_user();
+
+ my $node = extract_param($param, 'node');
+ my $vmid = extract_param($param, 'vmid');
+
+ my $storages = extract_param($param, 'storages');
+ my $bridges = extract_param($param, 'bridges');
+
+ my $nodename = PVE::INotify::nodename();
+
+ raise_param_exc({ node => "node needs to be 'localhost' or local hostname '$nodename'" })
+ if $node ne 'localhost' && $node ne $nodename;
+
+ $node = $nodename;
+
+ my $storecfg = PVE::Storage::config();
+ foreach my $storeid (PVE::Tools::split_list($storages)) {
+ $check_storage_access_migrate->($rpcenv, $authuser, $storecfg, $storeid, $node);
+ }
+
+ foreach my $bridge (PVE::Tools::split_list($bridges)) {
+ PVE::Network::read_bridge_mtu($bridge);
+ }
+
+ PVE::Cluster::check_cfs_quorum();
+
+ my $socket_addr = "/run/pve/ct-$vmid.mtunnel";
+
+ my $lock = 'create';
+ eval { PVE::LXC::Config->create_and_lock_config($vmid, 0, $lock); };
+
+ raise_param_exc({ vmid => "unable to create empty CT config - $@"})
+ if $@;
+
+ my $realcmd = sub {
+ my $state = {
+ storecfg => PVE::Storage::config(),
+ lock => $lock,
+ vmid => $vmid,
+ };
+
+ my $run_locked = sub {
+ my ($code, $params) = @_;
+ return PVE::LXC::Config->lock_config($state->{vmid}, sub {
+ my $conf = PVE::LXC::Config->load_config($state->{vmid});
+
+ $state->{conf} = $conf;
+
+ die "Encountered wrong lock - aborting mtunnel command handling.\n"
+ if $state->{lock} && !PVE::LXC::Config->has_lock($conf, $state->{lock});
+
+ return $code->($params);
+ });
+ };
+
+ my $cmd_desc = {
+ config => {
+ conf => {
+ type => 'string',
+ description => 'Full CT config, adapted for target cluster/node',
+ },
+ 'firewall-config' => {
+ type => 'string',
+ description => 'CT firewall config',
+ optional => 1,
+ },
+ },
+ ticket => {
+ path => {
+ type => 'string',
+ description => 'Socket path for which the ticket should be valid. Must be known to the current mtunnel instance.',
+ },
+ },
+ quit => {
+ cleanup => {
+ type => 'boolean',
+ description => 'remove CT config and volumes, aborting migration',
+ default => 0,
+ },
+ },
+ 'disk-import' => $PVE::StorageTunnel::cmd_schema->{'disk-import'},
+ 'query-disk-import' => $PVE::StorageTunnel::cmd_schema->{'query-disk-import'},
+ bwlimit => $PVE::StorageTunnel::cmd_schema->{bwlimit},
+ };
+
+ my $cmd_handlers = {
+ 'version' => sub {
+ # compared against the other end's version
+ # bump 'api' and reset 'age' for breaking changes
+ # bump both 'api' and 'age' for backwards-compatible, opt-in changes
+ return {
+ api => $PVE::LXC::Migrate::WS_TUNNEL_VERSION,
+ age => 0,
+ };
+ },
+ 'config' => sub {
+ my ($params) = @_;
+
+ # parse and write out CT firewall config if given
+ if (my $fw_conf = $params->{'firewall-config'}) {
+ my ($path, $fh) = PVE::Tools::tempfile_contents($fw_conf, 700);
+
+ my $empty_conf = {
+ rules => [],
+ options => {},
+ aliases => {},
+ ipset => {},
+ ipset_comments => {},
+ };
+ my $cluster_fw_conf = PVE::Firewall::load_clusterfw_conf();
+
+ # TODO: add flag for strict parsing?
+ # TODO: add import sub that does all this given raw content?
+ my $vmfw_conf = PVE::Firewall::generic_fw_config_parser($path, $cluster_fw_conf, $empty_conf, 'vm');
+ $vmfw_conf->{vmid} = $state->{vmid};
+ PVE::Firewall::save_vmfw_conf($state->{vmid}, $vmfw_conf);
+
+ $state->{cleanup}->{fw} = 1;
+ }
+
+ my $conf_fn = "incoming/lxc/$state->{vmid}.conf";
+ my $new_conf = PVE::LXC::Config::parse_pct_config($conf_fn, $params->{conf}, 1);
+ delete $new_conf->{lock};
+ delete $new_conf->{digest};
+
+ my $unprivileged = delete $new_conf->{unprivileged};
+ my $arch = delete $new_conf->{arch};
+
+ # TODO handle properly?
+ delete $new_conf->{snapshots};
+ delete $new_conf->{parent};
+ delete $new_conf->{pending};
+ delete $new_conf->{lxc};
+
+ PVE::LXC::Config->remove_lock($state->{vmid}, 'create');
+
+ eval {
+ my $conf = {
+ unprivileged => $unprivileged,
+ arch => $arch,
+ };
+ PVE::LXC::check_ct_modify_config_perm(
+ $rpcenv,
+ $authuser,
+ $state->{vmid},
+ undef,
+ $conf,
+ $new_conf,
+ undef,
+ $unprivileged,
+ );
+ my $errors = PVE::LXC::Config->update_pct_config(
+ $state->{vmid},
+ $conf,
+ 0,
+ $new_conf,
+ [],
+ [],
+ );
+ raise_param_exc($errors) if scalar(keys %$errors);
+ PVE::LXC::Config->write_config($state->{vmid}, $conf);
+ PVE::LXC::update_lxc_config($state->{vmid}, $conf);
+ };
+ if (my $err = $@) {
+ # revert to locked previous config
+ my $conf = PVE::LXC::Config->load_config($state->{vmid});
+ $conf->{lock} = 'create';
+ PVE::LXC::Config->write_config($state->{vmid}, $conf);
+
+ die $err;
+ }
+
+ my $conf = PVE::LXC::Config->load_config($state->{vmid});
+ $conf->{lock} = 'migrate';
+ PVE::LXC::Config->write_config($state->{vmid}, $conf);
+
+ $state->{lock} = 'migrate';
+
+ return;
+ },
+ 'bwlimit' => sub {
+ my ($params) = @_;
+ return PVE::StorageTunnel::handle_bwlimit($params);
+ },
+ 'disk-import' => sub {
+ my ($params) = @_;
+
+ $check_storage_access_migrate->(
+ $rpcenv,
+ $authuser,
+ $state->{storecfg},
+ $params->{storage},
+ $node
+ );
+
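+ # volume data is streamed over a dedicated unix socket: the client asks
+ # for a ticket via the 'ticket' command and opens a second websocket
+ # connection (mtunnelwebsocket) that gets forwarded to this path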
+ $params->{unix} = "/run/pve/ct-$state->{vmid}.storage";
+
+ return PVE::StorageTunnel::handle_disk_import($state, $params);
+ },
+ 'query-disk-import' => sub {
+ my ($params) = @_;
+
+ return PVE::StorageTunnel::handle_query_disk_import($state, $params);
+ },
+ 'unlock' => sub {
+ PVE::LXC::Config->remove_lock($state->{vmid}, $state->{lock});
+ delete $state->{lock};
+ return;
+ },
+ 'start' => sub {
+ PVE::LXC::vm_start(
+ $state->{vmid},
+ $state->{conf},
+ 0
+ );
+
+ return;
+ },
+ 'stop' => sub {
+ PVE::LXC::vm_stop($state->{vmid}, 1, 10, 1);
+ return;
+ },
+ 'ticket' => sub {
+ my ($params) = @_;
+
+ my $path = $params->{path};
+
+ die "Not allowed to generate ticket for unknown socket '$path'\n"
+ if !defined($state->{sockets}->{$path});
+
+ return { ticket => PVE::AccessControl::assemble_tunnel_ticket($authuser, "/socket/$path") };
+ },
+ 'quit' => sub {
+ my ($params) = @_;
+
+ if ($params->{cleanup}) {
+ if ($state->{cleanup}->{fw}) {
+ PVE::Firewall::remove_vmfw_conf($state->{vmid});
+ }
+
+ for my $volid (keys $state->{cleanup}->{volumes}->%*) {
+ print "freeing volume '$volid' as part of cleanup\n";
+ eval { PVE::Storage::vdisk_free($state->{storecfg}, $volid) };
+ warn $@ if $@;
+ }
+
+ PVE::LXC::destroy_lxc_container(
+ $state->{storecfg},
+ $state->{vmid},
+ $state->{conf},
+ undef,
+ 0,
+ );
+ }
+
+ print "switching to exit-mode, waiting for client to disconnect\n";
+ $state->{exit} = 1;
+ return;
+ },
+ };
+
+ $run_locked->(sub {
+ my $socket_addr = "/run/pve/ct-$state->{vmid}.mtunnel";
+ unlink $socket_addr;
+
+ $state->{socket} = IO::Socket::UNIX->new(
+ Type => SOCK_STREAM(),
+ Local => $socket_addr,
+ Listen => 1,
+ );
+
+ $state->{socket_uid} = getpwnam('www-data')
+ or die "Failed to resolve user 'www-data' to numeric UID\n";
+ chown $state->{socket_uid}, -1, $socket_addr;
+ });
+
+ print "mtunnel started\n";
+
+ my $conn = eval { PVE::Tools::run_with_timeout(300, sub { $state->{socket}->accept() }) };
+ if ($@) {
+ warn "Failed to accept tunnel connection - $@\n";
+
+ warn "Removing tunnel socket..\n";
+ unlink $socket_addr;
+
+ warn "Removing temporary VM config..\n";
+ $run_locked->(sub {
+ PVE::LXC::destroy_config($state->{vmid});
+ });
+
+ die "Exiting mtunnel\n";
+ }
+
+ $state->{conn} = $conn;
+
+ my $reply_err = sub {
+ my ($msg) = @_;
+
+ my $reply = JSON::encode_json({
+ success => JSON::false,
+ msg => $msg,
+ });
+ $conn->print("$reply\n");
+ $conn->flush();
+ };
+
+ my $reply_ok = sub {
+ my ($res) = @_;
+
+ $res->{success} = JSON::true;
+ my $reply = JSON::encode_json($res);
+ $conn->print("$reply\n");
+ $conn->flush();
+ };
+
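+ # tunnel wire format: one JSON object per line - e.g. sending
+ # {"cmd":"version"} is answered with {"success":true,"api":2,"age":0},
+ # while failures are reported as {"success":false,"msg":"..."}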
+ while (my $line = <$conn>) {
+ chomp $line;
+
+ # untaint, we validate below if needed
+ ($line) = $line =~ /^(.*)$/;
+ my $parsed = eval { JSON::decode_json($line) };
+ if ($@) {
+ $reply_err->("failed to parse command - $@");
+ next;
+ }
+
+ my $cmd = delete $parsed->{cmd};
+ if (!defined($cmd)) {
+ $reply_err->("'cmd' missing");
+ } elsif ($state->{exit}) {
+ $reply_err->("tunnel is in exit-mode, processing '$cmd' cmd not possible");
+ next;
+ } elsif (my $handler = $cmd_handlers->{$cmd}) {
+ print "received command '$cmd'\n";
+ eval {
+ if ($cmd_desc->{$cmd}) {
+ PVE::JSONSchema::validate($cmd_desc->{$cmd}, $parsed);
+ } else {
+ $parsed = {};
+ }
+ my $res = $run_locked->($handler, $parsed);
+ $reply_ok->($res);
+ };
+ $reply_err->("failed to handle '$cmd' command - $@")
+ if $@;
+ } else {
+ $reply_err->("unknown command '$cmd' given");
+ }
+ }
+
+ if ($state->{exit}) {
+ print "mtunnel exited\n";
+ } else {
+ die "mtunnel exited unexpectedly\n";
+ }
+ };
+
+ my $ticket = PVE::AccessControl::assemble_tunnel_ticket($authuser, "/socket/$socket_addr");
+ my $upid = $rpcenv->fork_worker('vzmtunnel', $vmid, $authuser, $realcmd);
+
+ return {
+ ticket => $ticket,
+ upid => $upid,
+ socket => $socket_addr,
+ };
+ }});
+
+__PACKAGE__->register_method({
+ name => 'mtunnelwebsocket',
+ path => '{vmid}/mtunnelwebsocket',
+ method => 'GET',
+ permissions => {
+ description => "You need to pass a ticket valid for the selected socket. Tickets can be created via the mtunnel API call, which will check permissions accordingly.",
+ user => 'all', # check inside
+ },
+ description => 'Migration tunnel endpoint for websocket upgrade - only for internal use by CT migration.',
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid'),
+ socket => {
+ type => "string",
+ description => "unix socket to forward to",
+ },
+ ticket => {
+ type => "string",
+ description => "ticket return by initial 'mtunnel' API call, or retrieved via 'ticket' tunnel command",
+ },
+ },
+ },
+ returns => {
+ type => "object",
+ properties => {
+ port => { type => 'string', optional => 1 },
+ socket => { type => 'string', optional => 1 },
+ },
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $authuser = $rpcenv->get_user();
+
+ my $nodename = PVE::INotify::nodename();
+ my $node = extract_param($param, 'node');
+
+ raise_param_exc({ node => "node needs to be 'localhost' or local hostname '$nodename'" })
+ if $node ne 'localhost' && $node ne $nodename;
+
+ my $vmid = $param->{vmid};
+ # check VM exists
+ PVE::LXC::Config->load_config($vmid);
+
+ my $socket = $param->{socket};
+ PVE::AccessControl::verify_tunnel_ticket($param->{ticket}, $authuser, "/socket/$socket");
+
+ return { socket => $socket };
+ }});
1;
diff --git a/src/PVE/LXC/Migrate.pm b/src/PVE/LXC/Migrate.pm
index c85a09c..84911e3 100644
--- a/src/PVE/LXC/Migrate.pm
+++ b/src/PVE/LXC/Migrate.pm
@@ -17,6 +17,9 @@ use PVE::Replication;
use base qw(PVE::AbstractMigrate);
+# compared against remote end's minimum version
+our $WS_TUNNEL_VERSION = 2;
+
sub lock_vm {
my ($self, $vmid, $code, @param) = @_;
@@ -28,6 +31,7 @@ sub prepare {
my $online = $self->{opts}->{online};
my $restart = $self->{opts}->{restart};
+ my $remote = $self->{opts}->{remote};
$self->{storecfg} = PVE::Storage::config();
@@ -44,6 +48,7 @@ sub prepare {
}
$self->{was_running} = $running;
+ my $storages = {};
PVE::LXC::Config->foreach_volume_full($conf, { include_unused => 1 }, sub {
my ($ms, $mountpoint) = @_;
@@ -70,7 +75,7 @@ sub prepare {
die "content type 'rootdir' is not available on storage '$storage'\n"
if !$scfg->{content}->{rootdir};
- if ($scfg->{shared}) {
+ if ($scfg->{shared} && !$remote) {
# PVE::Storage::activate_storage checks this for non-shared storages
my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
warn "Used shared storage '$storage' is not online on source node!\n"
@@ -83,18 +88,63 @@ sub prepare {
$targetsid = PVE::JSONSchema::map_id($self->{opts}->{storagemap}, $storage);
}
- my $target_scfg = PVE::Storage::storage_check_enabled($self->{storecfg}, $targetsid, $self->{node});
+ if (!$remote) {
+ my $target_scfg = PVE::Storage::storage_check_enabled($self->{storecfg}, $targetsid, $self->{node});
+
+ die "$volid: content type 'rootdir' is not available on storage '$targetsid'\n"
+ if !$target_scfg->{content}->{rootdir};
+ }
- die "$volid: content type 'rootdir' is not available on storage '$targetsid'\n"
- if !$target_scfg->{content}->{rootdir};
+ $storages->{$targetsid} = 1;
});
# todo: test if VM uses local resources
- # test ssh connection
- my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
- eval { $self->cmd_quiet($cmd); };
- die "Can't connect to destination address using public key\n" if $@;
+ if ($remote) {
+ # test & establish websocket connection
+ my $bridges = map_bridges($conf, $self->{opts}->{bridgemap}, 1);
+
+ my $remote = $self->{opts}->{remote};
+ my $conn = $remote->{conn};
+
+ my $log = sub {
+ my ($level, $msg) = @_;
+ $self->log($level, $msg);
+ };
+
+ my $websocket_url = "https://$conn->{host}:$conn->{port}/api2/json/nodes/$self->{node}/lxc/$remote->{vmid}/mtunnelwebsocket";
+ my $url = "/nodes/$self->{node}/lxc/$remote->{vmid}/mtunnel";
+
+ my $tunnel_params = {
+ url => $websocket_url,
+ };
+
+ my $storage_list = join(',', keys %$storages);
+ my $bridge_list = join(',', keys %$bridges);
+
+ my $req_params = {
+ storages => $storage_list,
+ bridges => $bridge_list,
+ };
+
+ my $tunnel = PVE::Tunnel::fork_websocket_tunnel($conn, $url, $req_params, $tunnel_params, $log);
+ my $min_version = $tunnel->{version} - $tunnel->{age};
+ $self->log('info', "local WS tunnel version: $WS_TUNNEL_VERSION");
+ $self->log('info', "remote WS tunnel version: $tunnel->{version}");
+ $self->log('info', "minimum required WS tunnel version: $min_version");
+ die "Remote tunnel endpoint not compatible, upgrade required\n"
+ if $WS_TUNNEL_VERSION < $min_version;
+ die "Remote tunnel endpoint too old, upgrade required\n"
+ if $WS_TUNNEL_VERSION > $tunnel->{version};
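+ # e.g. a remote with version=3 and age=1 accepts local versions 2 and 3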
+
+ $self->log('info', "websocket tunnel started\n");
+ $self->{tunnel} = $tunnel;
+ } else {
+ # test ssh connection
+ my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
+ eval { $self->cmd_quiet($cmd); };
+ die "Can't connect to destination address using public key\n" if $@;
+ }
# in restart mode, we shutdown the container before migrating
if ($restart && $running) {
@@ -113,6 +163,8 @@ sub prepare {
sub phase1 {
my ($self, $vmid) = @_;
+ my $remote = $self->{opts}->{remote};
+
$self->log('info', "starting migration of CT $self->{vmid} to node '$self->{node}' ($self->{nodeip})");
my $conf = $self->{vmconf};
@@ -147,7 +199,7 @@ sub phase1 {
my $targetsid = $sid;
- if ($scfg->{shared}) {
+ if ($scfg->{shared} && !$remote) {
$self->log('info', "volume '$volid' is on shared storage '$sid'")
if !$snapname;
return;
@@ -155,7 +207,8 @@ sub phase1 {
$targetsid = PVE::JSONSchema::map_id($self->{opts}->{storagemap}, $sid);
}
- PVE::Storage::storage_check_enabled($self->{storecfg}, $targetsid, $self->{node});
+ PVE::Storage::storage_check_enabled($self->{storecfg}, $targetsid, $self->{node})
+ if !$remote;
my $bwlimit = $self->get_bwlimit($sid, $targetsid);
@@ -192,6 +245,8 @@ sub phase1 {
eval {
&$test_volid($volid, $snapname);
+
+ die "remote migration with snapshots not supported yet\n" if $remote;
};
&$log_error($@, $volid) if $@;
@@ -201,7 +256,7 @@ sub phase1 {
my @sids = PVE::Storage::storage_ids($self->{storecfg});
foreach my $storeid (@sids) {
my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
- next if $scfg->{shared};
+ next if $scfg->{shared} && !$remote;
next if !PVE::Storage::storage_check_enabled($self->{storecfg}, $storeid, undef, 1);
# get list from PVE::Storage (for unreferenced volumes)
@@ -211,7 +266,8 @@ sub phase1 {
# check if storage is available on target node
my $targetsid = PVE::JSONSchema::map_id($self->{opts}->{storagemap}, $storeid);
- PVE::Storage::storage_check_enabled($self->{storecfg}, $targetsid, $self->{node});
+ PVE::Storage::storage_check_enabled($self->{storecfg}, $targetsid, $self->{node})
+ if !$remote;
die "content type 'rootdir' is not available on storage '$storeid'\n"
if !$scfg->{content}->{rootdir};
@@ -239,12 +295,21 @@ sub phase1 {
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid);
- my $migratable = ($scfg->{type} eq 'dir') || ($scfg->{type} eq 'zfspool')
- || ($scfg->{type} eq 'lvmthin') || ($scfg->{type} eq 'lvm')
- || ($scfg->{type} eq 'btrfs');
+ # TODO move to storage plugin layer?
+ my $migratable_storages = [
+ 'dir',
+ 'zfspool',
+ 'lvmthin',
+ 'lvm',
+ 'btrfs',
+ ];
+ if ($remote) {
+ push @$migratable_storages, 'cifs';
+ push @$migratable_storages, 'nfs';
+ }
die "storage type '$scfg->{type}' not supported\n"
- if !$migratable;
+ if !grep { $_ eq $scfg->{type} } @$migratable_storages;
# image is a linked clone on local storage, so we can't migrate.
if (my $basename = (PVE::Storage::parse_volname($self->{storecfg}, $volid))[3]) {
@@ -279,7 +344,10 @@ sub phase1 {
my $rep_cfg = PVE::ReplicationConfig->new();
- if (my $jobcfg = $rep_cfg->find_local_replication_job($vmid, $self->{node})) {
+ if ($remote) {
+ die "cannot remote-migrate replicated VM\n"
+ if $rep_cfg->check_for_existing_jobs($vmid, 1);
+ } elsif (my $jobcfg = $rep_cfg->find_local_replication_job($vmid, $self->{node})) {
die "can't live migrate VM with replicated volumes\n" if $self->{running};
my $start_time = time();
my $logfunc = sub { my ($msg) = @_; $self->log('info', $msg); };
@@ -290,7 +358,6 @@ sub phase1 {
my $opts = $self->{opts};
foreach my $volid (keys %$volhash) {
next if $rep_volumes->{$volid};
- my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
push @{$self->{volumes}}, $volid;
# JSONSchema and get_bandwidth_limit use kbps - storage_migrate bps
@@ -300,21 +367,38 @@ sub phase1 {
my $targetsid = $volhash->{$volid}->{targetsid};
my $new_volid = eval {
- my $storage_migrate_opts = {
- 'ratelimit_bps' => $bwlimit,
- 'insecure' => $opts->{migration_type} eq 'insecure',
- 'with_snapshots' => $volhash->{$volid}->{snapshots},
- };
-
- my $logfunc = sub { $self->log('info', $_[0]); };
- return PVE::Storage::storage_migrate(
- $self->{storecfg},
- $volid,
- $self->{ssh_info},
- $targetsid,
- $storage_migrate_opts,
- $logfunc,
- );
+ if ($remote) {
+ my $log = sub {
+ my ($level, $msg) = @_;
+ $self->log($level, $msg);
+ };
+
+ return PVE::StorageTunnel::storage_migrate(
+ $self->{tunnel},
+ $self->{storecfg},
+ $volid,
+ $self->{vmid},
+ $remote->{vmid},
+ $volhash->{$volid},
+ $log,
+ );
+ } else {
+ my $storage_migrate_opts = {
+ 'ratelimit_bps' => $bwlimit,
+ 'insecure' => $opts->{migration_type} eq 'insecure',
+ 'with_snapshots' => $volhash->{$volid}->{snapshots},
+ };
+
+ my $logfunc = sub { $self->log('info', $_[0]); };
+ return PVE::Storage::storage_migrate(
+ $self->{storecfg},
+ $volid,
+ $self->{ssh_info},
+ $targetsid,
+ $storage_migrate_opts,
+ $logfunc,
+ );
+ }
};
if (my $err = $@) {
@@ -344,13 +428,38 @@ sub phase1 {
my $vollist = PVE::LXC::Config->get_vm_volumes($conf);
PVE::Storage::deactivate_volumes($self->{storecfg}, $vollist);
- # transfer replication state before moving config
- $self->transfer_replication_state() if $rep_volumes;
- PVE::LXC::Config->update_volume_ids($conf, $self->{volume_map});
- PVE::LXC::Config->write_config($vmid, $conf);
- PVE::LXC::Config->move_config_to_node($vmid, $self->{node});
+ if ($remote) {
+ my $remote_conf = PVE::LXC::Config->load_config($vmid);
+ PVE::LXC::Config->update_volume_ids($remote_conf, $self->{volume_map});
+
+ my $bridges = map_bridges($remote_conf, $self->{opts}->{bridgemap});
+ for my $target (keys $bridges->%*) {
+ for my $nic (keys $bridges->{$target}->%*) {
+ $self->log('info', "mapped: $nic from $bridges->{$target}->{$nic} to $target");
+ }
+ }
+ my $conf_str = PVE::LXC::Config::write_pct_config("remote", $remote_conf);
+
+ # TODO expose in PVE::Firewall?
+ my $vm_fw_conf_path = "/etc/pve/firewall/$vmid.fw";
+ my $fw_conf_str;
+ $fw_conf_str = PVE::Tools::file_get_contents($vm_fw_conf_path)
+ if -e $vm_fw_conf_path;
+ my $params = {
+ conf => $conf_str,
+ 'firewall-config' => $fw_conf_str,
+ };
+
+ PVE::Tunnel::write_tunnel($self->{tunnel}, 10, 'config', $params);
+ } else {
+ # transfer replication state before moving config
+ $self->transfer_replication_state() if $rep_volumes;
+ PVE::LXC::Config->update_volume_ids($conf, $self->{volume_map});
+ PVE::LXC::Config->write_config($vmid, $conf);
+ PVE::LXC::Config->move_config_to_node($vmid, $self->{node});
+ $self->switch_replication_job_target() if $rep_volumes;
+ }
$self->{conf_migrated} = 1;
- $self->switch_replication_job_target() if $rep_volumes;
}
sub phase1_cleanup {
@@ -364,6 +473,12 @@ sub phase1_cleanup {
# fixme: try to remove ?
}
}
+
+ if ($self->{opts}->{remote}) {
+ # cleans up remote volumes
+ PVE::Tunnel::finish_tunnel($self->{tunnel}, 1);
+ delete $self->{tunnel};
+ }
}
sub phase3 {
@@ -371,6 +486,9 @@ sub phase3 {
my $volids = $self->{volumes};
+ # handled below in final_cleanup
+ return if $self->{opts}->{remote};
+
# destroy local copies
foreach my $volid (@$volids) {
eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
@@ -399,6 +517,24 @@ sub final_cleanup {
my $skiplock = 1;
PVE::LXC::vm_start($vmid, $self->{vmconf}, $skiplock);
}
+ } elsif ($self->{opts}->{remote}) {
+ eval { PVE::Tunnel::write_tunnel($self->{tunnel}, 10, 'unlock') };
+ $self->log('err', "Failed to clear migrate lock - $@\n") if $@;
+
+ if ($self->{opts}->{restart} && $self->{was_running}) {
+ $self->log('info', "start container on target node");
+ PVE::Tunnel::write_tunnel($self->{tunnel}, 60, 'start');
+ }
+ if ($self->{opts}->{delete}) {
+ PVE::LXC::destroy_lxc_container(
+ PVE::Storage::config(),
+ $vmid,
+ PVE::LXC::Config->load_config($vmid),
+ undef,
+ 0,
+ );
+ }
+ PVE::Tunnel::finish_tunnel($self->{tunnel});
} else {
my $cmd = [ @{$self->{rem_ssh}}, 'pct', 'unlock', $vmid ];
$self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
@@ -411,7 +547,30 @@ sub final_cleanup {
$self->cmd($cmd);
}
}
+}
+
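+# scan the netX entries of $conf and map their bridges according to $map;
+# returns { $target_bridge => { $netX => $source_bridge } }; unless
+# $scan_only is set, the config entries are also rewritten in place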
+sub map_bridges {
+ my ($conf, $map, $scan_only) = @_;
+
+ my $bridges = {};
+
+ foreach my $opt (keys %$conf) {
+ next if $opt !~ m/^net\d+$/;
+
+ next if !$conf->{$opt};
+ my $d = PVE::LXC::Config->parse_lxc_network($conf->{$opt});
+ next if !$d || !$d->{bridge};
+
+ my $target_bridge = PVE::JSONSchema::map_id($map, $d->{bridge});
+ $bridges->{$target_bridge}->{$opt} = $d->{bridge};
+
+ next if $scan_only;
+
+ $d->{bridge} = $target_bridge;
+ $conf->{$opt} = PVE::LXC::Config->print_lxc_network($d);
+ }
+ return $bridges;
}
1;
--
2.30.2