* [pve-devel] [PATCH v5 qemu-server 1/3] migration: move livemigration code in a dedicated sub
From: Alexandre Derumier @ 2023-10-26 8:57 UTC
To: pve-devel
Signed-off-by: Alexandre Derumier <aderumier@odiso.com>
---
PVE/QemuMigrate.pm | 420 +++++++++++++++++++++++----------------------
1 file changed, 214 insertions(+), 206 deletions(-)
diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index 111eeb0..7dd3455 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -728,6 +728,219 @@ sub cleanup_bitmaps {
}
}
+sub live_migration {
+ my ($self, $vmid, $migrate_uri, $spice_port) = @_;
+
+ my $conf = $self->{vmconf};
+
+ $self->log('info', "starting online/live migration on $migrate_uri");
+ $self->{livemigration} = 1;
+
+ # load_defaults
+ my $defaults = PVE::QemuServer::load_defaults();
+
+ $self->log('info', "set migration capabilities");
+ eval { PVE::QemuServer::set_migration_caps($vmid) };
+ warn $@ if $@;
+
+ my $qemu_migrate_params = {};
+
+ # migrate speed can be set via bwlimit (datacenter.cfg and API) and via the
+ # migrate_speed parameter in qm.conf - take the lower of the two.
+ my $bwlimit = $self->get_bwlimit();
+
+ my $migrate_speed = $conf->{migrate_speed} // 0;
+ $migrate_speed *= 1024; # migrate_speed is in MB/s, bwlimit in KB/s
+
+ if ($bwlimit && $migrate_speed) {
+ $migrate_speed = ($bwlimit < $migrate_speed) ? $bwlimit : $migrate_speed;
+ } else {
+ $migrate_speed ||= $bwlimit;
+ }
+ $migrate_speed ||= ($defaults->{migrate_speed} || 0) * 1024;
+
+ if ($migrate_speed) {
+ $migrate_speed *= 1024; # qmp takes migrate_speed in B/s.
+ $self->log('info', "migration speed limit: ". render_bytes($migrate_speed, 1) ."/s");
+ } else {
+ # always set migrate speed as QEMU default to 128 MiBps == 1 Gbps, use 16 GiBps == 128 Gbps
+ $migrate_speed = (16 << 30);
+ }
+ $qemu_migrate_params->{'max-bandwidth'} = int($migrate_speed);
+
+ my $migrate_downtime = $defaults->{migrate_downtime};
+ $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
+ # migrate-set-parameters expects limit in ms
+ $migrate_downtime *= 1000;
+ $self->log('info', "migration downtime limit: $migrate_downtime ms");
+ $qemu_migrate_params->{'downtime-limit'} = int($migrate_downtime);
+
+ # set cachesize to 10% of the total memory
+ my $memory = get_current_memory($conf->{memory});
+ my $cachesize = int($memory * 1048576 / 10);
+ $cachesize = round_powerof2($cachesize);
+
+ $self->log('info', "migration cachesize: " . render_bytes($cachesize, 1));
+ $qemu_migrate_params->{'xbzrle-cache-size'} = int($cachesize);
+
+ $self->log('info', "set migration parameters");
+ eval {
+ mon_cmd($vmid, "migrate-set-parameters", %{$qemu_migrate_params});
+ };
+ $self->log('info', "migrate-set-parameters error: $@") if $@;
+
+ if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && !$self->{opts}->{remote}) {
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $authuser = $rpcenv->get_user();
+
+ my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});
+
+ my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
+ my $subject = PVE::AccessControl::read_x509_subject_spice($filename);
+
+ $self->log('info', "spice client_migrate_info");
+
+ eval {
+ mon_cmd($vmid, "client_migrate_info", protocol => 'spice',
+ hostname => $proxyticket, 'port' => 0, 'tls-port' => $spice_port,
+ 'cert-subject' => $subject);
+ };
+ $self->log('info', "client_migrate_info error: $@") if $@;
+
+ }
+
+ my $start = time();
+
+ $self->log('info', "start migrate command to $migrate_uri");
+ eval {
+ mon_cmd($vmid, "migrate", uri => $migrate_uri);
+ };
+ my $merr = $@;
+ $self->log('info', "migrate uri => $migrate_uri failed: $merr") if $merr;
+
+ my $last_mem_transferred = 0;
+ my $usleep = 1000000;
+ my $i = 0;
+ my $err_count = 0;
+ my $lastrem = undef;
+ my $downtimecounter = 0;
+ while (1) {
+ $i++;
+ my $avglstat = $last_mem_transferred ? $last_mem_transferred / $i : 0;
+
+ usleep($usleep);
+
+ my $stat = eval { mon_cmd($vmid, "query-migrate") };
+ if (my $err = $@) {
+ $err_count++;
+ warn "query migrate failed: $err\n";
+ $self->log('info', "query migrate failed: $err");
+ if ($err_count <= 5) {
+ usleep(1_000_000);
+ next;
+ }
+ die "too many query migrate failures - aborting\n";
+ }
+
+ my $status = $stat->{status};
+ if (defined($status) && $status =~ m/^(setup)$/im) {
+ sleep(1);
+ next;
+ }
+
+ if (!defined($status) || $status !~ m/^(active|completed|failed|cancelled)$/im) {
+ die $merr if $merr;
+ die "unable to parse migration status '$status' - aborting\n";
+ }
+ $merr = undef;
+ $err_count = 0;
+
+ my $memstat = $stat->{ram};
+
+ if ($status eq 'completed') {
+ my $delay = time() - $start;
+ if ($delay > 0) {
+ my $total = $memstat->{total} || 0;
+ my $avg_speed = render_bytes($total / $delay, 1);
+ my $downtime = $stat->{downtime} || 0;
+ $self->log('info', "average migration speed: $avg_speed/s - downtime $downtime ms");
+ }
+ }
+
+ if ($status eq 'failed' || $status eq 'cancelled') {
+ my $message = $stat->{'error-desc'} ? "$status - $stat->{'error-desc'}" : $status;
+ $self->log('info', "migration status error: $message");
+ die "aborting\n"
+ }
+
+ if ($status ne 'active') {
+ $self->log('info', "migration status: $status");
+ last;
+ }
+
+ if ($memstat->{transferred} ne $last_mem_transferred) {
+ my $trans = $memstat->{transferred} || 0;
+ my $rem = $memstat->{remaining} || 0;
+ my $total = $memstat->{total} || 0;
+ my $speed = ($memstat->{'pages-per-second'} // 0) * ($memstat->{'page-size'} // 0);
+ my $dirty_rate = ($memstat->{'dirty-pages-rate'} // 0) * ($memstat->{'page-size'} // 0);
+
+ # reduce sleep if remaining memory is lower than the average transfer speed
+ $usleep = 100_000 if $avglstat && $rem < $avglstat;
+
+ # also reduce logging if we poll more frequently
+ my $should_log = $usleep > 100_000 ? 1 : ($i % 10) == 0;
+
+ my $total_h = render_bytes($total, 1);
+ my $transferred_h = render_bytes($trans, 1);
+ my $speed_h = render_bytes($speed, 1);
+
+ my $progress = "transferred $transferred_h of $total_h VM-state, ${speed_h}/s";
+
+ if ($dirty_rate > $speed) {
+ my $dirty_rate_h = render_bytes($dirty_rate, 1);
+ $progress .= ", VM dirties lots of memory: $dirty_rate_h/s";
+ }
+
+ $self->log('info', "migration $status, $progress") if $should_log;
+
+ my $xbzrle = $stat->{"xbzrle-cache"} || {};
+ my ($xbzrlebytes, $xbzrlepages) = $xbzrle->@{'bytes', 'pages'};
+ if ($xbzrlebytes || $xbzrlepages) {
+ my $bytes_h = render_bytes($xbzrlebytes, 1);
+
+ my $msg = "send updates to $xbzrlepages pages in $bytes_h encoded memory";
+
+ $msg .= sprintf(", cache-miss %.2f%%", $xbzrle->{'cache-miss-rate'} * 100)
+ if $xbzrle->{'cache-miss-rate'};
+
+ $msg .= ", overflow $xbzrle->{overflow}" if $xbzrle->{overflow};
+
+ $self->log('info', "xbzrle: $msg") if $should_log;
+ }
+
+ if (($lastrem && $rem > $lastrem) || ($rem == 0)) {
+ $downtimecounter++;
+ }
+ $lastrem = $rem;
+
+ if ($downtimecounter > 5) {
+ $downtimecounter = 0;
+ $migrate_downtime *= 2;
+ $self->log('info', "auto-increased downtime to continue migration: $migrate_downtime ms");
+ eval {
+ # migrate-set-parameters does not touch values not
+ # specified, so this only changes downtime-limit
+ mon_cmd($vmid, "migrate-set-parameters", 'downtime-limit' => int($migrate_downtime));
+ };
+ $self->log('info', "migrate-set-parameters error: $@") if $@;
+ }
+ }
+
+ $last_mem_transferred = $memstat->{transferred};
+ }
+}
+
sub phase1 {
my ($self, $vmid) = @_;
@@ -1139,212 +1352,7 @@ sub phase2 {
}
}
- $self->log('info', "starting online/live migration on $migrate_uri");
- $self->{livemigration} = 1;
-
- # load_defaults
- my $defaults = PVE::QemuServer::load_defaults();
-
- $self->log('info', "set migration capabilities");
- eval { PVE::QemuServer::set_migration_caps($vmid) };
- warn $@ if $@;
-
- my $qemu_migrate_params = {};
-
- # migrate speed can be set via bwlimit (datacenter.cfg and API) and via the
- # migrate_speed parameter in qm.conf - take the lower of the two.
- my $bwlimit = $self->get_bwlimit();
-
- my $migrate_speed = $conf->{migrate_speed} // 0;
- $migrate_speed *= 1024; # migrate_speed is in MB/s, bwlimit in KB/s
-
- if ($bwlimit && $migrate_speed) {
- $migrate_speed = ($bwlimit < $migrate_speed) ? $bwlimit : $migrate_speed;
- } else {
- $migrate_speed ||= $bwlimit;
- }
- $migrate_speed ||= ($defaults->{migrate_speed} || 0) * 1024;
-
- if ($migrate_speed) {
- $migrate_speed *= 1024; # qmp takes migrate_speed in B/s.
- $self->log('info', "migration speed limit: ". render_bytes($migrate_speed, 1) ."/s");
- } else {
- # always set migrate speed as QEMU default to 128 MiBps == 1 Gbps, use 16 GiBps == 128 Gbps
- $migrate_speed = (16 << 30);
- }
- $qemu_migrate_params->{'max-bandwidth'} = int($migrate_speed);
-
- my $migrate_downtime = $defaults->{migrate_downtime};
- $migrate_downtime = $conf->{migrate_downtime} if defined($conf->{migrate_downtime});
- # migrate-set-parameters expects limit in ms
- $migrate_downtime *= 1000;
- $self->log('info', "migration downtime limit: $migrate_downtime ms");
- $qemu_migrate_params->{'downtime-limit'} = int($migrate_downtime);
-
- # set cachesize to 10% of the total memory
- my $memory = get_current_memory($conf->{memory});
- my $cachesize = int($memory * 1048576 / 10);
- $cachesize = round_powerof2($cachesize);
-
- $self->log('info', "migration cachesize: " . render_bytes($cachesize, 1));
- $qemu_migrate_params->{'xbzrle-cache-size'} = int($cachesize);
-
- $self->log('info', "set migration parameters");
- eval {
- mon_cmd($vmid, "migrate-set-parameters", %{$qemu_migrate_params});
- };
- $self->log('info', "migrate-set-parameters error: $@") if $@;
-
- if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && !$self->{opts}->{remote}) {
- my $rpcenv = PVE::RPCEnvironment::get();
- my $authuser = $rpcenv->get_user();
-
- my (undef, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $self->{node});
-
- my $filename = "/etc/pve/nodes/$self->{node}/pve-ssl.pem";
- my $subject = PVE::AccessControl::read_x509_subject_spice($filename);
-
- $self->log('info', "spice client_migrate_info");
-
- eval {
- mon_cmd($vmid, "client_migrate_info", protocol => 'spice',
- hostname => $proxyticket, 'port' => 0, 'tls-port' => $spice_port,
- 'cert-subject' => $subject);
- };
- $self->log('info', "client_migrate_info error: $@") if $@;
-
- }
-
- my $start = time();
-
- $self->log('info', "start migrate command to $migrate_uri");
- eval {
- mon_cmd($vmid, "migrate", uri => $migrate_uri);
- };
- my $merr = $@;
- $self->log('info', "migrate uri => $migrate_uri failed: $merr") if $merr;
-
- my $last_mem_transferred = 0;
- my $usleep = 1000000;
- my $i = 0;
- my $err_count = 0;
- my $lastrem = undef;
- my $downtimecounter = 0;
- while (1) {
- $i++;
- my $avglstat = $last_mem_transferred ? $last_mem_transferred / $i : 0;
-
- usleep($usleep);
-
- my $stat = eval { mon_cmd($vmid, "query-migrate") };
- if (my $err = $@) {
- $err_count++;
- warn "query migrate failed: $err\n";
- $self->log('info', "query migrate failed: $err");
- if ($err_count <= 5) {
- usleep(1_000_000);
- next;
- }
- die "too many query migrate failures - aborting\n";
- }
-
- my $status = $stat->{status};
- if (defined($status) && $status =~ m/^(setup)$/im) {
- sleep(1);
- next;
- }
-
- if (!defined($status) || $status !~ m/^(active|completed|failed|cancelled)$/im) {
- die $merr if $merr;
- die "unable to parse migration status '$status' - aborting\n";
- }
- $merr = undef;
- $err_count = 0;
-
- my $memstat = $stat->{ram};
-
- if ($status eq 'completed') {
- my $delay = time() - $start;
- if ($delay > 0) {
- my $total = $memstat->{total} || 0;
- my $avg_speed = render_bytes($total / $delay, 1);
- my $downtime = $stat->{downtime} || 0;
- $self->log('info', "average migration speed: $avg_speed/s - downtime $downtime ms");
- }
- }
-
- if ($status eq 'failed' || $status eq 'cancelled') {
- my $message = $stat->{'error-desc'} ? "$status - $stat->{'error-desc'}" : $status;
- $self->log('info', "migration status error: $message");
- die "aborting\n"
- }
-
- if ($status ne 'active') {
- $self->log('info', "migration status: $status");
- last;
- }
-
- if ($memstat->{transferred} ne $last_mem_transferred) {
- my $trans = $memstat->{transferred} || 0;
- my $rem = $memstat->{remaining} || 0;
- my $total = $memstat->{total} || 0;
- my $speed = ($memstat->{'pages-per-second'} // 0) * ($memstat->{'page-size'} // 0);
- my $dirty_rate = ($memstat->{'dirty-pages-rate'} // 0) * ($memstat->{'page-size'} // 0);
-
- # reduce sleep if remainig memory is lower than the average transfer speed
- $usleep = 100_000 if $avglstat && $rem < $avglstat;
-
- # also reduce loggin if we poll more frequent
- my $should_log = $usleep > 100_000 ? 1 : ($i % 10) == 0;
-
- my $total_h = render_bytes($total, 1);
- my $transferred_h = render_bytes($trans, 1);
- my $speed_h = render_bytes($speed, 1);
-
- my $progress = "transferred $transferred_h of $total_h VM-state, ${speed_h}/s";
-
- if ($dirty_rate > $speed) {
- my $dirty_rate_h = render_bytes($dirty_rate, 1);
- $progress .= ", VM dirties lots of memory: $dirty_rate_h/s";
- }
-
- $self->log('info', "migration $status, $progress") if $should_log;
-
- my $xbzrle = $stat->{"xbzrle-cache"} || {};
- my ($xbzrlebytes, $xbzrlepages) = $xbzrle->@{'bytes', 'pages'};
- if ($xbzrlebytes || $xbzrlepages) {
- my $bytes_h = render_bytes($xbzrlebytes, 1);
-
- my $msg = "send updates to $xbzrlepages pages in $bytes_h encoded memory";
-
- $msg .= sprintf(", cache-miss %.2f%%", $xbzrle->{'cache-miss-rate'} * 100)
- if $xbzrle->{'cache-miss-rate'};
-
- $msg .= ", overflow $xbzrle->{overflow}" if $xbzrle->{overflow};
-
- $self->log('info', "xbzrle: $msg") if $should_log;
- }
-
- if (($lastrem && $rem > $lastrem) || ($rem == 0)) {
- $downtimecounter++;
- }
- $lastrem = $rem;
-
- if ($downtimecounter > 5) {
- $downtimecounter = 0;
- $migrate_downtime *= 2;
- $self->log('info', "auto-increased downtime to continue migration: $migrate_downtime ms");
- eval {
- # migrate-set-parameters does not touch values not
- # specified, so this only changes downtime-limit
- mon_cmd($vmid, "migrate-set-parameters", 'downtime-limit' => int($migrate_downtime));
- };
- $self->log('info', "migrate-set-parameters error: $@") if $@;
- }
- }
-
- $last_mem_transferred = $memstat->{transferred};
- }
+ live_migration($self, $vmid, $migrate_uri, $spice_port);
if ($self->{storage_migration}) {
# finish block-job with block-job-cancel, to disconnect source VM from NBD
--
2.39.2
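For readers tracing the unit conversions in the moved code: migrate_speed from the VM config is in MB/s, get_bwlimit() returns KB/s, and QMP's migrate-set-parameters expects max-bandwidth in B/s. A minimal standalone sketch with hypothetical input values (the real sub reads them from the VM config and datacenter.cfg, and also consults the datacenter defaults):

    use strict;
    use warnings;

    my $conf_migrate_speed = 100; # hypothetical qm.conf migrate_speed, in MB/s
    my $bwlimit = 81920;          # hypothetical datacenter.cfg/API bwlimit, in KB/s

    my $migrate_speed = $conf_migrate_speed * 1024; # MB/s -> KB/s
    if ($bwlimit && $migrate_speed) {
        # both limits configured: take the lower of the two
        $migrate_speed = $bwlimit < $migrate_speed ? $bwlimit : $migrate_speed;
    } else {
        $migrate_speed ||= $bwlimit;
    }

    if ($migrate_speed) {
        $migrate_speed *= 1024; # KB/s -> B/s, as migrate-set-parameters expects
    } else {
        $migrate_speed = 16 << 30; # no limit configured: 16 GiB/s, effectively unlimited
    }

    printf "max-bandwidth: %d B/s (%d MiB/s)\n", $migrate_speed, $migrate_speed >> 20;
    # prints: max-bandwidth: 83886080 B/s (80 MiB/s)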
* [pve-devel] [PATCH v5 qemu-server 2/3] remote-migration: add restart param
From: Alexandre Derumier @ 2023-10-26 8:57 UTC
To: pve-devel
This patch adds support for migration without memory transfer.
After the optional storage migration, we cleanly shut down the source VM
and restart the target VM (like a virtual restart between source and dest).
Signed-off-by: Alexandre Derumier <aderumier@odiso.com>
---
PVE/API2/Qemu.pm | 19 +++++++++++++++++++
PVE/CLI/qm.pm | 5 +++++
PVE/QemuMigrate.pm | 31 ++++++++++++++++++++++++++++---
3 files changed, 52 insertions(+), 3 deletions(-)
diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index 38bdaab..c0ae516 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -4583,6 +4583,11 @@ __PACKAGE__->register_method({
optional => 1,
default => 0,
},
+ 'restart' => {
+ type => 'boolean',
+ description => "For online migration, skip memory migration and restart the vm.",
+ optional => 1,
+ },
'target-storage' => get_standard_option('pve-targetstorage', {
completion => \&PVE::QemuServer::complete_migration_storage,
optional => 0,
@@ -5729,6 +5734,20 @@ __PACKAGE__->register_method({
PVE::QemuServer::nbd_stop($state->{vmid});
return;
},
+ 'restart' => sub {
+ my $nocheck = 1;
+ my $timeout = 1;
+ my $shutdown = undef;
+ my $force = undef;
+ my $keepactive = 1;
+ PVE::QemuServer::vm_stop($storecfg, $state->{vmid}, $nocheck, $timeout, undef, undef, $keepactive);
+ my $info = PVE::QemuServer::vm_start_nolock(
+ $state->{storecfg},
+ $state->{vmid},
+ $state->{conf},
+ );
+ return;
+ },
'resume' => sub {
if (PVE::QemuServer::Helpers::vm_running_locally($state->{vmid})) {
PVE::QemuServer::vm_resume($state->{vmid}, 1, 1);
diff --git a/PVE/CLI/qm.pm b/PVE/CLI/qm.pm
index b17b4fe..12c5291 100755
--- a/PVE/CLI/qm.pm
+++ b/PVE/CLI/qm.pm
@@ -189,6 +189,11 @@ __PACKAGE__->register_method({
optional => 1,
default => 0,
},
+ 'restart' => {
+ type => 'boolean',
+ description => "For online migration, skip memory migration and restart the vm.",
+ optional => 1,
+ },
'target-storage' => get_standard_option('pve-targetstorage', {
completion => \&PVE::QemuServer::complete_migration_storage,
optional => 0,
diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index 7dd3455..c801362 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -731,6 +731,11 @@ sub cleanup_bitmaps {
sub live_migration {
my ($self, $vmid, $migrate_uri, $spice_port) = @_;
+ if($self->{opts}->{'restart'}){
+ $self->log('info', "using restart migration - skipping live migration.");
+ return;
+ }
+
my $conf = $self->{vmconf};
$self->log('info', "starting online/live migration on $migrate_uri");
@@ -1358,7 +1363,14 @@ sub phase2 {
# finish block-job with block-job-cancel, to disconnect source VM from NBD
# to avoid it trying to re-establish it. We are in blockjob ready state,
# thus, this command changes to it to blockjob complete (see qapi docs)
- eval { PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef, $self->{storage_migration_jobs}, 'cancel'); };
+ my $finish_cmd = "cancel";
+ if ($self->{opts}->{'restart'}) {
+ # no live migration.
+ # finish block-job with block-job-complete, the source will switch to remote NBD
+ # then we cleanly stop the source vm during phase3
+ $finish_cmd = "complete";
+ }
+ eval { PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef, $self->{storage_migration_jobs}, $finish_cmd); };
if (my $err = $@) {
die "Failed to complete storage migration: $err\n";
}
@@ -1575,7 +1587,17 @@ sub phase3_cleanup {
};
# always stop local VM with nocheck, since config is moved already
- eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); };
+ my $shutdown_timeout = undef;
+ my $shutdown = undef;
+ my $force_stop = undef;
+ if ($self->{opts}->{'restart'}) {
+ $shutdown_timeout = 180;
+ $shutdown = 1;
+ $force_stop = 1;
+ $self->log('info', "shutting down source vm");
+ }
+
+ eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1, $shutdown_timeout, $shutdown, $force_stop); };
if (my $err = $@) {
$self->log('err', "stopping vm failed - $err");
$self->{errors} = 1;
@@ -1609,7 +1631,10 @@ sub phase3_cleanup {
# clear migrate lock
if ($tunnel && $tunnel->{version} >= 2) {
PVE::Tunnel::write_tunnel($tunnel, 10, "unlock");
-
+ if ($self->{opts}->{'restart'}) {
+ $self->log('info', "restart target vm");
+ PVE::Tunnel::write_tunnel($tunnel, 10, 'restart');
+ }
PVE::Tunnel::finish_tunnel($tunnel);
} else {
my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
--
2.39.2
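To summarize the ordering this patch establishes for restart migrations, here is a condensed sketch (the calls match the ones in the diff above, but tunnel setup, config handling, locking and error handling are omitted, so this is an illustration rather than the real control flow):

    use PVE::QemuServer;
    use PVE::Tunnel;

    sub restart_migration_sketch {
        my ($self, $vmid, $tunnel) = @_;

        # 1) drive-mirror has reached the ready state: block-job-complete
        #    (instead of block-job-cancel) pivots the source VM onto the
        #    remote NBD export, so every later guest write lands on the
        #    destination before shutdown.
        PVE::QemuServer::qemu_drive_mirror_monitor(
            $vmid, undef, $self->{storage_migration_jobs}, 'complete');

        # 2) cleanly shut down the source VM (180s timeout, force stop as
        #    fallback), flushing remaining guest buffers through the mirror.
        PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1, 180, 1, 1);

        # 3) after unlocking, ask the remote side to start the target VM
        #    from the copied disks.
        PVE::Tunnel::write_tunnel($tunnel, 10, 'restart');
    }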
* [pve-devel] [PATCH v5 qemu-server 3/3] add target-cpu param
From: Alexandre Derumier @ 2023-10-26 8:57 UTC
To: pve-devel
Signed-off-by: Alexandre Derumier <aderumier@odiso.com>
---
PVE/API2/Qemu.pm | 7 +++++++
PVE/CLI/qm.pm | 7 +++++++
PVE/QemuMigrate.pm | 1 +
3 files changed, 15 insertions(+)
diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index c0ae516..291eb2b 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -4597,6 +4597,13 @@ __PACKAGE__->register_method({
description => "Mapping from source to target bridges. Providing only a single bridge ID maps all source bridges to that bridge. Providing the special value '1' will map each source bridge to itself.",
format => 'bridge-pair-list',
},
+ 'target-cpu' => {
+ optional => 1,
+ description => "Target Emulated CPU model. For online migration, this require restart option",
+ type => 'string',
+ requires => 'restart',
+ format => 'pve-vm-cpu-conf',
+ },
bwlimit => {
description => "Override I/O bandwidth limit (in KiB/s).",
optional => 1,
diff --git a/PVE/CLI/qm.pm b/PVE/CLI/qm.pm
index 12c5291..358ace6 100755
--- a/PVE/CLI/qm.pm
+++ b/PVE/CLI/qm.pm
@@ -194,6 +194,13 @@ __PACKAGE__->register_method({
description => "For online migration , skip memory migration and restart the vm.",
optional => 1,
},
+ 'target-cpu' => {
+ optional => 1,
+ description => "Target Emulated CPU model. For online migration, this require restart option",
+ type => 'string',
+ requires => 'restart',
+ format => 'pve-vm-cpu-conf',
+ },
'target-storage' => get_standard_option('pve-targetstorage', {
completion => \&PVE::QemuServer::complete_migration_storage,
optional => 0,
diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index c801362..a0e3d04 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -998,6 +998,7 @@ sub phase1_remote {
my ($self, $vmid) = @_;
my $remote_conf = PVE::QemuConfig->load_config($vmid);
+ $remote_conf->{cpu} = $self->{opts}->{'target-cpu'} if $self->{opts}->{'target-cpu'};
PVE::QemuConfig->update_volume_ids($remote_conf, $self->{volume_map});
my $bridges = map_bridges($remote_conf, $self->{opts}->{bridgemap});
--
2.39.2
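The override is deliberately conditional, matching the v4 bugfix from the cover letter: a missing target-cpu option must not wipe the cpu entry of the remote config. A tiny self-contained illustration with hypothetical values:

    use strict;
    use warnings;

    for my $opts ({}, { 'target-cpu' => 'host' }) {
        my %remote_conf = (cpu => 'x86-64-v2-AES'); # hypothetical existing target config
        $remote_conf{cpu} = $opts->{'target-cpu'} if $opts->{'target-cpu'};
        printf "target-cpu=%-7s => cpu=%s\n",
            $opts->{'target-cpu'} // '(unset)', $remote_conf{cpu};
    }
    # prints:
    #   target-cpu=(unset) => cpu=x86-64-v2-AES
    #   target-cpu=host    => cpu=host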
* Re: [pve-devel] [PATCH v5 qemu-server 0/3] remote-migration: migration with different cpu
From: DERUMIER, Alexandre @ 2023-11-17 8:52 UTC
To: pve-devel
Hi,
Any chance to have this one merged for 8.1?
-------- Original Message --------
From: Alexandre Derumier <aderumier@odiso.com>
Reply-To: Proxmox VE development discussion <pve-devel@lists.proxmox.com>
To: pve-devel@lists.proxmox.com
Subject: [pve-devel] [PATCH v5 qemu-server 0/3] remote-migration: migration with different cpu
Date: 26/10/2023 10:57:07
This patch series allows remote migration between clusters with different
CPU models.
Two new params are introduced: "target-cpu" and "restart".
If target-cpu is defined, it will replace the CPU model of the target VM.
If the VM is online/running, the extra "restart" safeguard option (called
"target-reboot" in earlier revisions) is required.
Indeed, as the target CPU is different, the live migration with memory
transfer is skipped (the target would die with a different CPU anyway).
Instead, after the storage copy, we switch the source VM disks to the
target VM's NBD export, then shut down the source VM and restart the
target VM (like a virtual reboot between source and target).
We have again done a lot of migrations this summer (maybe another 4000 VMs):
zero corruption, with both Windows and Linux guest VMs.
Changelog v2:
The first version simply shut down the target vm without doing the
block-job-complete. After doing production migrations with around 400 VMs,
I had some fs corruption, as if some data were still sitting in buffers.
This v2 has been tested with another batch of 400 VMs, without any
corruption.
Changelog v3:
v2 was not perfect; there were still 1 or 2 fs corruptions with VMs doing
a lot of writes.
This v3 takes up the idea of v1 again, but in a cleaner way:
- we migrate the disks to the target vm
- the source vm switches its disks to the NBD export of the target vm,
with a block-job-complete instead of the block-job-cancel used by
standard disk migration, so we are 100% sure that no write is left
pending in the migration job (see the sketch below)
- the source vm is shut down
- the target vm is restarted
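A minimal sketch of that finishing step, assuming the mirror job has already reported ready; mon_cmd is the usual qemu-server QMP helper, while $restart_mode and $job_id are hypothetical placeholders for illustration:

    # block-job-cancel:   detach the mirror; the source keeps writing to its
    #                     local disk (fine for live migration, where RAM and
    #                     device state follow over the migration stream).
    # block-job-complete: pivot the source onto the mirror target (the remote
    #                     NBD export), so no write can be left behind before
    #                     the source is shut down.
    my $restart_mode = 1;       # hypothetical flag: restart migration requested
    my $job_id = 'drive-scsi0'; # hypothetical mirror job id
    my $finish_cmd = $restart_mode ? 'block-job-complete' : 'block-job-cancel';
    mon_cmd($vmid, $finish_cmd, device => $job_id);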
Changelog v4:
- bugfix: do not override cpu with an empty config if target-cpu is not
defined
- small cleanups of the params
Changelog v5:
- address Fiona's comments
- use the "restart" param instead of "target-reboot"
- split the target-cpu param into a separate patch
Alexandre Derumier (3):
migration: move livemigration code in a dedicated sub
remote-migration: add restart param
add target-cpu param
PVE/API2/Qemu.pm | 26 +++
PVE/CLI/qm.pm | 12 ++
PVE/QemuMigrate.pm | 452 ++++++++++++++++++++++++---------------------
3 files changed, 281 insertions(+), 209 deletions(-)
* Re: [pve-devel] [PATCH v5 qemu-server 0/3] remote-migration: migration with different cpu
From: Thomas Lamprecht @ 2023-11-17 9:04 UTC
To: Proxmox VE development discussion, DERUMIER, Alexandre
On 17/11/2023 at 09:52, DERUMIER, Alexandre wrote:
> Any chance to have this one merged for 8.1?
Rather unlikely; this is very core stuff and could add regressions to one
of our most important use cases. I would like to avoid rushing it now, sorry.
* Re: [pve-devel] [PATCH v5 qemu-server 0/3] remote-migration: migration with different cpu
From: DERUMIER, Alexandre @ 2023-11-17 9:11 UTC
To: pve-devel, t.lamprecht
On 17/11/2023 at 09:52, DERUMIER, Alexandre wrote:
> Any chance to have this one merged for 8.1?
>> rather unlikely, this is very core stuff and could add regressions to
>> one of our most important use cases, would like to avoid rushing it
>> now, sorry.
Ok, no problem.
I'll still maintain it in my own branch for now.