public inbox for pve-devel@lists.proxmox.com
 help / color / mirror / Atom feed
From: Fabian Ebner <f.ebner@proxmox.com>
To: pve-devel@lists.proxmox.com
Subject: [pve-devel] [PATCH v2 qemu-server 11/13] migration: keep track of replicated volumes via local_volumes
Date: Fri, 29 Jan 2021 16:11:41 +0100	[thread overview]
Message-ID: <20210129151143.10014-12-f.ebner@proxmox.com> (raw)
In-Reply-To: <20210129151143.10014-1-f.ebner@proxmox.com>

by extending filter_local_volumes.

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
---

Changes from v1:
    * rebase (new check for is_replicated was introduced in the meantime)
    * move setting of replicated flag to earlier (previously it happened after
      run_replication) so that the next patch works

 PVE/QemuMigrate.pm | 32 +++++++++++++++++---------------
 1 file changed, 17 insertions(+), 15 deletions(-)

diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index 09289a5..64f3054 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -9,7 +9,7 @@ use POSIX qw( WNOHANG );
 use Time::HiRes qw( usleep );
 
 use PVE::Cluster;
-use PVE::GuestHelpers qw(safe_string_ne);
+use PVE::GuestHelpers qw(safe_boolean_ne safe_string_ne);
 use PVE::INotify;
 use PVE::RPCEnvironment;
 use PVE::Replication;
@@ -434,6 +434,9 @@ sub scan_local_volumes {
 
 	my $replicatable_volumes = !$self->{replication_jobcfg} ? {}
 	    : PVE::QemuConfig->get_replicatable_volumes($storecfg, $vmid, $conf, 0, 1);
+	foreach my $volid (keys %{$replicatable_volumes}) {
+	    $local_volumes->{$volid}->{replicated} = 1;
+	}
 
 	my $test_volid = sub {
 	    my ($volid, $attr) = @_;
@@ -590,7 +593,7 @@ sub scan_local_volumes {
 
 	    my $start_time = time();
 	    my $logfunc = sub { $self->log('info', shift) };
-	    $self->{replicated_volumes} = PVE::Replication::run_replication(
+	    my $replicated_volumes = PVE::Replication::run_replication(
 	       'PVE::QemuConfig', $self->{replication_jobcfg}, $start_time, $start_time, $logfunc);
 	}
 
@@ -601,7 +604,6 @@ sub scan_local_volumes {
 	    } elsif ($self->{running} && $ref eq 'generated') {
 		die "can't live migrate VM with local cloudinit disk. use a shared storage instead\n";
 	    } else {
-		next if $self->{replicated_volumes}->{$volid};
 		$local_volumes->{$volid}->{migration_mode} = 'offline';
 	    }
 	}
@@ -637,13 +639,14 @@ sub config_update_local_disksizes {
 }
 
 sub filter_local_volumes {
-    my ($self, $migration_mode) = @_;
+    my ($self, $migration_mode, $replicated) = @_;
 
     my $volumes = $self->{local_volumes};
     my @filtered_volids;
 
     foreach my $volid (sort keys %{$volumes}) {
 	next if defined($migration_mode) && safe_string_ne($volumes->{$volid}->{migration_mode}, $migration_mode);
+	next if defined($replicated) && safe_boolean_ne($volumes->{$volid}->{replicated}, $replicated);
 	push @filtered_volids, $volid;
     }
 
@@ -654,7 +657,7 @@ sub sync_offline_local_volumes {
     my ($self) = @_;
 
     my $local_volumes = $self->{local_volumes};
-    my @volids = $self->filter_local_volumes('offline');
+    my @volids = $self->filter_local_volumes('offline', 0);
 
     my $storecfg = $self->{storecfg};
     my $opts = $self->{opts};
@@ -695,9 +698,11 @@ sub sync_offline_local_volumes {
 sub cleanup_remotedisks {
     my ($self) = @_;
 
+    my $local_volumes = $self->{local_volumes};
+
     foreach my $volid (values %{$self->{volume_map}}) {
 	# don't clean up replicated disks!
-	next if defined($self->{replicated_volumes}->{$volid});
+	next if $local_volumes->{$volid}->{replicated};
 
 	my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
 
@@ -825,10 +830,8 @@ sub phase2 {
     my $input = $spice_ticket ? "$spice_ticket\n" : "\n";
     $input .= "nbd_protocol_version: $nbd_protocol_version\n";
 
-    my $number_of_online_replicated_volumes = 0;
-    foreach my $volid (@online_local_volumes) {
-	next if !$self->{replicated_volumes}->{$volid};
-	$number_of_online_replicated_volumes++;
+    my @online_replicated_volumes = $self->filter_local_volumes('online', 1);
+    foreach my $volid (@online_replicated_volumes) {
 	$input .= "replicated_volume: $volid\n";
     }
 
@@ -906,7 +909,7 @@ sub phase2 {
 
     die "unable to detect remote migration address\n" if !$raddr;
 
-    if (scalar(keys %$target_replicated_volumes) != $number_of_online_replicated_volumes) {
+    if (scalar(keys %$target_replicated_volumes) != scalar(@online_replicated_volumes)) {
 	die "number of replicated disks on source and target node do not match - target node too old?\n"
     }
 
@@ -1308,11 +1311,10 @@ sub phase3_cleanup {
 	$self->{errors} = 1;
     }
 
-    # destroy local copies
-    foreach my $volid (keys %{$self->{local_volumes}}) {
-	# keep replicated volumes!
-	next if $self->{replicated_volumes}->{$volid};
+    my @not_replicated_volumes = $self->filter_local_volumes(undef, 0);
 
+    # destroy local copies
+    foreach my $volid (@not_replicated_volumes) {
 	eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); };
 	if (my $err = $@) {
 	    $self->log('err', "removing local copy of '$volid' failed - $err");
-- 
2.20.1





  parent reply	other threads:[~2021-01-29 15:12 UTC|newest]

Thread overview: 16+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-01-29 15:11 [pve-devel] [PATCH-SERIES v2 qemu-server] Cleanup migration code and improve migration disk cleanup Fabian Ebner
2021-01-29 15:11 ` [pve-devel] [PATCH v2 qemu-server 01/13] test: migration: add parse_volume_id calls Fabian Ebner
2021-01-29 15:11 ` [pve-devel] [PATCH v2 qemu-server 02/13] migration: split sync_disks into two functions Fabian Ebner
2021-01-29 15:11 ` [pve-devel] [PATCH v2 qemu-server 03/13] migration: avoid re-scanning all volumes Fabian Ebner
2021-01-29 15:11 ` [pve-devel] [PATCH v2 qemu-server 04/13] migration: split out config_update_local_disksizes from scan_local_volumes Fabian Ebner
2021-01-29 15:11 ` [pve-devel] [PATCH v2 qemu-server 05/13] migration: fix calculation of bandwith limit for non-disk migration Fabian Ebner
2021-01-29 15:11 ` [pve-devel] [PATCH v2 qemu-server 06/13] migration: save targetstorage and bwlimit in local_volumes hash and re-use information Fabian Ebner
2021-01-29 15:11 ` [pve-devel] [PATCH v2 qemu-server 07/13] migration: add nbd migrated volumes to volume_map earlier Fabian Ebner
2021-01-29 15:11 ` [pve-devel] [PATCH v2 qemu-server 08/13] migration: simplify removal of local volumes and get rid of self->{volumes} Fabian Ebner
2021-01-29 15:11 ` [pve-devel] [PATCH v2 qemu-server 09/13] migration: cleanup_remotedisks: simplify and include more disks Fabian Ebner
2021-01-29 15:11 ` [pve-devel] [PATCH v2 qemu-server 10/13] migration: use storage_migration for checks instead of online_local_volumes Fabian Ebner
2021-01-29 15:11 ` Fabian Ebner [this message]
2021-01-29 15:11 ` [pve-devel] [PATCH v2 qemu-server 12/13] migration: split out replication from scan_local_volumes Fabian Ebner
2021-01-29 15:11 ` [pve-devel] [PATCH v2 qemu-server 13/13] migration: move finishing block jobs to phase2 for better/uniform error handling Fabian Ebner
2021-04-19  6:49 ` [pve-devel] [PATCH-SERIES v2 qemu-server] Cleanup migration code and improve migration disk cleanup Fabian Ebner
2021-04-19 11:50 ` [pve-devel] applied-series: " Thomas Lamprecht

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20210129151143.10014-12-f.ebner@proxmox.com \
    --to=f.ebner@proxmox.com \
    --cc=pve-devel@lists.proxmox.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox
Service provided by Proxmox Server Solutions GmbH | Privacy | Legal