public inbox for pve-devel@lists.proxmox.com
 help / color / mirror / Atom feed
From: Aaron Lauterer <a.lauterer@proxmox.com>
To: pve-devel@lists.proxmox.com
Subject: [pve-devel] [PATCH qemu-server v2 1/6] migration: only migrate disks used by the guest
Date: Fri, 12 May 2023 14:40:38 +0200	[thread overview]
Message-ID: <20230512124043.888785-2-a.lauterer@proxmox.com> (raw)
In-Reply-To: <20230512124043.888785-1-a.lauterer@proxmox.com>

When scanning all configured storages for disk images belonging to the
VM, the migration could easily fail if a storage is enabled but not
available. That storage might not even be used by the VM at all.

By skipping the full storage scan and only looking at the disk images
referenced in the VM config, we can avoid such failures.
Extra handling is needed for disk images currently in the 'pending'
section of the VM config. These disk images used to be detected by
scanning all storages before.
It is also necessary to fetch some information (size, format) about the
disk images explicitly that used to be provided by the initial scan of
all storages.

The big change regarding behavior is that disk images not referenced in
the VM config file will be ignored.  They are already orphans that used
to be migrated as well, but are now left where they are.  The tests have
been adapted to that changed behavior.

Signed-off-by: Aaron Lauterer <a.lauterer@proxmox.com>
---
 PVE/QemuMigrate.pm                    | 71 +++++++++++----------------
 test/MigrationTest/QemuMigrateMock.pm | 10 ++++
 test/run_qemu_migrate_tests.pl        | 12 ++---
 3 files changed, 44 insertions(+), 49 deletions(-)

diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index 09cc1d8..1d21250 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -312,49 +312,6 @@ sub scan_local_volumes {
 	    $abort = 1;
 	};
 
-	my @sids = PVE::Storage::storage_ids($storecfg);
-	foreach my $storeid (@sids) {
-	    my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
-	    next if $scfg->{shared} && !$self->{opts}->{remote};
-	    next if !PVE::Storage::storage_check_enabled($storecfg, $storeid, undef, 1);
-
-	    # get list from PVE::Storage (for unused volumes)
-	    my $dl = PVE::Storage::vdisk_list($storecfg, $storeid, $vmid, undef, 'images');
-
-	    next if @{$dl->{$storeid}} == 0;
-
-	    my $targetsid = PVE::JSONSchema::map_id($self->{opts}->{storagemap}, $storeid);
-	    if (!$self->{opts}->{remote}) {
-		# check if storage is available on target node
-		my $target_scfg = PVE::Storage::storage_check_enabled(
-		    $storecfg,
-		    $targetsid,
-		    $self->{node},
-		);
-
-		die "content type 'images' is not available on storage '$targetsid'\n"
-		    if !$target_scfg->{content}->{images};
-
-	    }
-
-	    my $bwlimit = $self->get_bwlimit($storeid, $targetsid);
-
-	    PVE::Storage::foreach_volid($dl, sub {
-		my ($volid, $sid, $volinfo) = @_;
-
-		$local_volumes->{$volid}->{ref} = 'storage';
-		$local_volumes->{$volid}->{size} = $volinfo->{size};
-		$local_volumes->{$volid}->{targetsid} = $targetsid;
-		$local_volumes->{$volid}->{bwlimit} = $bwlimit;
-
-		# If with_snapshots is not set for storage migrate, it tries to use
-		# a raw+size stream, but on-the-fly conversion from qcow2 to raw+size
-		# back to qcow2 is currently not possible.
-		$local_volumes->{$volid}->{snapshots} = ($volinfo->{format} =~ /^(?:qcow2|vmdk)$/);
-		$local_volumes->{$volid}->{format} = $volinfo->{format};
-	    });
-	}
-
 	my $replicatable_volumes = !$self->{replication_jobcfg} ? {}
 	    : PVE::QemuConfig->get_replicatable_volumes($storecfg, $vmid, $conf, 0, 1);
 	foreach my $volid (keys %{$replicatable_volumes}) {
@@ -405,8 +362,23 @@ sub scan_local_volumes {
 
 	    $local_volumes->{$volid}->{ref} = $attr->{referenced_in_config} ? 'config' : 'snapshot';
 	    $local_volumes->{$volid}->{ref} = 'storage' if $attr->{is_unused};
+	    $local_volumes->{$volid}->{ref} = 'storage' if $attr->{is_pending};
 	    $local_volumes->{$volid}->{ref} = 'generated' if $attr->{is_tpmstate};
 
+	    my $bwlimit = $self->get_bwlimit($sid, $targetsid);
+	    $local_volumes->{$volid}->{targetsid} = $targetsid;
+	    $local_volumes->{$volid}->{bwlimit} = $bwlimit;
+
+	    my $volume_list = PVE::Storage::volume_list($storecfg, $sid, $vmid, 'images');
+	    # TODO could probably be done better than just iterating
+	    for my $volume (@$volume_list) {
+		if ($volume->{volid} eq $volid) {
+		    $local_volumes->{$volid}->{size} = $volume->{size};
+		    $local_volumes->{$volid}->{format} = $volume->{format};
+		    last;
+		}
+	    }
+
 	    $local_volumes->{$volid}->{is_vmstate} = $attr->{is_vmstate} ? 1 : 0;
 
 	    $local_volumes->{$volid}->{drivename} = $attr->{drivename}
@@ -450,6 +422,19 @@ sub scan_local_volumes {
 		if PVE::Storage::volume_is_base_and_used($storecfg, $volid);
 	};
 
+	# add pending disks first
+	if (defined $conf->{pending} && %{$conf->{pending}}) {
+	    PVE::QemuServer::foreach_volid($conf->{pending}, sub {
+		    my ($volid, $attr) = @_;
+		    $attr->{is_pending} = 1;
+		    eval { $test_volid->($volid, $attr); };
+		    if (my $err = $@) {
+			&$log_error($err, $volid);
+		    }
+		});
+	}
+
+	# add non-pending referenced disks
 	PVE::QemuServer::foreach_volid($conf, sub {
 	    my ($volid, $attr) = @_;
 	    eval { $test_volid->($volid, $attr); };
diff --git a/test/MigrationTest/QemuMigrateMock.pm b/test/MigrationTest/QemuMigrateMock.pm
index 94fe686..46af62a 100644
--- a/test/MigrationTest/QemuMigrateMock.pm
+++ b/test/MigrationTest/QemuMigrateMock.pm
@@ -230,6 +230,16 @@ $MigrationTest::Shared::storage_module->mock(
 	}
 	return $res;
     },
+    volume_list => sub {
+	my ($cfg, $sid, $vmid) = @_;
+
+	my $res = [];
+
+	for my $volume (@{$source_vdisks->{$sid}}) {
+	    push @$res, $volume if $volume->{vmid} eq $vmid;
+	}
+	return $res;
+    },
     vdisk_free => sub {
 	my ($scfg, $volid) = @_;
 
diff --git a/test/run_qemu_migrate_tests.pl b/test/run_qemu_migrate_tests.pl
index 3a3049d..fbe87e6 100755
--- a/test/run_qemu_migrate_tests.pl
+++ b/test/run_qemu_migrate_tests.pl
@@ -707,7 +707,6 @@ my $tests = [
 	},
     },
     {
-	# FIXME: Maybe add orphaned drives as unused?
 	name => '149_running_orphaned_disk_targetstorage_zfs',
 	target => 'pve1',
 	vmid => 149,
@@ -728,10 +727,11 @@ my $tests = [
 	},
 	expected_calls => $default_expected_calls_online,
 	expected => {
-	    source_volids => {},
+	    source_volids => {
+		'local-dir:149/vm-149-disk-0.qcow2' => 1,
+	    },
 	    target_volids => {
 		'local-zfs:vm-149-disk-10' => 1,
-		'local-zfs:vm-149-disk-0' => 1,
 	    },
 	    vm_config => get_patched_config(149, {
 		scsi0 => 'local-zfs:vm-149-disk-10,format=raw,size=4G',
@@ -744,7 +744,6 @@ my $tests = [
 	},
     },
     {
-	# FIXME: Maybe add orphaned drives as unused?
 	name => '149_running_orphaned_disk',
 	target => 'pve1',
 	vmid => 149,
@@ -764,10 +763,11 @@ my $tests = [
 	},
 	expected_calls => $default_expected_calls_online,
 	expected => {
-	    source_volids => {},
+	    source_volids => {
+		'local-dir:149/vm-149-disk-0.qcow2' => 1,
+	    },
 	    target_volids => {
 		'local-lvm:vm-149-disk-10' => 1,
-		'local-dir:149/vm-149-disk-0.qcow2' => 1,
 	    },
 	    vm_config => get_patched_config(149, {
 		scsi0 => 'local-lvm:vm-149-disk-10,format=raw,size=4G',
-- 
2.30.2





  reply	other threads:[~2023-05-12 12:41 UTC|newest]

Thread overview: 17+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-05-12 12:40 [pve-devel] [PATCH qemu-server, container v2 0/6] migration: don't scan all storages, fail on aliases Aaron Lauterer
2023-05-12 12:40 ` Aaron Lauterer [this message]
2023-05-22 11:59   ` [pve-devel] [PATCH qemu-server v2 1/6] migration: only migrate disks used by the guest Fiona Ebner
2023-05-24 15:00     ` Aaron Lauterer
2023-05-12 12:40 ` [pve-devel] [PATCH qemu-server v2 2/6] tests: add migration test for pending disk Aaron Lauterer
2023-05-22 14:02   ` Fiona Ebner
2023-05-12 12:40 ` [pve-devel] [PATCH qemu-server v2 3/6] migration: fail when aliased volume is detected Aaron Lauterer
2023-05-22 14:17   ` Fiona Ebner
2023-05-24 14:40     ` Aaron Lauterer
2023-05-25  8:14       ` Fiona Ebner
2023-05-25  8:15   ` Fiona Ebner
2023-05-12 12:40 ` [pve-devel] [PATCH qemu-server v2 4/6] tests: add migration alias check Aaron Lauterer
2023-05-22 14:25   ` Fiona Ebner
2023-05-24 14:41     ` Aaron Lauterer
2023-05-12 12:40 ` [pve-devel] [PATCH container v2 5/6] migration: only migrate volumes used by the guest Aaron Lauterer
2023-05-22 15:00   ` Fiona Ebner
2023-05-12 12:40 ` [pve-devel] [PATCH container v2 6/6] migration: fail when aliased volume is detected Aaron Lauterer

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20230512124043.888785-2-a.lauterer@proxmox.com \
    --to=a.lauterer@proxmox.com \
    --cc=pve-devel@lists.proxmox.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox
Service provided by Proxmox Server Solutions GmbH | Privacy | Legal