From: Fiona Ebner <f.ebner@proxmox.com>
To: pve-devel@lists.proxmox.com
Subject: [pve-devel] [PATCH qemu-server 15/31] introduce QemuMigrate::Helpers module
Date: Wed, 25 Jun 2025 17:56:38 +0200
Message-ID: <20250625155751.268047-16-f.ebner@proxmox.com>
In-Reply-To: <20250625155751.268047-1-f.ebner@proxmox.com>
The QemuMigrate module is high-level and should not be called from
many other places; it is also an implementation of AbstractMigrate.
Moving these helpers into a separate module is therefore much more
natural.
Signed-off-by: Fiona Ebner <f.ebner@proxmox.com>
---
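For context, a minimal sketch of how call sites change with this patch
(function names and arguments are taken from the hunks below; the
surrounding variables are just placeholders):

    use PVE::QemuMigrate::Helpers;

    # previously: PVE::QemuServer::check_local_resources(...)
    my ($loc_res, $mapped_res, $missing_mappings_by_node) =
        PVE::QemuMigrate::Helpers::check_local_resources($conf, $running, 1);

    # previously: PVE::QemuServer::check_non_migratable_resources(...)
    PVE::QemuMigrate::Helpers::check_non_migratable_resources($conf, $save_vmstate, 0);

    # previously: PVE::QemuServer::set_migration_caps(...)
    PVE::QemuMigrate::Helpers::set_migration_caps($vmid, 1);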
src/PVE/API2/Qemu.pm | 6 +-
src/PVE/Makefile | 1 +
src/PVE/QemuConfig.pm | 5 +-
src/PVE/QemuMigrate.pm | 5 +-
src/PVE/QemuMigrate/Helpers.pm | 146 ++++++++++++++++++++++
src/PVE/QemuMigrate/Makefile | 9 ++
src/PVE/QemuServer.pm | 136 +-------------------
src/test/MigrationTest/QemuMigrateMock.pm | 3 -
src/test/MigrationTest/QmMock.pm | 3 -
src/test/MigrationTest/Shared.pm | 7 ++
src/test/snapshot-test.pm | 11 +-
11 files changed, 186 insertions(+), 146 deletions(-)
create mode 100644 src/PVE/QemuMigrate/Helpers.pm
create mode 100644 src/PVE/QemuMigrate/Makefile
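Test code that previously mocked PVE::QemuServer::set_migration_caps has
to target the new module instead; a minimal sketch mirroring the
MigrationTest/Shared.pm hunk below:

    use Test::MockModule;

    # mock the moved helper in its new namespace so tests avoid QMP calls
    my $helpers_module = Test::MockModule->new('PVE::QemuMigrate::Helpers');
    $helpers_module->mock(set_migration_caps => sub { return; });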
diff --git a/src/PVE/API2/Qemu.pm b/src/PVE/API2/Qemu.pm
index 7f55998e..27426eab 100644
--- a/src/PVE/API2/Qemu.pm
+++ b/src/PVE/API2/Qemu.pm
@@ -45,6 +45,7 @@ use PVE::QemuServer::RNG;
use PVE::QemuServer::USB;
use PVE::QemuServer::Virtiofs qw(max_virtiofs);
use PVE::QemuMigrate;
+use PVE::QemuMigrate::Helpers;
use PVE::RPCEnvironment;
use PVE::AccessControl;
use PVE::INotify;
@@ -5167,7 +5168,7 @@ __PACKAGE__->register_method({
$res->{running} = PVE::QemuServer::check_running($vmid) ? 1 : 0;
my ($local_resources, $mapped_resources, $missing_mappings_by_node) =
- PVE::QemuServer::check_local_resources($vmconf, $res->{running}, 1);
+ PVE::QemuMigrate::Helpers::check_local_resources($vmconf, $res->{running}, 1);
my $vga = PVE::QemuServer::parse_vga($vmconf->{vga});
if ($res->{running} && $vga->{'clipboard'} && $vga->{'clipboard'} eq 'vnc') {
@@ -5903,7 +5904,8 @@ __PACKAGE__->register_method({
if lc($snapname) eq 'pending';
my $vmconf = PVE::QemuConfig->load_config($vmid);
- PVE::QemuServer::check_non_migratable_resources($vmconf, $param->{vmstate}, 0);
+ PVE::QemuMigrate::Helpers::check_non_migratable_resources($vmconf, $param->{vmstate},
+ 0);
my $realcmd = sub {
PVE::Cluster::log_msg('info', $authuser, "snapshot VM $vmid: $snapname");
diff --git a/src/PVE/Makefile b/src/PVE/Makefile
index 01cf9df6..e0537b4a 100644
--- a/src/PVE/Makefile
+++ b/src/PVE/Makefile
@@ -16,4 +16,5 @@ install:
$(MAKE) -C API2 install
$(MAKE) -C CLI install
$(MAKE) -C QemuConfig install
+ $(MAKE) -C QemuMigrate install
$(MAKE) -C QemuServer install
diff --git a/src/PVE/QemuConfig.pm b/src/PVE/QemuConfig.pm
index 957c875a..01104723 100644
--- a/src/PVE/QemuConfig.pm
+++ b/src/PVE/QemuConfig.pm
@@ -8,6 +8,7 @@ use Scalar::Util qw(blessed);
use PVE::AbstractConfig;
use PVE::INotify;
use PVE::JSONSchema;
+use PVE::QemuMigrate::Helpers;
use PVE::QemuServer::Agent;
use PVE::QemuServer::CPUConfig;
use PVE::QemuServer::Drive;
@@ -211,7 +212,7 @@ sub get_backup_volumes {
sub __snapshot_assert_no_blockers {
my ($class, $vmconf, $save_vmstate) = @_;
- PVE::QemuServer::check_non_migratable_resources($vmconf, $save_vmstate, 0);
+ PVE::QemuMigrate::Helpers::check_non_migratable_resources($vmconf, $save_vmstate, 0);
}
sub __snapshot_save_vmstate {
@@ -325,7 +326,7 @@ sub __snapshot_create_vol_snapshots_hook {
PVE::Storage::activate_volumes($storecfg, [$snap->{vmstate}]);
my $state_storage_id = PVE::Storage::parse_volume_id($snap->{vmstate});
- PVE::QemuServer::set_migration_caps($vmid, 1);
+ PVE::QemuMigrate::Helpers::set_migration_caps($vmid, 1);
mon_cmd($vmid, "savevm-start", statefile => $path);
print "saving VM state and RAM using storage '$state_storage_id'\n";
my $render_state = sub {
diff --git a/src/PVE/QemuMigrate.pm b/src/PVE/QemuMigrate.pm
index 934d4350..4fd46a76 100644
--- a/src/PVE/QemuMigrate.pm
+++ b/src/PVE/QemuMigrate.pm
@@ -25,6 +25,7 @@ use PVE::Tools;
use PVE::Tunnel;
use PVE::QemuConfig;
+use PVE::QemuMigrate::Helpers;
use PVE::QemuServer::CPUConfig;
use PVE::QemuServer::Drive qw(checked_volume_format);
use PVE::QemuServer::Helpers qw(min_version);
@@ -239,7 +240,7 @@ sub prepare {
}
my ($loc_res, $mapped_res, $missing_mappings_by_node) =
- PVE::QemuServer::check_local_resources($conf, $running, 1);
+ PVE::QemuMigrate::Helpers::check_local_resources($conf, $running, 1);
my $blocking_resources = [];
for my $res ($loc_res->@*) {
if (!defined($mapped_res->{$res})) {
@@ -1235,7 +1236,7 @@ sub phase2 {
my $defaults = PVE::QemuServer::load_defaults();
$self->log('info', "set migration capabilities");
- eval { PVE::QemuServer::set_migration_caps($vmid) };
+ eval { PVE::QemuMigrate::Helpers::set_migration_caps($vmid) };
warn $@ if $@;
my $qemu_migrate_params = {};
diff --git a/src/PVE/QemuMigrate/Helpers.pm b/src/PVE/QemuMigrate/Helpers.pm
new file mode 100644
index 00000000..f191565a
--- /dev/null
+++ b/src/PVE/QemuMigrate/Helpers.pm
@@ -0,0 +1,146 @@
+package PVE::QemuMigrate::Helpers;
+
+use strict;
+use warnings;
+
+use JSON;
+
+use PVE::Cluster;
+use PVE::JSONSchema qw(parse_property_string);
+use PVE::Mapping::Dir;
+use PVE::Mapping::PCI;
+use PVE::Mapping::USB;
+
+use PVE::QemuServer::Monitor qw(mon_cmd);
+use PVE::QemuServer::Virtiofs;
+
+sub check_non_migratable_resources {
+ my ($conf, $state, $noerr) = @_;
+
+ my @blockers = ();
+ if ($state) {
+ push @blockers, "amd-sev" if $conf->{"amd-sev"};
+ push @blockers, "virtiofs" if PVE::QemuServer::Virtiofs::virtiofs_enabled($conf);
+ }
+
+ if (scalar(@blockers) && !$noerr) {
+ die "Cannot live-migrate, snapshot (with RAM), or hibernate a VM with: "
+ . join(', ', @blockers) . "\n";
+ }
+
+ return @blockers;
+}
+
+# test if VM uses local resources (to prevent migration)
+sub check_local_resources {
+ my ($conf, $state, $noerr) = @_;
+
+ my @loc_res = ();
+ my $mapped_res = {};
+
+ my @non_migratable_resources = check_non_migratable_resources($conf, $state, $noerr);
+ push(@loc_res, @non_migratable_resources);
+
+ my $nodelist = PVE::Cluster::get_nodelist();
+ my $pci_map = PVE::Mapping::PCI::config();
+ my $usb_map = PVE::Mapping::USB::config();
+ my $dir_map = PVE::Mapping::Dir::config();
+
+ my $missing_mappings_by_node = { map { $_ => [] } @$nodelist };
+
+ my $add_missing_mapping = sub {
+ my ($type, $key, $id) = @_;
+ for my $node (@$nodelist) {
+ my $entry;
+ if ($type eq 'pci') {
+ $entry = PVE::Mapping::PCI::get_node_mapping($pci_map, $id, $node);
+ } elsif ($type eq 'usb') {
+ $entry = PVE::Mapping::USB::get_node_mapping($usb_map, $id, $node);
+ } elsif ($type eq 'dir') {
+ $entry = PVE::Mapping::Dir::get_node_mapping($dir_map, $id, $node);
+ }
+ if (!scalar($entry->@*)) {
+ push @{ $missing_mappings_by_node->{$node} }, $key;
+ }
+ }
+ };
+
+ push @loc_res, "hostusb" if $conf->{hostusb}; # old syntax
+ push @loc_res, "hostpci" if $conf->{hostpci}; # old syntax
+
+ push @loc_res, "ivshmem" if $conf->{ivshmem};
+
+ foreach my $k (keys %$conf) {
+ if ($k =~ m/^usb/) {
+ my $entry = parse_property_string('pve-qm-usb', $conf->{$k});
+ next if $entry->{host} && $entry->{host} =~ m/^spice$/i;
+ if (my $name = $entry->{mapping}) {
+ $add_missing_mapping->('usb', $k, $name);
+ $mapped_res->{$k} = { name => $name };
+ }
+ }
+ if ($k =~ m/^hostpci/) {
+ my $entry = parse_property_string('pve-qm-hostpci', $conf->{$k});
+ if (my $name = $entry->{mapping}) {
+ $add_missing_mapping->('pci', $k, $name);
+ my $mapped_device = { name => $name };
+ $mapped_res->{$k} = $mapped_device;
+
+ if ($pci_map->{ids}->{$name}->{'live-migration-capable'}) {
+ $mapped_device->{'live-migration'} = 1;
+ # don't add mapped device with live migration as blocker
+ next;
+ }
+
+ # don't add mapped devices as blocker for offline migration but still iterate over
+ # all mappings above to collect on which nodes they are available.
+ next if !$state;
+ }
+ }
+ if ($k =~ m/^virtiofs/) {
+ my $entry = parse_property_string('pve-qm-virtiofs', $conf->{$k});
+ $add_missing_mapping->('dir', $k, $entry->{dirid});
+ $mapped_res->{$k} = { name => $entry->{dirid} };
+ }
+        # sockets are safe: they will be recreated on the target side post-migrate
+ next if $k =~ m/^serial/ && ($conf->{$k} eq 'socket');
+ push @loc_res, $k if $k =~ m/^(usb|hostpci|serial|parallel|virtiofs)\d+$/;
+ }
+
+ die "VM uses local resources\n" if scalar @loc_res && !$noerr;
+
+ return wantarray ? (\@loc_res, $mapped_res, $missing_mappings_by_node) : \@loc_res;
+}
+
+sub set_migration_caps {
+ my ($vmid, $savevm) = @_;
+
+ my $qemu_support = eval { mon_cmd($vmid, "query-proxmox-support") };
+
+ my $bitmap_prop = $savevm ? 'pbs-dirty-bitmap-savevm' : 'pbs-dirty-bitmap-migration';
+ my $dirty_bitmaps = $qemu_support->{$bitmap_prop} ? 1 : 0;
+
+ my $cap_ref = [];
+
+ my $enabled_cap = {
+ "auto-converge" => 1,
+ "xbzrle" => 1,
+ "dirty-bitmaps" => $dirty_bitmaps,
+ };
+
+ my $supported_capabilities = mon_cmd($vmid, "query-migrate-capabilities");
+
+ for my $supported_capability (@$supported_capabilities) {
+ push @$cap_ref,
+ {
+ capability => $supported_capability->{capability},
+ state => $enabled_cap->{ $supported_capability->{capability} }
+ ? JSON::true
+ : JSON::false,
+ };
+ }
+
+ mon_cmd($vmid, "migrate-set-capabilities", capabilities => $cap_ref);
+}
+
+1;
diff --git a/src/PVE/QemuMigrate/Makefile b/src/PVE/QemuMigrate/Makefile
new file mode 100644
index 00000000..c6e50d3f
--- /dev/null
+++ b/src/PVE/QemuMigrate/Makefile
@@ -0,0 +1,9 @@
+DESTDIR=
+PREFIX=/usr
+PERLDIR=$(PREFIX)/share/perl5
+
+SOURCES=Helpers.pm
+
+.PHONY: install
+install: $(SOURCES)
+ for i in $(SOURCES); do install -D -m 0644 $$i $(DESTDIR)$(PERLDIR)/PVE/QemuMigrate/$$i; done
diff --git a/src/PVE/QemuServer.pm b/src/PVE/QemuServer.pm
index d24dc7eb..3c612b44 100644
--- a/src/PVE/QemuServer.pm
+++ b/src/PVE/QemuServer.pm
@@ -53,6 +53,7 @@ use PVE::Tools
use PVE::QMPClient;
use PVE::QemuConfig;
use PVE::QemuConfig::NoWrite;
+use PVE::QemuMigrate::Helpers;
use PVE::QemuServer::Agent qw(qga_check_running);
use PVE::QemuServer::Helpers
qw(config_aware_timeout get_iscsi_initiator_name min_version kvm_user_version windows_version);
@@ -2254,104 +2255,6 @@ sub config_list {
return $res;
}
-sub check_non_migratable_resources {
- my ($conf, $state, $noerr) = @_;
-
- my @blockers = ();
- if ($state) {
- push @blockers, "amd-sev" if $conf->{"amd-sev"};
- push @blockers, "virtiofs" if PVE::QemuServer::Virtiofs::virtiofs_enabled($conf);
- }
-
- if (scalar(@blockers) && !$noerr) {
- die "Cannot live-migrate, snapshot (with RAM), or hibernate a VM with: "
- . join(', ', @blockers) . "\n";
- }
-
- return @blockers;
-}
-
-# test if VM uses local resources (to prevent migration)
-sub check_local_resources {
- my ($conf, $state, $noerr) = @_;
-
- my @loc_res = ();
- my $mapped_res = {};
-
- my @non_migratable_resources = check_non_migratable_resources($conf, $state, $noerr);
- push(@loc_res, @non_migratable_resources);
-
- my $nodelist = PVE::Cluster::get_nodelist();
- my $pci_map = PVE::Mapping::PCI::config();
- my $usb_map = PVE::Mapping::USB::config();
- my $dir_map = PVE::Mapping::Dir::config();
-
- my $missing_mappings_by_node = { map { $_ => [] } @$nodelist };
-
- my $add_missing_mapping = sub {
- my ($type, $key, $id) = @_;
- for my $node (@$nodelist) {
- my $entry;
- if ($type eq 'pci') {
- $entry = PVE::Mapping::PCI::get_node_mapping($pci_map, $id, $node);
- } elsif ($type eq 'usb') {
- $entry = PVE::Mapping::USB::get_node_mapping($usb_map, $id, $node);
- } elsif ($type eq 'dir') {
- $entry = PVE::Mapping::Dir::get_node_mapping($dir_map, $id, $node);
- }
- if (!scalar($entry->@*)) {
- push @{ $missing_mappings_by_node->{$node} }, $key;
- }
- }
- };
-
- push @loc_res, "hostusb" if $conf->{hostusb}; # old syntax
- push @loc_res, "hostpci" if $conf->{hostpci}; # old syntax
-
- push @loc_res, "ivshmem" if $conf->{ivshmem};
-
- foreach my $k (keys %$conf) {
- if ($k =~ m/^usb/) {
- my $entry = parse_property_string('pve-qm-usb', $conf->{$k});
- next if $entry->{host} && $entry->{host} =~ m/^spice$/i;
- if (my $name = $entry->{mapping}) {
- $add_missing_mapping->('usb', $k, $name);
- $mapped_res->{$k} = { name => $name };
- }
- }
- if ($k =~ m/^hostpci/) {
- my $entry = parse_property_string('pve-qm-hostpci', $conf->{$k});
- if (my $name = $entry->{mapping}) {
- $add_missing_mapping->('pci', $k, $name);
- my $mapped_device = { name => $name };
- $mapped_res->{$k} = $mapped_device;
-
- if ($pci_map->{ids}->{$name}->{'live-migration-capable'}) {
- $mapped_device->{'live-migration'} = 1;
- # don't add mapped device with live migration as blocker
- next;
- }
-
- # don't add mapped devices as blocker for offline migration but still iterate over
- # all mappings above to collect on which nodes they are available.
- next if !$state;
- }
- }
- if ($k =~ m/^virtiofs/) {
- my $entry = parse_property_string('pve-qm-virtiofs', $conf->{$k});
- $add_missing_mapping->('dir', $k, $entry->{dirid});
- $mapped_res->{$k} = { name => $entry->{dirid} };
- }
- # sockets are safe: they will recreated be on the target side post-migrate
- next if $k =~ m/^serial/ && ($conf->{$k} eq 'socket');
- push @loc_res, $k if $k =~ m/^(usb|hostpci|serial|parallel|virtiofs)\d+$/;
- }
-
- die "VM uses local resources\n" if scalar @loc_res && !$noerr;
-
- return wantarray ? (\@loc_res, $mapped_res, $missing_mappings_by_node) : \@loc_res;
-}
-
# check if used storages are available on all nodes (use by migrate)
sub check_storage_availability {
my ($storecfg, $conf, $node) = @_;
@@ -4508,37 +4411,6 @@ sub qemu_volume_snapshot_delete {
}
}
-sub set_migration_caps {
- my ($vmid, $savevm) = @_;
-
- my $qemu_support = eval { mon_cmd($vmid, "query-proxmox-support") };
-
- my $bitmap_prop = $savevm ? 'pbs-dirty-bitmap-savevm' : 'pbs-dirty-bitmap-migration';
- my $dirty_bitmaps = $qemu_support->{$bitmap_prop} ? 1 : 0;
-
- my $cap_ref = [];
-
- my $enabled_cap = {
- "auto-converge" => 1,
- "xbzrle" => 1,
- "dirty-bitmaps" => $dirty_bitmaps,
- };
-
- my $supported_capabilities = mon_cmd($vmid, "query-migrate-capabilities");
-
- for my $supported_capability (@$supported_capabilities) {
- push @$cap_ref,
- {
- capability => $supported_capability->{capability},
- state => $enabled_cap->{ $supported_capability->{capability} }
- ? JSON::true
- : JSON::false,
- };
- }
-
- mon_cmd($vmid, "migrate-set-capabilities", capabilities => $cap_ref);
-}
-
sub foreach_volid {
my ($conf, $func, @param) = @_;
@@ -5893,7 +5765,7 @@ sub vm_start_nolock {
}
if ($migratedfrom) {
- eval { set_migration_caps($vmid); };
+ eval { PVE::QemuMigrate::Helpers::set_migration_caps($vmid); };
warn $@ if $@;
if ($spice_port) {
@@ -6315,7 +6187,7 @@ sub vm_suspend {
die "cannot suspend to disk during backup\n"
if $is_backing_up && $includestate;
- check_non_migratable_resources($conf, $includestate, 0);
+ PVE::QemuMigrate::Helpers::check_non_migratable_resources($conf, $includestate, 0);
if ($includestate) {
$conf->{lock} = 'suspending';
@@ -6351,7 +6223,7 @@ sub vm_suspend {
PVE::Storage::activate_volumes($storecfg, [$vmstate]);
eval {
- set_migration_caps($vmid, 1);
+ PVE::QemuMigrate::Helpers::set_migration_caps($vmid, 1);
mon_cmd($vmid, "savevm-start", statefile => $path);
for (;;) {
my $state = mon_cmd($vmid, "query-savevm");
diff --git a/src/test/MigrationTest/QemuMigrateMock.pm b/src/test/MigrationTest/QemuMigrateMock.pm
index 1b95a2ff..56a1d777 100644
--- a/src/test/MigrationTest/QemuMigrateMock.pm
+++ b/src/test/MigrationTest/QemuMigrateMock.pm
@@ -167,9 +167,6 @@ $MigrationTest::Shared::qemu_server_module->mock(
qemu_drive_mirror_switch_to_active_mode => sub {
return;
},
- set_migration_caps => sub {
- return;
- },
vm_stop => sub {
$vm_stop_executed = 1;
delete $expected_calls->{'vm_stop'};
diff --git a/src/test/MigrationTest/QmMock.pm b/src/test/MigrationTest/QmMock.pm
index 69b9c2c9..3eaa131f 100644
--- a/src/test/MigrationTest/QmMock.pm
+++ b/src/test/MigrationTest/QmMock.pm
@@ -142,9 +142,6 @@ $MigrationTest::Shared::qemu_server_module->mock(
}
die "run_command (mocked) - implement me: ${cmd_msg}";
},
- set_migration_caps => sub {
- return;
- },
vm_migrate_alloc_nbd_disks => sub {
my $nbd =
$MigrationTest::Shared::qemu_server_module->original('vm_migrate_alloc_nbd_disks')
diff --git a/src/test/MigrationTest/Shared.pm b/src/test/MigrationTest/Shared.pm
index e29cd1df..a51e1692 100644
--- a/src/test/MigrationTest/Shared.pm
+++ b/src/test/MigrationTest/Shared.pm
@@ -135,6 +135,13 @@ $qemu_config_module->mock(
},
);
+our $qemu_migrate_helpers_module = Test::MockModule->new("PVE::QemuMigrate::Helpers");
+$qemu_migrate_helpers_module->mock(
+ set_migration_caps => sub {
+ return;
+ },
+);
+
our $qemu_server_cloudinit_module = Test::MockModule->new("PVE::QemuServer::Cloudinit");
$qemu_server_cloudinit_module->mock(
generate_cloudinitconfig => sub {
diff --git a/src/test/snapshot-test.pm b/src/test/snapshot-test.pm
index 1a8623c0..4fce87f1 100644
--- a/src/test/snapshot-test.pm
+++ b/src/test/snapshot-test.pm
@@ -390,6 +390,12 @@ sub qmp_cmd {
}
# END mocked PVE::QemuServer::Monitor methods
+#
+# BEGIN mocked PVE::QemuMigrate::Helpers methods
+
+sub set_migration_caps { } # ignored
+
+# END mocked PVE::QemuMigrate::Helpers methods
# BEGIN redefine PVE::QemuServer methods
@@ -429,13 +435,14 @@ sub vm_stop {
return;
}
-sub set_migration_caps { } # ignored
-
# END redefine PVE::QemuServer methods
PVE::Tools::run_command("rm -rf snapshot-working");
PVE::Tools::run_command("cp -a snapshot-input snapshot-working");
+my $qemu_migrate_helpers_module = Test::MockModule->new('PVE::QemuMigrate::Helpers');
+$qemu_migrate_helpers_module->mock('set_migration_caps', \&set_migration_caps);
+
my $qemu_helpers_module = Test::MockModule->new('PVE::QemuServer::Helpers');
$qemu_helpers_module->mock('vm_running_locally', \&vm_running_locally);
--
2.47.2