From: Fiona Ebner <f.ebner@proxmox.com>
To: pve-devel@lists.proxmox.com
Date: Wed, 3 Dec 2025 14:26:38 +0100
Message-ID: <20251203132949.109685-13-f.ebner@proxmox.com>
X-Mailer: git-send-email 2.47.3
In-Reply-To: <20251203132949.109685-1-f.ebner@proxmox.com>
References: <20251203132949.109685-1-f.ebner@proxmox.com>
Subject: [pve-devel] [PATCH qemu-server 12/18] block job: switch qemu_blockjobs_cancel() to use QMP peer

Signed-off-by: Fiona Ebner <f.ebner@proxmox.com>
---
 src/PVE/API2/Qemu.pm           |  6 ++++--
 src/PVE/QemuMigrate.pm         |  4 ++--
 src/PVE/QemuServer/BlockJob.pm | 16 ++++++++--------
 3 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/src/PVE/API2/Qemu.pm b/src/PVE/API2/Qemu.pm
index 190878de..5a627936 100644
--- a/src/PVE/API2/Qemu.pm
+++ b/src/PVE/API2/Qemu.pm
@@ -35,7 +35,7 @@ use PVE::QemuServer::CPUConfig;
 use PVE::QemuServer::Drive qw(checked_volume_format checked_parse_volname);
 use PVE::QemuServer::Helpers;
 use PVE::QemuServer::ImportDisk;
-use PVE::QemuServer::Monitor qw(mon_cmd);
+use PVE::QemuServer::Monitor qw(mon_cmd vm_qmp_peer);
 use PVE::QemuServer::Machine;
 use PVE::QemuServer::Memory qw(get_current_memory);
 use PVE::QemuServer::MetaInfo;
@@ -4607,7 +4607,9 @@ __PACKAGE__->register_method({
             PVE::AccessControl::add_vm_to_pool($newid, $pool) if $pool;
         };
         if (my $err = $@) {
-            eval { PVE::QemuServer::BlockJob::qemu_blockjobs_cancel($vmid, $jobs) };
+            eval {
+                PVE::QemuServer::BlockJob::qemu_blockjobs_cancel(vm_qmp_peer($vmid), $jobs);
+            };
 
             sleep 1; # some storage like rbd need to wait before release volume - really?
             foreach my $volid (@$newvollist) {
diff --git a/src/PVE/QemuMigrate.pm b/src/PVE/QemuMigrate.pm
index 8fa84080..b7aba504 100644
--- a/src/PVE/QemuMigrate.pm
+++ b/src/PVE/QemuMigrate.pm
@@ -33,7 +33,7 @@ use PVE::QemuServer::CPUConfig;
 use PVE::QemuServer::Drive qw(checked_volume_format);
 use PVE::QemuServer::Helpers qw(min_version);
 use PVE::QemuServer::Machine;
-use PVE::QemuServer::Monitor qw(mon_cmd);
+use PVE::QemuServer::Monitor qw(mon_cmd vm_qmp_peer);
 use PVE::QemuServer::Memory qw(get_current_memory);
 use PVE::QemuServer::Network;
 use PVE::QemuServer::QMPHelpers;
@@ -1592,7 +1592,7 @@ sub phase2_cleanup {
     if ($self->{storage_migration}) {
         eval {
             PVE::QemuServer::BlockJob::qemu_blockjobs_cancel(
-                $vmid,
+                vm_qmp_peer($vmid),
                 $self->{storage_migration_jobs},
             );
         };
diff --git a/src/PVE/QemuServer/BlockJob.pm b/src/PVE/QemuServer/BlockJob.pm
index 33ff66bc..49bb13c7 100644
--- a/src/PVE/QemuServer/BlockJob.pm
+++ b/src/PVE/QemuServer/BlockJob.pm
@@ -42,16 +42,16 @@ sub qemu_handle_concluded_blockjob {
 }
 
 sub qemu_blockjobs_cancel {
-    my ($vmid, $jobs) = @_;
+    my ($qmp_peer, $jobs) = @_;
 
     foreach my $job (keys %$jobs) {
         print "$job: Cancelling block job\n";
-        eval { mon_cmd($vmid, "block-job-cancel", device => $job); };
+        eval { qmp_cmd($qmp_peer, "block-job-cancel", device => $job); };
         $jobs->{$job}->{cancel} = 1;
     }
 
     while (1) {
-        my $stats = mon_cmd($vmid, "query-block-jobs");
+        my $stats = qmp_cmd($qmp_peer, "query-block-jobs");
 
         my $running_jobs = {};
         foreach my $stat (@$stats) {
@@ -61,7 +61,7 @@ sub qemu_blockjobs_cancel {
         foreach my $job (keys %$jobs) {
             my $info = $running_jobs->{$job};
             eval {
-                qemu_handle_concluded_blockjob(vm_qmp_peer($vmid), $job, $info, $jobs->{$job})
+                qemu_handle_concluded_blockjob($qmp_peer, $job, $info, $jobs->{$job})
                     if $info && $info->{status} eq 'concluded';
             };
             log_warn($@) if $@; # only warn and proceed with canceling other jobs
@@ -177,7 +177,7 @@ sub qemu_drive_mirror_monitor {
                 }
 
                 # if we clone a disk for a new target vm, we don't switch the disk
-                qemu_blockjobs_cancel($vmid, $jobs);
+                qemu_blockjobs_cancel(vm_qmp_peer($vmid), $jobs);
 
                 if ($agent_running) {
                     print "unfreeze filesystem\n";
@@ -234,7 +234,7 @@ sub qemu_drive_mirror_monitor {
 
     my $err = $@;
     if ($err) {
-        eval { qemu_blockjobs_cancel($vmid, $jobs) };
+        eval { qemu_blockjobs_cancel(vm_qmp_peer($vmid), $jobs) };
         die "block job ($op) error: $err";
     }
 }
@@ -308,7 +308,7 @@ sub qemu_drive_mirror {
     # if a job already runs for this device we get an error, catch it for cleanup
     eval { mon_cmd($vmid, "drive-mirror", %$opts); };
     if (my $err = $@) {
-        eval { qemu_blockjobs_cancel($vmid, $jobs) };
+        eval { qemu_blockjobs_cancel(vm_qmp_peer($vmid), $jobs) };
         warn "$@\n" if $@;
         die "mirroring error: $err\n";
     }
@@ -503,7 +503,7 @@ sub blockdev_mirror {
     # if a job already runs for this device we get an error, catch it for cleanup
     eval { mon_cmd($vmid, "blockdev-mirror", $qmp_opts->%*); };
    if (my $err = $@) {
-        eval { qemu_blockjobs_cancel($vmid, $jobs) };
+        eval { qemu_blockjobs_cancel(vm_qmp_peer($vmid), $jobs) };
         log_warn("unable to cancel block jobs - $@");
         eval { PVE::QemuServer::Blockdev::detach(vm_qmp_peer($vmid), $target_node_name); };
         log_warn("unable to delete blockdev '$target_node_name' - $@");
-- 
2.47.3
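
For context, a minimal sketch of the calling convention this patch
establishes: call sites resolve the QMP peer once via vm_qmp_peer() and
hand it down, so qemu_blockjobs_cancel() itself no longer needs the VMID.
The helper below is hypothetical and only illustrates the shape of the new
call; it assumes vm_qmp_peer() and qmp_cmd() behave as introduced earlier
in this series.

    use strict;
    use warnings;

    use PVE::QemuServer::BlockJob;
    use PVE::QemuServer::Monitor qw(vm_qmp_peer);

    # Hypothetical cleanup helper, for illustration only.
    sub cancel_jobs_example {
        my ($vmid, $jobs) = @_;

        # Resolve the QMP peer for this VM once at the call site ...
        my $peer = vm_qmp_peer($vmid);

        # ... and pass the peer, not the VMID, to the block job helper.
        # qemu_blockjobs_cancel() then issues block-job-cancel and polls
        # query-block-jobs via qmp_cmd() against that peer.
        eval { PVE::QemuServer::BlockJob::qemu_blockjobs_cancel($peer, $jobs) };
        warn "unable to cancel block jobs - $@" if $@;
    }

Taking a peer instead of a VMID decouples the helper from the VM's own
monitor socket, presumably so the same code can drive block jobs on other
QMP endpoints later in the series.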