From: Fiona Ebner <f.ebner@proxmox.com>
To: pve-devel@lists.proxmox.com
Subject: [pve-devel] [PATCH qemu-server 12/18] block job: switch qemu_blockjobs_cancel() to use QMP peer
Date: Wed,  3 Dec 2025 14:26:38 +0100	[thread overview]
Message-ID: <20251203132949.109685-13-f.ebner@proxmox.com> (raw)
In-Reply-To: <20251203132949.109685-1-f.ebner@proxmox.com>

Take a QMP peer instead of a VM ID and adapt all callers accordingly.
This also avoids constructing the peer a second time for the nested
qemu_handle_concluded_blockjob() call.

Signed-off-by: Fiona Ebner <f.ebner@proxmox.com>
---
 src/PVE/API2/Qemu.pm           |  6 ++++--
 src/PVE/QemuMigrate.pm         |  4 ++--
 src/PVE/QemuServer/BlockJob.pm | 16 ++++++++--------
 3 files changed, 14 insertions(+), 12 deletions(-)
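
Note for reviewers: a minimal sketch of the new calling convention, with
$vmid and $jobs as illustrative placeholders (vm_qmp_peer() is the helper
exported by PVE::QemuServer::Monitor, as used in the hunks below):

    use PVE::QemuServer::Monitor qw(vm_qmp_peer);
    use PVE::QemuServer::BlockJob;

    # $jobs maps block job/device IDs (e.g. 'drive-scsi0') to their
    # per-job state hashes, as set up by the mirror helpers.
    my $qmp_peer = vm_qmp_peer($vmid);
    eval { PVE::QemuServer::BlockJob::qemu_blockjobs_cancel($qmp_peer, $jobs) };
    warn "unable to cancel block jobs - $@\n" if $@;

The eval/warn pattern mirrors how the existing call sites handle
cancellation failures.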

diff --git a/src/PVE/API2/Qemu.pm b/src/PVE/API2/Qemu.pm
index 190878de..5a627936 100644
--- a/src/PVE/API2/Qemu.pm
+++ b/src/PVE/API2/Qemu.pm
@@ -35,7 +35,7 @@ use PVE::QemuServer::CPUConfig;
 use PVE::QemuServer::Drive qw(checked_volume_format checked_parse_volname);
 use PVE::QemuServer::Helpers;
 use PVE::QemuServer::ImportDisk;
-use PVE::QemuServer::Monitor qw(mon_cmd);
+use PVE::QemuServer::Monitor qw(mon_cmd vm_qmp_peer);
 use PVE::QemuServer::Machine;
 use PVE::QemuServer::Memory qw(get_current_memory);
 use PVE::QemuServer::MetaInfo;
@@ -4607,7 +4607,9 @@ __PACKAGE__->register_method({
                 PVE::AccessControl::add_vm_to_pool($newid, $pool) if $pool;
             };
             if (my $err = $@) {
-                eval { PVE::QemuServer::BlockJob::qemu_blockjobs_cancel($vmid, $jobs) };
+                eval {
+                    PVE::QemuServer::BlockJob::qemu_blockjobs_cancel(vm_qmp_peer($vmid), $jobs);
+                };
                 sleep 1; # some storage like rbd need to wait before release volume - really?
 
                 foreach my $volid (@$newvollist) {
diff --git a/src/PVE/QemuMigrate.pm b/src/PVE/QemuMigrate.pm
index 8fa84080..b7aba504 100644
--- a/src/PVE/QemuMigrate.pm
+++ b/src/PVE/QemuMigrate.pm
@@ -33,7 +33,7 @@ use PVE::QemuServer::CPUConfig;
 use PVE::QemuServer::Drive qw(checked_volume_format);
 use PVE::QemuServer::Helpers qw(min_version);
 use PVE::QemuServer::Machine;
-use PVE::QemuServer::Monitor qw(mon_cmd);
+use PVE::QemuServer::Monitor qw(mon_cmd vm_qmp_peer);
 use PVE::QemuServer::Memory qw(get_current_memory);
 use PVE::QemuServer::Network;
 use PVE::QemuServer::QMPHelpers;
@@ -1592,7 +1592,7 @@ sub phase2_cleanup {
     if ($self->{storage_migration}) {
         eval {
             PVE::QemuServer::BlockJob::qemu_blockjobs_cancel(
-                $vmid,
+                vm_qmp_peer($vmid),
                 $self->{storage_migration_jobs},
             );
         };
diff --git a/src/PVE/QemuServer/BlockJob.pm b/src/PVE/QemuServer/BlockJob.pm
index 33ff66bc..49bb13c7 100644
--- a/src/PVE/QemuServer/BlockJob.pm
+++ b/src/PVE/QemuServer/BlockJob.pm
@@ -42,16 +42,16 @@ sub qemu_handle_concluded_blockjob {
 }
 
 sub qemu_blockjobs_cancel {
-    my ($vmid, $jobs) = @_;
+    my ($qmp_peer, $jobs) = @_;
 
     foreach my $job (keys %$jobs) {
         print "$job: Cancelling block job\n";
-        eval { mon_cmd($vmid, "block-job-cancel", device => $job); };
+        eval { qmp_cmd($qmp_peer, "block-job-cancel", device => $job); };
         $jobs->{$job}->{cancel} = 1;
     }
 
     while (1) {
-        my $stats = mon_cmd($vmid, "query-block-jobs");
+        my $stats = qmp_cmd($qmp_peer, "query-block-jobs");
 
         my $running_jobs = {};
         foreach my $stat (@$stats) {
@@ -61,7 +61,7 @@ sub qemu_blockjobs_cancel {
         foreach my $job (keys %$jobs) {
             my $info = $running_jobs->{$job};
             eval {
-                qemu_handle_concluded_blockjob(vm_qmp_peer($vmid), $job, $info, $jobs->{$job})
+                qemu_handle_concluded_blockjob($qmp_peer, $job, $info, $jobs->{$job})
                     if $info && $info->{status} eq 'concluded';
             };
             log_warn($@) if $@; # only warn and proceed with canceling other jobs
@@ -177,7 +177,7 @@ sub qemu_drive_mirror_monitor {
                     }
 
                     # if we clone a disk for a new target vm, we don't switch the disk
-                    qemu_blockjobs_cancel($vmid, $jobs);
+                    qemu_blockjobs_cancel(vm_qmp_peer($vmid), $jobs);
 
                     if ($agent_running) {
                         print "unfreeze filesystem\n";
@@ -234,7 +234,7 @@ sub qemu_drive_mirror_monitor {
     my $err = $@;
 
     if ($err) {
-        eval { qemu_blockjobs_cancel($vmid, $jobs) };
+        eval { qemu_blockjobs_cancel(vm_qmp_peer($vmid), $jobs) };
         die "block job ($op) error: $err";
     }
 }
@@ -308,7 +308,7 @@ sub qemu_drive_mirror {
     # if a job already runs for this device we get an error, catch it for cleanup
     eval { mon_cmd($vmid, "drive-mirror", %$opts); };
     if (my $err = $@) {
-        eval { qemu_blockjobs_cancel($vmid, $jobs) };
+        eval { qemu_blockjobs_cancel(vm_qmp_peer($vmid), $jobs) };
         warn "$@\n" if $@;
         die "mirroring error: $err\n";
     }
@@ -503,7 +503,7 @@ sub blockdev_mirror {
     # if a job already runs for this device we get an error, catch it for cleanup
     eval { mon_cmd($vmid, "blockdev-mirror", $qmp_opts->%*); };
     if (my $err = $@) {
-        eval { qemu_blockjobs_cancel($vmid, $jobs) };
+        eval { qemu_blockjobs_cancel(vm_qmp_peer($vmid), $jobs) };
         log_warn("unable to cancel block jobs - $@");
         eval { PVE::QemuServer::Blockdev::detach(vm_qmp_peer($vmid), $target_node_name); };
         log_warn("unable to delete blockdev '$target_node_name' - $@");
-- 
2.47.3