From: Fiona Ebner
To: pve-devel@lists.proxmox.com
Date: Tue, 14 Oct 2025 16:39:25 +0200
Message-ID: <20251014143946.160679-15-f.ebner@proxmox.com>
In-Reply-To: <20251014143946.160679-1-f.ebner@proxmox.com>
References: <20251014143946.160679-1-f.ebner@proxmox.com>
Subject: [pve-devel] [PATCH qemu-server 14/16] introduce QSD module for qemu-storage-daemon functionality

For now, this supports creating FUSE exports based on Proxmox VE drive
definitions; NBD exports could be added later. This is in preparation for
allowing qcow2 for TPM state volumes.

A QEMU storage daemon instance is associated with a given VM.

Target files where the FUSE export is mounted must already exist. The
'writable' flag for the export is the negation of the 'read-only' status of
the added block node. The 'allow-other' flag is set to 'off', so only the
user the storage daemon is running as may access the export. For now,
exported images don't need to be resized, so the 'growable' flag is
hard-coded to 'false'.

When cleaning up, a 'quit' QMP command is sent to the storage daemon with a
60-second timeout, after which SIGTERM is sent with a 10-second timeout,
before SIGKILL is finally used if the QEMU storage daemon is still running.

Signed-off-by: Fiona Ebner
---

Dependency bump for QEMU needed!
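
As an illustration for reviewers (not part of the patch itself): below is a
minimal sketch of how a caller might use the new module. The VM ID, the
drive hash contents and the 'tpmstate0' export name are hypothetical
placeholders; only the three PVE::QemuServer::QSD functions are taken from
this patch.

    use PVE::QemuServer::QSD;

    my $vmid = 100; # hypothetical VM ID
    # Hypothetical drive hash; in practice this comes from the usual
    # Proxmox VE drive parsing helpers.
    my $drive = { file => 'local-lvm:vm-100-disk-1' };

    # Start the per-VM qemu-storage-daemon instance (QMP socket + PID file).
    PVE::QemuServer::QSD::start($vmid);

    # Attach the drive and export it via FUSE; returns the path of the regular
    # file backing the export, which consumers can read/write like a raw image.
    my $path = PVE::QemuServer::QSD::add_fuse_export($vmid, $drive, 'tpmstate0');

    # ... use $path ...

    # Tear down: QMP 'quit' with SIGTERM/SIGKILL fallback, then remove the
    # PID file, QMP socket and FUSE mountpoint files.
    PVE::QemuServer::QSD::quit($vmid);
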
 src/PVE/QemuServer/Helpers.pm |  18 +++++
 src/PVE/QemuServer/Makefile   |   1 +
 src/PVE/QemuServer/QSD.pm     | 124 ++++++++++++++++++++++++++++++++++
 3 files changed, 143 insertions(+)
 create mode 100644 src/PVE/QemuServer/QSD.pm

diff --git a/src/PVE/QemuServer/Helpers.pm b/src/PVE/QemuServer/Helpers.pm
index 2c78a7b4..ab8aa389 100644
--- a/src/PVE/QemuServer/Helpers.pm
+++ b/src/PVE/QemuServer/Helpers.pm
@@ -89,6 +89,24 @@ sub qsd_pidfile_name {
     return "${var_run_tmpdir}/qsd-${vmid}.pid";
 }
 
+sub qsd_fuse_export_cleanup_files {
+    my ($vmid) = @_;
+
+    PVE::Tools::dir_glob_foreach(
+        $var_run_tmpdir,
+        "qsd-${vmid}-.*.fuse",
+        sub {
+            my ($file) = @_;
+            unlink "${var_run_tmpdir}/${file}";
+        },
+    );
+}
+
+sub qsd_fuse_export_path {
+    my ($vmid, $export_name) = @_;
+    return "${var_run_tmpdir}/qsd-${vmid}-${export_name}.fuse";
+}
+
 sub vm_pidfile_name {
     my ($vmid) = @_;
     return "${var_run_tmpdir}/$vmid.pid";
diff --git a/src/PVE/QemuServer/Makefile b/src/PVE/QemuServer/Makefile
index 63c8d77c..d599ca91 100644
--- a/src/PVE/QemuServer/Makefile
+++ b/src/PVE/QemuServer/Makefile
@@ -23,6 +23,7 @@ SOURCES=Agent.pm \
     PCI.pm \
     QemuImage.pm \
     QMPHelpers.pm \
+    QSD.pm \
     RNG.pm \
     RunState.pm \
     StateFile.pm \
diff --git a/src/PVE/QemuServer/QSD.pm b/src/PVE/QemuServer/QSD.pm
new file mode 100644
index 00000000..4a538274
--- /dev/null
+++ b/src/PVE/QemuServer/QSD.pm
@@ -0,0 +1,124 @@
+package PVE::QemuServer::QSD;
+
+use v5.36;
+
+use JSON qw(to_json);
+
+use PVE::JSONSchema qw(json_bool);
+use PVE::SafeSyslog qw(syslog);
+use PVE::Storage;
+use PVE::Tools;
+
+use PVE::QemuServer::Blockdev;
+use PVE::QemuServer::Helpers;
+use PVE::QemuServer::Monitor;
+
+=head3 start
+
+    PVE::QemuServer::QSD::start($vmid);
+
+Start a QEMU storage daemon instance associated to VM C<$vmid>.
+
+=cut
+
+sub start($vmid) {
+    my $qmp_socket_path = PVE::QemuServer::Helpers::qmp_socket({ vmid => $vmid, type => 'qsd' });
+    my $pidfile = PVE::QemuServer::Helpers::qsd_pidfile_name($vmid);
+
+    my $cmd = [
+        'qemu-storage-daemon',
+        '--daemonize',
+        '--chardev',
+        "socket,id=qmp,path=$qmp_socket_path,server=on,wait=off",
+        '--monitor',
+        'chardev=qmp,mode=control',
+        '--pidfile',
+        $pidfile,
+    ];
+
+    PVE::Tools::run_command($cmd);
+
+    my $pid = PVE::QemuServer::Helpers::qsd_running_locally($vmid);
+    syslog("info", "QEMU storage daemon for $vmid started with PID $pid.");
+
+    return;
+}
+
+=head3 add_fuse_export
+
+    my $path = PVE::QemuServer::QSD::add_fuse_export($vmid, $drive, $name);
+
+Attach drive C<$drive> to the storage daemon associated to VM C<$vmid> and export it with name
+C<$name> via FUSE. Returns the path to the file representing the export.
+
+=cut
+
+sub add_fuse_export($vmid, $drive, $name) {
+    my $storage_config = PVE::Storage::config();
+
+    PVE::Storage::activate_volumes($storage_config, [$drive->{file}]);
+
+    my ($node_name, $read_only) =
+        PVE::QemuServer::Blockdev::attach($storage_config, $vmid, $drive, { qsd => 1 });
+
+    my $fuse_path = PVE::QemuServer::Helpers::qsd_fuse_export_path($vmid, $name);
+    PVE::Tools::file_set_contents($fuse_path, '', 0600); # mountpoint file needs to exist up-front
+
+    my $export = {
+        type => 'fuse',
+        id => "$name",
+        mountpoint => $fuse_path,
+        'node-name' => "$node_name",
+        writable => json_bool(!$read_only),
+        growable => JSON::false,
+        'allow-other' => 'off',
+    };
+
+    PVE::QemuServer::Monitor::qsd_cmd($vmid, 'block-export-add', $export->%*);
+
+    return $fuse_path;
+}
+
+=head3 quit
+
+    PVE::QemuServer::QSD::quit($vmid);
+
+Shut down the QEMU storage daemon associated to VM C<$vmid> and clean up its PID file and socket.
+Wait for 60 seconds for clean shutdown, then send SIGTERM and wait an additional 10 seconds
+before sending SIGKILL.
+
+=cut
+
+sub quit($vmid) {
+    eval { PVE::QemuServer::Monitor::qsd_cmd($vmid, 'quit'); };
+    my $qmp_err = $@;
+    warn "QEMU storage daemon for $vmid failed to handle 'quit' - $qmp_err" if $qmp_err;
+
+    my $count = $qmp_err ? 60 : 0; # can't wait for QMP 'quit' to terminate the process if it failed
+    my $pid = PVE::QemuServer::Helpers::qsd_running_locally($vmid);
+    while ($pid) {
+        if ($count == 60) {
+            warn "QEMU storage daemon for $vmid still running with PID $pid"
+                . " - terminating now with SIGTERM\n";
+            kill 15, $pid;
+        } elsif ($count == 70) {
+            warn "QEMU storage daemon for $vmid still running with PID $pid"
+                . " - terminating now with SIGKILL\n";
+            kill 9, $pid;
+            last;
+        }
+
+        sleep 1;
+        $count++;
+        $pid = PVE::QemuServer::Helpers::qsd_running_locally($vmid);
+    }
+
+    unlink PVE::QemuServer::Helpers::qsd_pidfile_name($vmid);
+    unlink PVE::QemuServer::Helpers::qmp_socket({ vmid => $vmid, type => 'qsd' });
+
+    PVE::QemuServer::Helpers::qsd_fuse_export_cleanup_files($vmid);
+
+    return;
+}
+
+1;
-- 
2.47.3