From mboxrd@z Thu Jan 1 00:00:00 1970
From: Markus Frank <m.frank@proxmox.com>
To: pve-devel@lists.proxmox.com
Date: Thu, 6 Jul 2023 12:54:14 +0200
Message-Id: <20230706105421.54949-5-m.frank@proxmox.com>
X-Mailer: git-send-email 2.39.2
In-Reply-To: <20230706105421.54949-1-m.frank@proxmox.com>
References: <20230706105421.54949-1-m.frank@proxmox.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Subject: [pve-devel] [PATCH qemu-server v6 1/3] feature #1027: virtio-fs support

Adds support for sharing directories with a guest VM.

virtio-fs needs virtiofsd to be started. To start virtiofsd as a
process (despite being a daemon, it does not run in the background),
a double fork is used. virtiofsd is expected to exit together with
QEMU.

There are the required parameters dirid & tag and the optional
parameters direct-io & cache. The dirid is mapped to the corresponding
path on the current node. The tag parameter selects the tag name that
is used with the mount command in the guest.
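Inside the guest, the share can then be mounted via the chosen tag,
for example (the mount point is only illustrative):
```
mount -t virtiofs tag1 /mnt/shared
```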
example config:
```
virtiofs0: foo,tag=tag1,direct-io=1,cache=always
virtiofs1: dirid=bar,tag=tag2,cache=never
```

For information on the optional parameters, see:
https://gitlab.com/virtio-fs/virtiofsd/-/blob/main/README.md

Signed-off-by: Markus Frank <m.frank@proxmox.com>
---
 PVE/QemuServer.pm        | 157 +++++++++++++++++++++++++++++++++++++++
 PVE/QemuServer/Memory.pm |  25 +++++--
 debian/control           |   1 +
 3 files changed, 177 insertions(+), 6 deletions(-)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 940cdac..3a8b4c5 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -43,6 +43,7 @@ use PVE::PBSClient;
 use PVE::RESTEnvironment qw(log_warn);
 use PVE::RPCEnvironment;
 use PVE::Storage;
+use PVE::Mapping::DIR;
 use PVE::SysFSTools;
 use PVE::Systemd;
 use PVE::Tools qw(run_command file_read_firstline file_get_contents dir_glob_foreach get_host_arch $IPV6RE);
@@ -276,6 +277,35 @@ my $rng_fmt = {
     },
 };
 
+my $virtiofs_fmt = {
+    'dirid' => {
+        type => 'string',
+        default_key => 1,
+        description => "dirid of directory you want to share with the guest VM",
+        format_description => "virtiofs-dirid",
+    },
+    'tag' => {
+        type => 'string',
+        description => "tag name for mounting in the guest VM",
+        format_description => "virtiofs-tag",
+    },
+    'cache' => {
+        type => 'string',
+        description => "The caching policy the file system should use"
+            ." (auto, always, never).",
+        format_description => "virtiofs-cache",
+        enum => [qw(auto always never)],
+        optional => 1,
+    },
+    'direct-io' => {
+        type => 'boolean',
+        description => "Honor the O_DIRECT flag passed down by guest applications",
+        format_description => "virtiofs-directio",
+        optional => 1,
+    },
+};
+PVE::JSONSchema::register_format('pve-qm-virtiofs', $virtiofs_fmt);
+
 my $meta_info_fmt = {
     'ctime' => {
         type => 'integer',
@@ -838,6 +868,7 @@ while (my ($k, $v) = each %$confdesc) {
 }
 
 my $MAX_NETS = 32;
+my $MAX_VIRTIOFS = 10;
 my $MAX_SERIAL_PORTS = 4;
 my $MAX_PARALLEL_PORTS = 3;
 my $MAX_NUMA = 8;
@@ -982,6 +1013,21 @@ my $netdesc = {
 PVE::JSONSchema::register_standard_option("pve-qm-net", $netdesc);
 
+my $virtiofsdesc = {
+    optional => 1,
+    type => 'string', format => $virtiofs_fmt,
+    description => "share files between host and guest",
+};
+PVE::JSONSchema::register_standard_option("pve-qm-virtiofs", $virtiofsdesc);
+
+sub max_virtiofs {
+    return $MAX_VIRTIOFS;
+}
+
+for (my $i = 0; $i < $MAX_VIRTIOFS; $i++) {
+    $confdesc->{"virtiofs$i"} = $virtiofsdesc;
+}
+
 my $ipconfig_fmt = {
     ip => {
         type => 'string',
@@ -4100,6 +4146,25 @@ sub config_to_command {
         push @$devices, '-device', $netdevicefull;
     }
 
+    my $onevirtiofs = 0;
+    for (my $i = 0; $i < $MAX_VIRTIOFS; $i++) {
+        my $virtiofsstr = "virtiofs$i";
+
+        next if !$conf->{$virtiofsstr};
+        my $virtiofs = parse_property_string('pve-qm-virtiofs', $conf->{$virtiofsstr});
+        next if !$virtiofs;
+
+        push @$devices, '-chardev', "socket,id=virtfs$i,path=/var/run/virtiofsd/vm$vmid-fs$i";
+        push @$devices, '-device', 'vhost-user-fs-pci,queue-size=1024'
+            .",chardev=virtfs$i,tag=$virtiofs->{tag}";
+
+        $onevirtiofs = 1;
+    }
+
+    if ($onevirtiofs && $conf->{hugepages}){
+        die "hugepages not supported in combination with virtiofs\n";
+    }
+
     if ($conf->{ivshmem}) {
         my $ivshmem = parse_property_string($ivshmem_fmt, $conf->{ivshmem});
 
@@ -4159,6 +4224,14 @@ sub config_to_command {
     }
     push @$machineFlags, "type=${machine_type_min}";
 
+    if ($onevirtiofs && !$conf->{numa}) {
+        # kvm: '-machine memory-backend' and '-numa memdev' properties are
+        # mutually exclusive
+        push @$devices, '-object', 'memory-backend-file,id=virtiofs-mem'
.",size=$conf->{memory}M,mem-path=/dev/shm,share=on"; + push @$machineFlags, 'memory-backend=virtiofs-mem'; + } + push @$cmd, @$devices; push @$cmd, '-rtc', join(',', @$rtcFlags) if scalar(@$rtcFlags); push @$cmd, '-machine', join(',', @$machineFlags) if scalar(@$machineFlags); @@ -4185,6 +4258,72 @@ sub config_to_command { return wantarray ? ($cmd, $vollist, $spice_port, $pci_devices) : $cmd; } +sub start_virtiofs { + my ($vmid, $fsid, $virtiofs) = @_; + + my $dir_list = PVE::Mapping::DIR::find_on_current_node($virtiofs->{dirid}); + + if (!$dir_list || scalar($dir_list->@*) != 1) { + die "virtiofs needs exactly one mapping for this node\n"; + } + + eval { + PVE::Mapping::DIR::assert_valid($dir_list->[0]); + }; + if (my $err = $@) { + die "Directory Mapping invalid: $err\n"; + } + + my $dir_cfg = $dir_list->[0]; + my $path = $dir_cfg->{path}; + my $socket_path_root = "/var/run/virtiofsd"; + mkdir $socket_path_root; + my $socket_path = "$socket_path_root/vm$vmid-fs$fsid"; + unlink($socket_path); + my $socket = IO::Socket::UNIX->new( + Type => SOCK_STREAM, + Local => $socket_path, + Listen => 1, + ) or die "cannot create socket - $!\n"; + + my $flags = fcntl($socket, F_GETFD, 0) + or die "failed to get file descriptor flags: $!\n"; + fcntl($socket, F_SETFD, $flags & ~FD_CLOEXEC) + or die "failed to remove FD_CLOEXEC from file descriptor\n"; + + my $fd = $socket->fileno(); + + my $virtiofsd_bin = '/usr/libexec/virtiofsd'; + + my $pid = fork(); + if ($pid == 0) { + for my $fd_loop (3 .. POSIX::sysconf( &POSIX::_SC_OPEN_MAX )) { + POSIX::close($fd_loop) if ($fd_loop != $fd); + } + my $pid2 = fork(); + if ($pid2 == 0) { + my $cmd = [$virtiofsd_bin, "--fd=$fd", "--shared-dir=$path"]; + push @$cmd, '--xattr' if ($dir_cfg->{xattr}); + push @$cmd, '--posix-acl' if ($dir_cfg->{acl}); + push @$cmd, '--announce-submounts' if ($dir_cfg->{submounts}); + push @$cmd, '--allow-direct-io' if ($virtiofs->{'direct-io'}); + push @$cmd, "--cache=$virtiofs->{'cache'}" if ($virtiofs->{'cache'}); + run_command($cmd); + POSIX::_exit(0); + } elsif (!defined($pid2)) { + die "could not fork to start virtiofsd\n"; + } else { + POSIX::_exit(0); + } + } elsif (!defined($pid)) { + die "could not fork to start virtiofsd\n"; + } + + # return socket to keep it alive, + # so that qemu will wait for virtiofsd to start + return $socket; +} + sub check_rng_source { my ($source) = @_; @@ -5740,6 +5879,19 @@ sub vm_start_nolock { my ($cmd, $vollist, $spice_port, $pci_devices) = config_to_command($storecfg, $vmid, $conf, $defaults, $forcemachine, $forcecpu, $params->{'pbs-backing'}); + my @sockets; + for (my $i = 0; $i < $MAX_VIRTIOFS; $i++) { + my $virtiofsstr = "virtiofs$i"; + + next if !$conf->{$virtiofsstr}; + my $virtiofs = parse_property_string('pve-qm-virtiofs', $conf->{$virtiofsstr}); + next if !$virtiofs; + + + my $socket = start_virtiofs($vmid, $i, $virtiofs); + push @sockets, $socket; + } + my $migration_ip; my $get_migration_ip = sub { my ($nodename) = @_; @@ -6093,6 +6245,11 @@ sub vm_start_nolock { PVE::GuestHelpers::exec_hookscript($conf, $vmid, 'post-start'); + foreach my $socket (@sockets) { + shutdown($socket, 2); + close($socket); + } + return $res; } diff --git a/PVE/QemuServer/Memory.pm b/PVE/QemuServer/Memory.pm index 0601dd6..3b58b36 100644 --- a/PVE/QemuServer/Memory.pm +++ b/PVE/QemuServer/Memory.pm @@ -278,6 +278,16 @@ sub config { die "numa needs to be enabled to use hugepages" if $conf->{hugepages} && !$conf->{numa}; + my $onevirtiofs = 0; + for (my $i = 0; $i < PVE::QemuServer::max_virtiofs(); $i++) { + 
my $virtiofsstr = "virtiofs$i"; + next if !$conf->{$virtiofsstr}; + my $virtiofs = PVE::JSONSchema::parse_property_string('pve-qm-virtiofs', $conf->{$virtiofsstr}); + if ($virtiofs) { + $onevirtiofs = 1; + } + } + if ($conf->{numa}) { my $numa_totalmemory = undef; @@ -290,7 +300,8 @@ sub config { my $numa_memory = $numa->{memory}; $numa_totalmemory += $numa_memory; - my $mem_object = print_mem_object($conf, "ram-node$i", $numa_memory); + my $memdev = $onevirtiofs ? "virtiofs-mem$i" : "ram-node$i"; + my $mem_object = print_mem_object($conf, $memdev, $numa_memory); # cpus my $cpulists = $numa->{cpus}; @@ -315,7 +326,7 @@ sub config { } push @$cmd, '-object', $mem_object; - push @$cmd, '-numa', "node,nodeid=$i,cpus=$cpus,memdev=ram-node$i"; + push @$cmd, '-numa', "node,nodeid=$i,cpus=$cpus,memdev=$memdev"; } die "total memory for NUMA nodes must be equal to vm static memory\n" @@ -329,13 +340,13 @@ sub config { die "host NUMA node$i doesn't exist\n" if !host_numanode_exists($i) && $conf->{hugepages}; - my $mem_object = print_mem_object($conf, "ram-node$i", $numa_memory); - push @$cmd, '-object', $mem_object; - my $cpus = ($cores * $i); $cpus .= "-" . ($cpus + $cores - 1) if $cores > 1; - push @$cmd, '-numa', "node,nodeid=$i,cpus=$cpus,memdev=ram-node$i"; + my $memdev = $onevirtiofs ? "virtiofs-mem$i" : "ram-node$i"; + my $mem_object = print_mem_object($conf, $memdev, $numa_memory); + push @$cmd, '-object', $mem_object; + push @$cmd, '-numa', "node,nodeid=$i,cpus=$cpus,memdev=$memdev"; } } } @@ -364,6 +375,8 @@ sub print_mem_object { my $path = hugepages_mount_path($hugepages_size); return "memory-backend-file,id=$id,size=${size}M,mem-path=$path,share=on,prealloc=yes"; + } elsif ($id =~ m/^virtiofs-mem/) { + return "memory-backend-file,id=$id,size=${size}M,mem-path=/dev/shm,share=on"; } else { return "memory-backend-ram,id=$id,size=${size}M"; } diff --git a/debian/control b/debian/control index 49f67b2..f008a9b 100644 --- a/debian/control +++ b/debian/control @@ -53,6 +53,7 @@ Depends: dbus, socat, swtpm, swtpm-tools, + virtiofsd, ${misc:Depends}, ${perl:Depends}, ${shlibs:Depends}, -- 2.39.2