* [PATCH proxmox-perl-rs v2 1/1] pve: add binding for accessing vgpu info
2026-04-02 11:21 [PATCH common/manager/proxmox-perl-rs/qemu-server v2 0/5] use NVML for vGPU info querying Dominik Csapak
@ 2026-04-02 11:22 ` Dominik Csapak
2026-04-02 11:22 ` [PATCH qemu-server v2 1/2] pci: move mdev related code to own module Dominik Csapak
` (3 subsequent siblings)
4 siblings, 0 replies; 6+ messages in thread
From: Dominik Csapak @ 2026-04-02 11:22 UTC (permalink / raw)
To: pve-devel
Adds some basic perl bindings to return the creatable and supported vGPU
types for NVIDIA GPUs.
The 'supported' helper is not yet used, but it'll be useful when we
want to have a better api response for the available mdevs.
The description generated here is in the format that used to be exposed
in sysfs via the standard mdev API.
Co-developed-by: Christoph Heiss <c.heiss@proxmox.com>
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
pve-rs/Cargo.toml | 1 +
pve-rs/Makefile | 1 +
pve-rs/debian/control | 1 +
pve-rs/src/bindings/mod.rs | 3 ++
pve-rs/src/bindings/nvml.rs | 91 +++++++++++++++++++++++++++++++++++++
5 files changed, 97 insertions(+)
create mode 100644 pve-rs/src/bindings/nvml.rs
diff --git a/pve-rs/Cargo.toml b/pve-rs/Cargo.toml
index c66ee23..bc77529 100644
--- a/pve-rs/Cargo.toml
+++ b/pve-rs/Cargo.toml
@@ -20,6 +20,7 @@ hex = "0.4"
http = "1"
libc = "0.2"
nix = "0.29"
+nvml-wrapper = "0.12"
openssl = "0.10.40"
serde = "1.0"
serde_bytes = "0.11"
diff --git a/pve-rs/Makefile b/pve-rs/Makefile
index c2f9b73..dbbeb4e 100644
--- a/pve-rs/Makefile
+++ b/pve-rs/Makefile
@@ -27,6 +27,7 @@ PERLMOD_GENPACKAGE := /usr/lib/perlmod/genpackage.pl \
PERLMOD_PACKAGES := \
PVE::RS::Firewall::SDN \
+ PVE::RS::NVML \
PVE::RS::OCI \
PVE::RS::OpenId \
PVE::RS::ResourceScheduling::Static \
diff --git a/pve-rs/debian/control b/pve-rs/debian/control
index 25e2121..d891f28 100644
--- a/pve-rs/debian/control
+++ b/pve-rs/debian/control
@@ -11,6 +11,7 @@ Build-Depends: debhelper-compat (= 13),
librust-http-1+default-dev,
librust-libc-0.2+default-dev,
librust-nix-0.29+default-dev,
+ librust-nvml-wrapper-dev (>= 0.12.0-1~bpo13+pve1),
librust-openssl-0.10+default-dev (>= 0.10.40-~~),
librust-perlmod-0.14+default-dev,
librust-perlmod-0.14+exporter-dev,
diff --git a/pve-rs/src/bindings/mod.rs b/pve-rs/src/bindings/mod.rs
index 853a3dd..33938ed 100644
--- a/pve-rs/src/bindings/mod.rs
+++ b/pve-rs/src/bindings/mod.rs
@@ -11,6 +11,9 @@ pub use tfa::pve_rs_tfa;
mod openid;
pub use openid::pve_rs_open_id;
+mod nvml;
+pub use nvml::pve_rs_nvml;
+
pub mod firewall;
mod sdn;
diff --git a/pve-rs/src/bindings/nvml.rs b/pve-rs/src/bindings/nvml.rs
new file mode 100644
index 0000000..0f4c81e
--- /dev/null
+++ b/pve-rs/src/bindings/nvml.rs
@@ -0,0 +1,91 @@
+//! Provides access to the state of NVIDIA (v)GPU devices connected to the system.
+
+#[perlmod::package(name = "PVE::RS::NVML", lib = "pve_rs")]
+pub mod pve_rs_nvml {
+ //! The `PVE::RS::NVML` package.
+ //!
+ //! Provides high level helpers to get info from the system with NVML.
+
+ use anyhow::Result;
+ use nvml_wrapper::Nvml;
+ use perlmod::Value;
+
+ /// Retrieves a list of *creatable* vGPU types for the specified GPU by bus id.
+ ///
+ /// The [`bus_id`] is of format "\<domain\>:\<bus\>:\<device\>.\<function\>",
+ /// e.g. "0000:01:01.0".
+ ///
+ /// # See also
+ ///
+ /// [`nvmlDeviceGetCreatableVgpus`]: <https://docs.nvidia.com/deploy/nvml-api/group__nvmlVgpu.html#group__nvmlVgpu_1ge86fff933c262740f7a374973c4747b6>
+ /// [`nvmlDeviceGetHandleByPciBusId_v2`]: <https://docs.nvidia.com/deploy/nvml-api/group__nvmlDeviceQueries.html#group__nvmlDeviceQueries_1gea7484bb9eac412c28e8a73842254c05>
+ /// [`struct nvmlPciInfo_t`]: <https://docs.nvidia.com/deploy/nvml-api/structnvmlPciInfo__t.html#structnvmlPciInfo__t_1a4d54ad9b596d7cab96ecc34613adbe4>
+ #[export]
+ fn creatable_vgpu_types_for_dev(bus_id: &str) -> Result<Vec<Value>> {
+ let nvml = Nvml::init()?;
+ let device = nvml.device_by_pci_bus_id(bus_id)?;
+
+ build_vgpu_type_list(device.vgpu_creatable_types()?)
+ }
+
+ /// Retrieves a list of *supported* vGPU types for the specified GPU by bus id.
+ ///
+ /// The [`bus_id`] is of format "\<domain\>:\<bus\>:\<device\>.\<function\>",
+ /// e.g. "0000:01:01.0".
+ ///
+ /// # See also
+ ///
+ /// [`nvmlDeviceGetSupportedVgpus`]: <https://docs.nvidia.com/deploy/nvml-api/group__nvmlVgpu.html#group__nvmlVgpu_1ge084b87e80350165859500ebec714274>
+ /// [`nvmlDeviceGetHandleByPciBusId_v2`]: <https://docs.nvidia.com/deploy/nvml-api/group__nvmlDeviceQueries.html#group__nvmlDeviceQueries_1gea7484bb9eac412c28e8a73842254c05>
+ /// [`struct nvmlPciInfo_t`]: <https://docs.nvidia.com/deploy/nvml-api/structnvmlPciInfo__t.html#structnvmlPciInfo__t_1a4d54ad9b596d7cab96ecc34613adbe4>
+ #[export]
+ fn supported_vgpu_types_for_dev(bus_id: &str) -> Result<Vec<Value>> {
+ let nvml = Nvml::init()?;
+ let device = nvml.device_by_pci_bus_id(bus_id)?;
+
+ build_vgpu_type_list(device.vgpu_supported_types()?)
+ }
+
+ fn build_vgpu_type_list(vgpu_types: Vec<nvml_wrapper::vgpu::VgpuType>) -> Result<Vec<Value>> {
+ let mut result = Vec::with_capacity(vgpu_types.len());
+ for vgpu in vgpu_types {
+ let mut value = perlmod::Value::new_hash();
+ if let Some(hash) = value.as_hash_mut() {
+ hash.insert("id", Value::new_uint(vgpu.id() as usize));
+ hash.insert("name", Value::new_string(&vgpu.name()?));
+ hash.insert("description", Value::new_string(&description(&vgpu)?));
+ }
+
+ result.push(Value::new_ref(&value));
+ }
+
+ Ok(result)
+ }
+
+ // a description like it used to exist in the sysfs with the standard mdev interface
+ fn description(vgpu_type: &nvml_wrapper::vgpu::VgpuType) -> Result<String> {
+ let class_name = vgpu_type.class_name()?;
+ let max_instances = vgpu_type.max_instances()?;
+ let max_instances_per_vm = vgpu_type.max_instances_per_vm()?;
+
+ let framebuffer_size_mb = vgpu_type.framebuffer_size()? / 1024 / 1024; // bytes to MiB
+ let num_heads = vgpu_type.num_display_heads()?;
+
+ let (max_res_x, max_res_y) = (0..num_heads)
+ .filter_map(|head| vgpu_type.resolution(head).ok())
+ .max()
+ .unwrap_or((0, 0));
+
+ let license = vgpu_type.license()?;
+
+ Ok(format!(
+ "class={class_name}\n\
+ max-instances={max_instances}\n\
+ max-instances-per-vm={max_instances_per_vm}\n\
+ framebuffer-size={framebuffer_size_mb}MiB\n\
+ num-heads={num_heads}\n\
+ max-resolution={max_res_x}x{max_res_y}\n\
+ license={license}"
+ ))
+ }
+}
--
2.47.3
^ permalink raw reply [flat|nested] 6+ messages in thread* [PATCH qemu-server v2 1/2] pci: move mdev related code to own module
2026-04-02 11:21 [PATCH common/manager/proxmox-perl-rs/qemu-server v2 0/5] use NVML for vGPU info querying Dominik Csapak
2026-04-02 11:22 ` [PATCH proxmox-perl-rs v2 1/1] pve: add binding for accessing vgpu info Dominik Csapak
@ 2026-04-02 11:22 ` Dominik Csapak
2026-04-02 11:22 ` [PATCH qemu-server v2 2/2] pci: mdev: use PVE::RS::NVML for nvidia mdev information Dominik Csapak
` (2 subsequent siblings)
4 siblings, 0 replies; 6+ messages in thread
From: Dominik Csapak @ 2026-04-02 11:22 UTC (permalink / raw)
To: pve-devel
some from PVE::QemuServer::PCI but also from PVE::SysFSTools, since it
makes much more sense to have this here.
Use the current PVE::File module instead of the legacy calls to
PVE::Tools, and modernize the code with Perl's v5.36 parameter syntax.
While at it change some old uses of SysFSTools generate_mdev_uuid to the
local one.
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
src/PVE/QemuServer.pm | 5 +-
src/PVE/QemuServer/Makefile | 1 +
src/PVE/QemuServer/PCI.pm | 58 +++----------
src/PVE/QemuServer/PCI/Makefile | 9 ++
src/PVE/QemuServer/PCI/Mdev.pm | 145 ++++++++++++++++++++++++++++++++
5 files changed, 171 insertions(+), 47 deletions(-)
create mode 100644 src/PVE/QemuServer/PCI/Makefile
create mode 100644 src/PVE/QemuServer/PCI/Mdev.pm
diff --git a/src/PVE/QemuServer.pm b/src/PVE/QemuServer.pm
index 2a469fff..a111435b 100644
--- a/src/PVE/QemuServer.pm
+++ b/src/PVE/QemuServer.pm
@@ -86,6 +86,7 @@ use PVE::QemuServer::Monitor qw(mon_cmd);
use PVE::QemuServer::Network;
use PVE::QemuServer::OVMF;
use PVE::QemuServer::PCI qw(print_pci_addr print_pcie_addr print_pcie_root_port parse_hostpci);
+use PVE::QemuServer::PCI::Mdev;
use PVE::QemuServer::QemuImage;
use PVE::QemuServer::QMPHelpers qw(qemu_deviceadd qemu_devicedel qemu_objectadd qemu_objectdel);
use PVE::QemuServer::QSD;
@@ -5686,7 +5687,7 @@ sub vm_start_nolock {
my $smbios_conf = parse_smbios1($conf->{smbios1});
$uuid = $smbios_conf->{uuid} if defined($smbios_conf->{uuid});
}
- $uuid = PVE::QemuServer::PCI::generate_mdev_uuid($vmid, $index)
+ $uuid = PVE::QemuServer::PCI::Mdev::generate_mdev_uuid($vmid, $index)
if !defined($uuid);
}
}
@@ -6116,7 +6117,7 @@ sub cleanup_pci_devices {
foreach my $key (keys %$conf) {
next if $key !~ m/^hostpci(\d+)$/;
my $hostpciindex = $1;
- my $uuid = PVE::SysFSTools::generate_mdev_uuid($vmid, $hostpciindex);
+ my $uuid = PVE::QemuServer::PCI::Mdev::generate_mdev_uuid($vmid, $hostpciindex);
my $d = parse_hostpci($conf->{$key});
if ($d->{mdev}) {
# NOTE: avoid PVE::SysFSTools::pci_cleanup_mdev_device as it requires PCI ID and we
diff --git a/src/PVE/QemuServer/Makefile b/src/PVE/QemuServer/Makefile
index 7e48c388..821556ef 100644
--- a/src/PVE/QemuServer/Makefile
+++ b/src/PVE/QemuServer/Makefile
@@ -35,3 +35,4 @@ SOURCES=Agent.pm \
install: $(SOURCES)
for i in $(SOURCES); do install -D -m 0644 $$i $(DESTDIR)$(PERLDIR)/PVE/QemuServer/$$i; done
$(MAKE) -C Cfg2Cmd install
+ $(MAKE) -C PCI install
diff --git a/src/PVE/QemuServer/PCI.pm b/src/PVE/QemuServer/PCI.pm
index c9cf8de0..0b67943c 100644
--- a/src/PVE/QemuServer/PCI.pm
+++ b/src/PVE/QemuServer/PCI.pm
@@ -12,6 +12,7 @@ use PVE::Tools;
use PVE::QemuServer::Helpers;
use PVE::QemuServer::Machine;
+use PVE::QemuServer::PCI::Mdev;
use base 'Exporter';
@@ -282,11 +283,6 @@ sub get_pci_addr_map {
return $pci_addr_map;
}
-sub generate_mdev_uuid {
- my ($vmid, $index) = @_;
- return sprintf("%08d-0000-0000-0000-%012d", $index, $vmid);
-}
-
my $get_addr_mapping_from_id = sub {
my ($map, $id) = @_;
@@ -543,41 +539,6 @@ sub parse_hostpci_devices {
return $parsed_devices;
}
-# set vgpu type of a vf of an nvidia gpu with kernel 6.8 or newer
-my sub create_nvidia_device {
- my ($id, $model) = @_;
-
- $id = PVE::SysFSTools::normalize_pci_id($id);
-
- my $creation = "/sys/bus/pci/devices/$id/nvidia/current_vgpu_type";
-
- die "no nvidia sysfs api for '$id'\n" if !-f $creation;
-
- my $current = PVE::Tools::file_read_firstline($creation);
- if ($current ne "0") {
- return 1 if $current eq $model;
- # reset vgpu type so we can see all available and set the real device
- die "unable to reset vgpu type for '$id'\n" if !PVE::SysFSTools::file_write($creation, "0");
- }
-
- my $types = PVE::SysFSTools::get_mdev_types($id);
- my $selected;
- for my $type_definition ($types->@*) {
- next if $type_definition->{type} ne "nvidia-$model";
- $selected = $type_definition;
- }
-
- if (!defined($selected) || $selected->{available} < 1) {
- die "vgpu type '$model' not available for '$id'\n";
- }
-
- if (!PVE::SysFSTools::file_write($creation, $model)) {
- die "could not set vgpu type to '$model' for '$id'\n";
- }
-
- return 1;
-}
-
# takes the hash returned by parse_hostpci_devices and for all non mdev gpus,
# selects one of the given alternatives by trying to reserve it
#
@@ -612,7 +573,10 @@ sub choose_hostpci_devices {
$add_used_device->($device->{ids});
if ($device->{nvidia} && !$dry_run) {
reserve_pci_usage($device->{ids}->[0]->{id}, $vmid, 10, undef);
- create_nvidia_device($device->{ids}->[0]->{id}, $device->{nvidia});
+ PVE::QemuServer::PCI::Mdev::create_nvidia_device(
+ $device->{ids}->[0]->{id},
+ $device->{nvidia},
+ );
}
next;
}
@@ -628,7 +592,11 @@ sub choose_hostpci_devices {
}
if ($device->{nvidia} && !$dry_run) {
- eval { create_nvidia_device($ids->[0], $device->{nvidia}) };
+ eval {
+ PVE::QemuServer::PCI::Mdev::create_nvidia_device(
+ $ids->[0], $device->{nvidia},
+ );
+ };
if (my $err = $@) {
warn $err;
remove_pci_reservation($vmid, $ids);
@@ -696,7 +664,7 @@ sub print_hostpci_devices {
my $sysfspath;
if ($d->{mdev}) {
- my $uuid = generate_mdev_uuid($vmid, $i);
+ my $uuid = PVE::QemuServer::PCI::Mdev::generate_mdev_uuid($vmid, $i);
$sysfspath = "/sys/bus/mdev/devices/$uuid";
}
@@ -748,8 +716,8 @@ sub prepare_pci_device {
if ($device->{nvidia} || $driver eq "keep") {
# nothing to do
} elsif (my $mdev = $device->{mdev}) {
- my $uuid = generate_mdev_uuid($vmid, $index);
- PVE::SysFSTools::pci_create_mdev_device($pciid, $uuid, $mdev);
+ my $uuid = PVE::QemuServer::PCI::Mdev::generate_mdev_uuid($vmid, $index);
+ PVE::QemuServer::PCI::Mdev::pci_create_mdev_device($pciid, $uuid, $mdev);
} else {
die "can't unbind/bind PCI group to VFIO '$pciid'\n"
if !PVE::SysFSTools::pci_dev_group_bind_to_vfio($pciid);
diff --git a/src/PVE/QemuServer/PCI/Makefile b/src/PVE/QemuServer/PCI/Makefile
new file mode 100644
index 00000000..ecf37411
--- /dev/null
+++ b/src/PVE/QemuServer/PCI/Makefile
@@ -0,0 +1,9 @@
+DESTDIR=
+PREFIX=/usr
+PERLDIR=$(PREFIX)/share/perl5
+
+SOURCES=Mdev.pm
+
+.PHONY: install
+install: $(SOURCES)
+ for i in $(SOURCES); do install -D -m 0644 $$i $(DESTDIR)$(PERLDIR)/PVE/QemuServer/PCI/$$i; done
diff --git a/src/PVE/QemuServer/PCI/Mdev.pm b/src/PVE/QemuServer/PCI/Mdev.pm
new file mode 100644
index 00000000..3b42ce2d
--- /dev/null
+++ b/src/PVE/QemuServer/PCI/Mdev.pm
@@ -0,0 +1,145 @@
+package PVE::QemuServer::PCI::Mdev;
+
+use v5.36;
+
+use PVE::SysFSTools;
+use PVE::File qw(file_read_first_line dir_glob_foreach file_get_contents);
+
+my $pcisysfs = "/sys/bus/pci";
+
+sub generate_mdev_uuid($vmid, $index) {
+ return sprintf("%08d-0000-0000-0000-%012d", $index, $vmid);
+}
+
+#
+# return format:
+# [
+# {
+# type => 'FooType_1',
+# description => "a longer description with custom format\nand newlines",
+# available => 5,
+# },
+# ...
+# ]
+#
+sub get_mdev_types($id) {
+ $id = PVE::SysFSTools::normalize_pci_id($id);
+
+ my $types = [];
+
+ my $dev_path = "$pcisysfs/devices/$id";
+ my $mdev_path = "$dev_path/mdev_supported_types";
+ my $nvidia_path = "$dev_path/nvidia/creatable_vgpu_types";
+ if (-d $mdev_path) {
+ dir_glob_foreach(
+ $mdev_path,
+ '[^\.].*',
+ sub {
+ my ($type) = @_;
+
+ my $type_path = "$mdev_path/$type";
+
+ my $available = int(file_read_first_line("$type_path/available_instances"));
+ my $description = file_get_contents("$type_path/description");
+
+ my $entry = {
+ type => $type,
+ description => $description,
+ available => $available,
+ };
+
+ my $name = file_read_first_line("$type_path/name");
+ $entry->{name} = $name if defined($name);
+
+ push @$types, $entry;
+ },
+ );
+ } elsif (-f $nvidia_path) {
+ my $creatable = file_get_contents($nvidia_path);
+ for my $line (split("\n", $creatable)) {
+ next if $line =~ m/^ID/; # header
+ next if $line !~ m/^(.*?)\s*:\s*(.*)$/;
+ my $id = $1;
+ my $name = $2;
+
+ push $types->@*, {
+ type => "nvidia-$id", # backwards compatibility
+ description => "", # TODO, read from xml/nvidia-smi ?
+ available => 1,
+ name => $name,
+ };
+ }
+ }
+
+ return $types;
+}
+
+sub pci_create_mdev_device($pciid, $uuid, $type) {
+ $pciid = PVE::SysFSTools::normalize_pci_id($pciid);
+
+ my $basedir = "$pcisysfs/devices/$pciid";
+ my $mdev_dir = "$basedir/mdev_supported_types";
+
+ die "pci device '$pciid' does not support mediated devices\n"
+ if !-d $mdev_dir;
+
+ die "pci device '$pciid' has no type '$type'\n"
+ if !-d "$mdev_dir/$type";
+
+ if (-d "$basedir/$uuid") {
+ # it already exists, checking type
+ my $typelink = readlink("$basedir/$uuid/mdev_type");
+ my ($existingtype) = $typelink =~ m|/([^/]+)$|;
+ die "mdev instance '$uuid' already exists, but type is not '$type'\n"
+ if $type ne $existingtype;
+
+ # instance exists, so use it but warn the user
+ warn "mdev instance '$uuid' already existed, using it.\n";
+ return undef;
+ }
+
+ my $instances = file_read_first_line("$mdev_dir/$type/available_instances");
+ my ($avail) = $instances =~ m/^(\d+)$/;
+ die "pci device '$pciid' has no available instances of '$type'\n"
+ if $avail < 1;
+
+ die "could not create '$type' for pci devices '$pciid'\n"
+ if !PVE::SysFSTools::file_write("$mdev_dir/$type/create", $uuid);
+
+ return undef;
+}
+
+# set vgpu type of a vf of an nvidia gpu with kernel 6.8 or newer
+sub create_nvidia_device($id, $model) {
+ $id = PVE::SysFSTools::normalize_pci_id($id);
+
+ my $creation = "$pcisysfs/devices/$id/nvidia/current_vgpu_type";
+
+ die "no nvidia sysfs api for '$id'\n" if !-f $creation;
+
+ my $current = file_read_first_line($creation);
+ if ($current ne "0") {
+ return 1 if $current eq $model;
+ # reset vgpu type so we can see all available and set the real device
+ die "unable to reset vgpu type for '$id'\n" if !PVE::SysFSTools::file_write($creation, "0");
+ }
+
+ my $types = get_mdev_types($id);
+ my $selected;
+ for my $type_definition ($types->@*) {
+ next if $type_definition->{type} ne "nvidia-$model";
+ $selected = $type_definition;
+ }
+
+ if (!defined($selected) || $selected->{available} < 1) {
+ die "vgpu type '$model' not available for '$id'\n";
+ }
+
+ if (!PVE::SysFSTools::file_write($creation, $model)) {
+ die "could not set vgpu type to '$model' for '$id'\n";
+ }
+
+ return 1;
+}
+
+1;
--
2.47.3
^ permalink raw reply [flat|nested] 6+ messages in thread* [PATCH qemu-server v2 2/2] pci: mdev: use PVE::RS::NVML for nvidia mdev information
2026-04-02 11:21 [PATCH common/manager/proxmox-perl-rs/qemu-server v2 0/5] use NVML for vGPU info querying Dominik Csapak
2026-04-02 11:22 ` [PATCH proxmox-perl-rs v2 1/1] pve: add binding for accessing vgpu info Dominik Csapak
2026-04-02 11:22 ` [PATCH qemu-server v2 1/2] pci: move mdev related code to own module Dominik Csapak
@ 2026-04-02 11:22 ` Dominik Csapak
2026-04-02 11:22 ` [PATCH manager v2 1/1] api: hardware: pci: use NVML for querying " Dominik Csapak
2026-04-02 11:22 ` [PATCH common v2 1/1] sysfs tools: remove moved code Dominik Csapak
4 siblings, 0 replies; 6+ messages in thread
From: Dominik Csapak @ 2026-04-02 11:22 UTC (permalink / raw)
To: pve-devel
This gets us the missing description that used to be in sysfs.
In case this is an SR-IOV virtual function, we have to get the physical
device first since only that is a valid device for querying with NVML.
'pci_dev_physfn_id' is only used here currently so it's a local sub, but
if we need it in more places, a good place could be 'PVE::SysFSTools' or
'PVE::QemuServer::PCI'.
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
NOTE: needs a bumped libpve-rs-perl dependency in d/control
src/PVE/QemuServer/PCI/Mdev.pm | 45 ++++++++++++++++++++++++----------
1 file changed, 32 insertions(+), 13 deletions(-)
diff --git a/src/PVE/QemuServer/PCI/Mdev.pm b/src/PVE/QemuServer/PCI/Mdev.pm
index 3b42ce2d..51dca474 100644
--- a/src/PVE/QemuServer/PCI/Mdev.pm
+++ b/src/PVE/QemuServer/PCI/Mdev.pm
@@ -2,11 +2,28 @@ package PVE::QemuServer::PCI::Mdev;
use v5.36;
+use File::Basename;
+
+use PVE::RS::NVML;
use PVE::SysFSTools;
use PVE::File qw(file_read_first_line dir_glob_foreach file_get_contents);
my $pcisysfs = "/sys/bus/pci";
+# Returns the PCI bus id of the physical function (IOW, parent device) of the
+# given device. If the device does not have a parent physical function, returns
+# the given ID unchanged.
+my sub pci_dev_physfn_id($id) {
+ $id = PVE::SysFSTools::normalize_pci_id($id);
+ my $devpath = "$pcisysfs/devices/$id";
+
+ if (-d "$devpath/physfn") {
+ return basename(readlink("$devpath/physfn"));
+ } else {
+ return $id;
+ }
+}
+
sub generate_mdev_uuid($vmid, $index) {
return sprintf("%08d-0000-0000-0000-%012d", $index, $vmid);
}
@@ -18,6 +35,7 @@ sub generate_mdev_uuid($vmid, $index) {
# type => 'FooType_1',
# description => "a longer description with custom format\nand newlines",
# available => 5,
+# name => "human readable name for the type",
# },
# ...
# ]
@@ -55,19 +73,20 @@ sub get_mdev_types($id) {
},
);
} elsif (-f $nvidia_path) {
- my $creatable = PVE::Tools::file_get_contents($nvidia_path);
- for my $line (split("\n", $creatable)) {
- next if $line =~ m/^ID/; # header
- next if $line !~ m/^(.*?)\s*:\s*(.*)$/;
- my $id = $1;
- my $name = $2;
-
- push $types->@*, {
- type => "nvidia-$id", # backwards compatibility
- description => "", # TODO, read from xml/nvidia-smi ?
- available => 1,
- name => $name,
- };
+ my $physfn = pci_dev_physfn_id($id);
+ my $creatable = eval { PVE::RS::NVML::creatable_vgpu_types_for_dev($physfn) };
+ die "failed to query NVIDIA vGPU types for $id - $@\n" if $@;
+
+ for my $type ($creatable->@*) {
+ my $nvidia_id = $type->{id};
+ my $name = $type->{name};
+ push $types->@*,
+ {
+ type => "nvidia-$nvidia_id",
+ description => $type->{description},
+ available => 1,
+ name => $name,
+ };
}
}
--
2.47.3
^ permalink raw reply [flat|nested] 6+ messages in thread* [PATCH manager v2 1/1] api: hardware: pci: use NVML for querying mdev information
2026-04-02 11:21 [PATCH common/manager/proxmox-perl-rs/qemu-server v2 0/5] use NVML for vGPU info querying Dominik Csapak
` (2 preceding siblings ...)
2026-04-02 11:22 ` [PATCH qemu-server v2 2/2] pci: mdev: use PVE::RS::NVML for nvidia mdev information Dominik Csapak
@ 2026-04-02 11:22 ` Dominik Csapak
2026-04-02 11:22 ` [PATCH common v2 1/1] sysfs tools: remove moved code Dominik Csapak
4 siblings, 0 replies; 6+ messages in thread
From: Dominik Csapak @ 2026-04-02 11:22 UTC (permalink / raw)
To: pve-devel
By using the new functions in PVE::QemuServer::PCI::Mdev instead of
SysFSTools.
This now uses NVML for NVIDIA devices, where we can get more information
(namely the info we previously got from the standard mdev interface).
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
NOTE: needs a bumped qemu-server version dependency in d/control
PVE/API2/Hardware/PCI.pm | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/PVE/API2/Hardware/PCI.pm b/PVE/API2/Hardware/PCI.pm
index 984bb2ee..36b9741b 100644
--- a/PVE/API2/Hardware/PCI.pm
+++ b/PVE/API2/Hardware/PCI.pm
@@ -5,8 +5,8 @@ use warnings;
use PVE::JSONSchema qw(get_standard_option);
+use PVE::QemuServer::PCI::Mdev;
use PVE::RESTHandler;
-use PVE::SysFSTools;
use base qw(PVE::RESTHandler);
@@ -225,7 +225,7 @@ __PACKAGE__->register_method({
if ($param->{'pci-id-or-mapping'} =~
m/^(?:[0-9a-fA-F]{4}:)?[0-9a-fA-F]{2}:[0-9a-fA-F]{2}\.[0-9a-fA-F]$/
) {
- return PVE::SysFSTools::get_mdev_types($param->{'pci-id-or-mapping'}); # PCI ID
+ return PVE::QemuServer::PCI::Mdev::get_mdev_types($param->{'pci-id-or-mapping'}); # PCI ID
} else {
my $mapping = $param->{'pci-id-or-mapping'};
@@ -235,7 +235,7 @@ __PACKAGE__->register_method({
my $id = $device->{path};
next if $id =~ m/;/; # mdev not supported for multifunction devices
- my $device_types = PVE::SysFSTools::get_mdev_types($id);
+ my $device_types = PVE::QemuServer::PCI::Mdev::get_mdev_types($id);
for my $type_definition ($device_types->@*) {
my $type = $type_definition->{type};
--
2.47.3
^ permalink raw reply [flat|nested] 6+ messages in thread* [PATCH common v2 1/1] sysfs tools: remove moved code
2026-04-02 11:21 [PATCH common/manager/proxmox-perl-rs/qemu-server v2 0/5] use NVML for vGPU info querying Dominik Csapak
` (3 preceding siblings ...)
2026-04-02 11:22 ` [PATCH manager v2 1/1] api: hardware: pci: use NVML for querying " Dominik Csapak
@ 2026-04-02 11:22 ` Dominik Csapak
4 siblings, 0 replies; 6+ messages in thread
From: Dominik Csapak @ 2026-04-02 11:22 UTC (permalink / raw)
To: pve-devel
these functions now live in PVE::QemuServer::PCI::Mdev instead.
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
NOTE: would need a 'breaks' for older qemu-server/pve-manager versions
in d/control if applied now.
src/PVE/SysFSTools.pm | 111 ------------------------------------------
1 file changed, 111 deletions(-)
diff --git a/src/PVE/SysFSTools.pm b/src/PVE/SysFSTools.pm
index a00fbcb..f689a40 100644
--- a/src/PVE/SysFSTools.pm
+++ b/src/PVE/SysFSTools.pm
@@ -150,71 +150,6 @@ sub lspci {
return $devices;
}
-#
-# return format:
-# [
-# {
-# type => 'FooType_1',
-# description => "a longer description with custom format\nand newlines",
-# available => 5,
-# },
-# ...
-# ]
-#
-sub get_mdev_types {
- my ($id) = @_;
-
- $id = normalize_pci_id($id);
-
- my $types = [];
-
- my $dev_path = "$pcisysfs/devices/$id";
- my $mdev_path = "$dev_path/mdev_supported_types";
- my $nvidia_path = "$dev_path/nvidia/creatable_vgpu_types";
- if (-d $mdev_path) {
- dir_glob_foreach(
- $mdev_path,
- '[^\.].*',
- sub {
- my ($type) = @_;
-
- my $type_path = "$mdev_path/$type";
-
- my $available = int(file_read_firstline("$type_path/available_instances"));
- my $description = PVE::Tools::file_get_contents("$type_path/description");
-
- my $entry = {
- type => $type,
- description => $description,
- available => $available,
- };
-
- my $name = file_read_firstline("$type_path/name");
- $entry->{name} = $name if defined($name);
-
- push @$types, $entry;
- },
- );
- } elsif (-f $nvidia_path) {
- my $creatable = PVE::Tools::file_get_contents($nvidia_path);
- for my $line (split("\n", $creatable)) {
- next if $line =~ m/^ID/; # header
- next if $line !~ m/^(.*?)\s*:\s*(.*)$/;
- my $id = $1;
- my $name = $2;
-
- push $types->@*, {
- type => "nvidia-$id", # backwards compatibility
- description => "", # TODO, read from xml/nvidia-smi ?
- available => 1,
- name => $name,
- };
- }
- }
-
- return $types;
-}
-
sub check_iommu_support {
# we have IOMMU support if /sys/class/iommu/ is populated
return PVE::Tools::dir_glob_regex('/sys/class/iommu/', "[^\.].*");
@@ -372,52 +307,6 @@ sub pci_dev_group_bind_to_vfio {
return 1;
}
-sub pci_create_mdev_device {
- my ($pciid, $uuid, $type) = @_;
-
- $pciid = normalize_pci_id($pciid);
-
- my $basedir = "$pcisysfs/devices/$pciid";
- my $mdev_dir = "$basedir/mdev_supported_types";
-
- die "pci device '$pciid' does not support mediated devices \n"
- if !-d $mdev_dir;
-
- die "pci device '$pciid' has no type '$type'\n"
- if !-d "$mdev_dir/$type";
-
- if (-d "$basedir/$uuid") {
- # it already exists, checking type
- my $typelink = readlink("$basedir/$uuid/mdev_type");
- my ($existingtype) = $typelink =~ m|/([^/]+)$|;
- die "mdev instance '$uuid' already exits, but type is not '$type'\n"
- if $type ne $existingtype;
-
- # instance exists, so use it but warn the user
- warn "mdev instance '$uuid' already existed, using it.\n";
- return undef;
- }
-
- my $instances = file_read_firstline("$mdev_dir/$type/available_instances");
- my ($avail) = $instances =~ m/^(\d+)$/;
- die "pci device '$pciid' has no available instances of '$type'\n"
- if $avail < 1;
-
- die "could not create 'type' for pci devices '$pciid'\n"
- if !file_write("$mdev_dir/$type/create", $uuid);
-
- return undef;
-}
-
-# encode the hostpci index and vmid into the uuid
-sub generate_mdev_uuid {
- my ($vmid, $index) = @_;
-
- my $string = sprintf("%08d-0000-0000-0000-%012d", $index, $vmid);
-
- return $string;
-}
-
# idea is from usbutils package (/usr/bin/usb-devices) script
sub __scan_usb_device {
my ($res, $devpath, $parent, $level) = @_;
--
2.47.3
^ permalink raw reply [flat|nested] 6+ messages in thread