From: Aaron Lauterer <a.lauterer@proxmox.com>
To: pve-devel@lists.proxmox.com
Subject: [pve-devel] [PATCH manager 2/5] pvestatd: collect and distribute new pve9- metrics
Date: Fri, 23 May 2025 18:37:40 +0200 [thread overview]
Message-ID: <20250523163749.428293-5-a.lauterer@proxmox.com> (raw)
In-Reply-To: <20250523163749.428293-1-a.lauterer@proxmox.com>
If the migration to the new pve9- RRD format has already been done or is
in progress (i.e. the new directory exists), we collect and broadcast the
new format with additional columns for nodes and VMs (guests). A short
sketch of the PSI data layout behind the new pressure columns follows the
column list below.
The new columns are:
Nodes:
* memfree
* membuffers
* memcached
* arcsize
* pressures:
* cpu some
* io some
* io full
* mem some
* mem full
VMs:
* memhost (memory consumption of all processes in the guest's cgroup, host view)
* pressures:
* cpu some
* cpu full
* io some
* io full
* mem some
* mem full
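For reference, here is a minimal standalone sketch of the pressure (PSI) data
layout these columns rely on. This is not the PVE::ProcFSTools::read_pressure()
implementation, only an illustration of the nested hash shape the patch
accesses ($pressures->{cpu}{some}{avg10} and friends), built from
/proc/pressure/*:

    use strict;
    use warnings;

    sub read_pressure_sketch {
        my $res = {};
        for my $type (qw(cpu io memory)) {
            open(my $fh, '<', "/proc/pressure/$type")
                or next; # PSI not available (e.g. disabled or old kernel)
            while (my $line = <$fh>) {
                # lines look like: "some avg10=0.00 avg60=0.00 avg300=0.00 total=0"
                if ($line =~ /^(some|full)\s+avg10=(\S+)\s+avg60=(\S+)\s+avg300=(\S+)/) {
                    $res->{$type}->{$1} = { avg10 => $2, avg60 => $3, avg300 => $4 };
                }
            }
            close($fh);
        }
        return $res;
    }

    my $pressures = read_pressure_sketch();
    printf "cpu some avg10: %s\n", $pressures->{cpu}{some}{avg10} // 'n/a';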
Signed-off-by: Aaron Lauterer <a.lauterer@proxmox.com>
---
Notes:
this will automatically send the additional columns to the metric
servers as well. I am not sure whether that is acceptable, or whether it
is a problem we need to address, or at least mention in the release
notes.
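As a rough sketch of the selection logic added by this patch (not the full
pvestatd code path): the pve9-* RRD keys are only used once the matching
directory exists below /var/lib/rrdcached/db, i.e. after the migration tool
ran or a new install created it. Paths and key names match the patch;
$nodename is a placeholder:

    use strict;
    use warnings;

    my $nodename = 'node1'; # placeholder

    my $rrd_dir_exists = sub {
        my ($location) = @_;
        return -d "/var/lib/rrdcached/db/${location}" ? 1 : 0;
    };

    my $key = $rrd_dir_exists->('pve9-node')
        ? "pve9-node/$nodename"   # new format with the additional columns
        : "pve2-node/$nodename";  # legacy format, unchanged column set

    print "would broadcast RRD data under '$key'\n";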
PVE/Service/pvestatd.pm | 128 +++++++++++++++++++++++++++++++---------
1 file changed, 100 insertions(+), 28 deletions(-)
diff --git a/PVE/Service/pvestatd.pm b/PVE/Service/pvestatd.pm
index d80c62da..eb4352fa 100755
--- a/PVE/Service/pvestatd.pm
+++ b/PVE/Service/pvestatd.pm
@@ -82,6 +82,16 @@ my $cached_kvm_version = '';
my $next_flag_update_time;
my $failed_flag_update_delay_sec = 120;
+# Checks if RRD files exist in the specified location.
+my $rrd_dir_exists = sub {
+ my ($location) = @_;
+ if (-d "/var/lib/rrdcached/db/${location}") {
+ return 1;
+ } else {
+ return 0;
+ }
+};
+
sub update_supported_cpuflags {
my $kvm_version = PVE::QemuServer::kvm_user_version();
@@ -173,19 +183,38 @@ sub update_node_status {
my $meminfo = PVE::ProcFSTools::read_meminfo();
+ my $pressures = PVE::ProcFSTools::read_pressure();
+
my $dinfo = df('/', 1); # output is bytes
# everything not free is considered to be used
my $dused = $dinfo->{blocks} - $dinfo->{bfree};
my $ctime = time();
- my $data = $generate_rrd_string->(
- [$uptime, $sublevel, $ctime, $avg1, $maxcpu, $stat->{cpu}, $stat->{wait},
- $meminfo->{memtotal}, $meminfo->{memused},
- $meminfo->{swaptotal}, $meminfo->{swapused},
- $dinfo->{blocks}, $dused, $netin, $netout]
- );
- PVE::Cluster::broadcast_rrd("pve2-node/$nodename", $data);
+ my $data;
+ # TODO: switch fully to pve9-node
+ if ($rrd_dir_exists->("pve9-node")) {
+ $data = $generate_rrd_string->(
+ [$uptime, $sublevel, $ctime, $avg1, $maxcpu, $stat->{cpu},
+ $stat->{wait}, $meminfo->{memtotal}, $meminfo->{memused},
+ $meminfo->{swaptotal}, $meminfo->{swapused}, $dinfo->{blocks},
+ $dused, $netin, $netout, $meminfo->{memavailable},
+ $meminfo->{buffers}, $meminfo->{cached}, $meminfo->{arcsize},
+ $pressures->{cpu}{some}{avg10}, $pressures->{io}{some}{avg10},
+ $pressures->{io}{full}{avg10},
+ $pressures->{memory}{some}{avg10},
+ $pressures->{memory}{full}{avg10}]
+ );
+ PVE::Cluster::broadcast_rrd("pve9-node/$nodename", $data);
+ } else {
+ $data = $generate_rrd_string->(
+ [$uptime, $sublevel, $ctime, $avg1, $maxcpu, $stat->{cpu}, $stat->{wait},
+ $meminfo->{memtotal}, $meminfo->{memused},
+ $meminfo->{swaptotal}, $meminfo->{swapused},
+ $dinfo->{blocks}, $dused, $netin, $netout]
+ );
+ PVE::Cluster::broadcast_rrd("pve2-node/$nodename", $data);
+ }
my $node_metric = {
uptime => $uptime,
@@ -252,17 +281,39 @@ sub update_qemu_status {
my $data;
my $status = $d->{qmpstatus} || $d->{status} || 'stopped';
my $template = $d->{template} ? $d->{template} : "0";
- if ($d->{pid}) { # running
- $data = $generate_rrd_string->(
- [$d->{uptime}, $d->{name}, $status, $template, $ctime, $d->{cpus}, $d->{cpu},
- $d->{maxmem}, $d->{mem}, $d->{maxdisk}, $d->{disk},
- $d->{netin}, $d->{netout}, $d->{diskread}, $d->{diskwrite}]);
+
+ # TODO: switch fully to pve9-vm
+ if ($rrd_dir_exists->("pve9-vm")) {
+ if ($d->{pid}) { # running
+ $data = $generate_rrd_string->(
+ [$d->{uptime}, $d->{name}, $status, $template, $ctime,
+ $d->{cpus}, $d->{cpu}, $d->{maxmem}, $d->{mem},
+ $d->{maxdisk}, $d->{disk}, $d->{netin}, $d->{netout},
+ $d->{diskread}, $d->{diskwrite}, $d->{memhost},
+ $d->{pressurecpusome}, $d->{pressurecpufull},
+ $d->{pressureiosome}, $d->{pressureiofull},
+ $d->{pressurememorysome}, $d->{pressurememoryfull}]);
+ } else {
+ $data = $generate_rrd_string->(
+ [0, $d->{name}, $status, $template, $ctime, $d->{cpus},
+ undef, $d->{maxmem}, undef, $d->{maxdisk}, $d->{disk},
+ undef, undef, undef, undef, undef, undef, undef, undef,
+ undef, undef, undef]);
+ }
+ PVE::Cluster::broadcast_rrd("pve9-vm/$vmid", $data);
} else {
- $data = $generate_rrd_string->(
- [0, $d->{name}, $status, $template, $ctime, $d->{cpus}, undef,
- $d->{maxmem}, undef, $d->{maxdisk}, $d->{disk}, undef, undef, undef, undef]);
+ if ($d->{pid}) { # running
+ $data = $generate_rrd_string->(
+ [$d->{uptime}, $d->{name}, $status, $template, $ctime, $d->{cpus}, $d->{cpu},
+ $d->{maxmem}, $d->{mem}, $d->{maxdisk}, $d->{disk},
+ $d->{netin}, $d->{netout}, $d->{diskread}, $d->{diskwrite}]);
+ } else {
+ $data = $generate_rrd_string->(
+ [0, $d->{name}, $status, $template, $ctime, $d->{cpus}, undef,
+ $d->{maxmem}, undef, $d->{maxdisk}, $d->{disk}, undef, undef, undef, undef]);
+ }
+ PVE::Cluster::broadcast_rrd("pve2.3-vm/$vmid", $data);
}
- PVE::Cluster::broadcast_rrd("pve2.3-vm/$vmid", $data);
PVE::ExtMetric::update_all($transactions, 'qemu', $vmid, $d, $ctime, $nodename);
}
@@ -460,20 +511,40 @@ sub update_lxc_status {
my $d = $vmstatus->{$vmid};
my $template = $d->{template} ? $d->{template} : "0";
my $data;
- if ($d->{status} eq 'running') { # running
- $data = $generate_rrd_string->(
- [$d->{uptime}, $d->{name}, $d->{status}, $template,
- $ctime, $d->{cpus}, $d->{cpu},
- $d->{maxmem}, $d->{mem},
- $d->{maxdisk}, $d->{disk},
- $d->{netin}, $d->{netout},
- $d->{diskread}, $d->{diskwrite}]);
+ if ($rrd_dir_exists->("pve9-vm")) {
+ if ($d->{pid}) { # running
+ $data = $generate_rrd_string->(
+ [$d->{uptime}, $d->{name}, $d->{status}, $template, $ctime,
+ $d->{cpus}, $d->{cpu}, $d->{maxmem}, $d->{mem},
+ $d->{maxdisk}, $d->{disk}, $d->{netin}, $d->{netout},
+ $d->{diskread}, $d->{diskwrite}, undef,
+ $d->{pressurecpusome}, $d->{pressurecpufull},
+ $d->{pressureiosome}, $d->{pressureiofull},
+ $d->{pressurememorysome}, $d->{pressurememoryfull}]);
+ } else {
+ $data = $generate_rrd_string->(
+ [0, $d->{name}, $d->{status}, $template, $ctime,
+ $d->{cpus}, undef, $d->{maxmem}, undef, $d->{maxdisk},
+ $d->{disk}, undef, undef, undef, undef, undef, undef,
+ undef, undef, undef, undef, undef]);
+ }
+ PVE::Cluster::broadcast_rrd("pve9-vm/$vmid", $data);
} else {
- $data = $generate_rrd_string->(
- [0, $d->{name}, $d->{status}, $template, $ctime, $d->{cpus}, undef,
- $d->{maxmem}, undef, $d->{maxdisk}, $d->{disk}, undef, undef, undef, undef]);
+ if ($d->{status} eq 'running') { # running
+ $data = $generate_rrd_string->(
+ [$d->{uptime}, $d->{name}, $d->{status}, $template,
+ $ctime, $d->{cpus}, $d->{cpu},
+ $d->{maxmem}, $d->{mem},
+ $d->{maxdisk}, $d->{disk},
+ $d->{netin}, $d->{netout},
+ $d->{diskread}, $d->{diskwrite}]);
+ } else {
+ $data = $generate_rrd_string->(
+ [0, $d->{name}, $d->{status}, $template, $ctime, $d->{cpus}, undef,
+ $d->{maxmem}, undef, $d->{maxdisk}, $d->{disk}, undef, undef, undef, undef]);
+ }
+ PVE::Cluster::broadcast_rrd("pve2.3-vm/$vmid", $data);
}
- PVE::Cluster::broadcast_rrd("pve2.3-vm/$vmid", $data);
PVE::ExtMetric::update_all($transactions, 'lxc', $vmid, $d, $ctime, $nodename);
}
@@ -498,6 +569,7 @@ sub update_storage_status {
my $data = $generate_rrd_string->([$ctime, $d->{total}, $d->{used}]);
my $key = "pve2-storage/${nodename}/$storeid";
+ $key = "pve9-storage/${nodename}/$storeid" if $rrd_dir_exists->("pve9-storage");
PVE::Cluster::broadcast_rrd($key, $data);
PVE::ExtMetric::update_all($transactions, 'storage', $nodename, $storeid, $d, $ctime);
--
2.39.5
_______________________________________________
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel