* [PATCH datacenter-manager v5 1/6] metric collection: collect PDM host metrics in a new collection task
2026-04-24 11:34 [PATCH datacenter-manager v5 0/6] metric collection for the PDM host Lukas Wagner
@ 2026-04-24 11:34 ` Lukas Wagner
2026-04-24 11:34 ` [PATCH datacenter-manager v5 2/6] api: fix /nodes/localhost/rrddata endpoint Lukas Wagner
` (4 subsequent siblings)
5 siblings, 0 replies; 7+ messages in thread
From: Lukas Wagner @ 2026-04-24 11:34 UTC (permalink / raw)
To: pdm-devel
The whole architecture is pretty similar to the remote metric
collection. We introduce a task that fetches host metrics and sends them
via a channel to the RRD task, which is responsible for persisting them
in the RRD database.
Signed-off-by: Lukas Wagner <l.wagner@proxmox.com>
Reviewed-by: Arthur Bied-Charreton <a.bied-charreton@proxmox.com>
Reviewed-by: Michael Köppl <m.koeppl@proxmox.com>
Tested-by: Arthur Bied-Charreton <a.bied-charreton@proxmox.com>
Tested-by: Michael Köppl <m.koeppl@proxmox.com>
---
Cargo.toml | 2 +
debian/control | 2 +
server/Cargo.toml | 2 +
.../local_collection_task.rs | 199 ++++++++++++++++++
server/src/metric_collection/mod.rs | 21 +-
server/src/metric_collection/rrd_task.rs | 185 ++++++++++++++++
6 files changed, 406 insertions(+), 5 deletions(-)
create mode 100644 server/src/metric_collection/local_collection_task.rs
diff --git a/Cargo.toml b/Cargo.toml
index 0f784c02..3fa32ff3 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -39,6 +39,7 @@ proxmox-auth-api = "1.0.5"
proxmox-base64 = "1"
proxmox-client = "1"
proxmox-daemon = "1"
+proxmox-disks = "0.2"
proxmox-docgen = "1"
proxmox-http = { version = "1.0.4", features = [ "client", "http-helpers", "websocket" ] } # see below
proxmox-human-byte = "1"
@@ -47,6 +48,7 @@ proxmox-ldap = { version = "1.1", features = ["sync"] }
proxmox-lang = "1.1"
proxmox-log = "1"
proxmox-login = "1.0.2"
+proxmox-procfs = "0.1"
proxmox-rest-server = "1"
# some use "cli", some use "cli" and "server", pbs-config uses nothing
proxmox-router = { version = "3.0.0", default-features = false }
diff --git a/debian/control b/debian/control
index 3f14090e..f9ea6302 100644
--- a/debian/control
+++ b/debian/control
@@ -52,6 +52,7 @@ Build-Depends: debhelper-compat (= 13),
librust-proxmox-config-digest-1+default-dev,
librust-proxmox-config-digest-1+openssl-dev,
librust-proxmox-daemon-1+default-dev,
+ librust-proxmox-disks-0.2+default-dev,
librust-proxmox-dns-api-1+default-dev,
librust-proxmox-dns-api-1+impl-dev,
librust-proxmox-docgen-1+default-dev,
@@ -72,6 +73,7 @@ Build-Depends: debhelper-compat (= 13),
librust-proxmox-network-api-1+impl-dev,
librust-proxmox-node-status-1+api-dev,
librust-proxmox-openid-1+default-dev (>= 1.0.2-~~),
+ librust-proxmox-procfs-0.1+default-dev,
librust-proxmox-product-config-1+default-dev,
librust-proxmox-rest-server-1+default-dev,
librust-proxmox-rest-server-1+templates-dev,
diff --git a/server/Cargo.toml b/server/Cargo.toml
index 0d6371d1..e6845989 100644
--- a/server/Cargo.toml
+++ b/server/Cargo.toml
@@ -37,6 +37,7 @@ proxmox-async.workspace = true
proxmox-auth-api = { workspace = true, features = [ "api", "ticket", "pam-authenticator", "password-authenticator" ] }
proxmox-base64.workspace = true
proxmox-daemon.workspace = true
+proxmox-disks.workspace = true
proxmox-docgen.workspace = true
proxmox-http = { workspace = true, features = [ "client-trait", "proxmox-async" ] } # pbs-client doesn't use these
proxmox-lang.workspace = true
@@ -44,6 +45,7 @@ proxmox-ldap.workspace = true
proxmox-log.workspace = true
proxmox-login.workspace = true
proxmox-openid.workspace = true
+proxmox-procfs.workspace = true
proxmox-rest-server = { workspace = true, features = [ "templates" ] }
proxmox-router = { workspace = true, features = [ "cli", "server"] }
proxmox-rrd.workspace = true
diff --git a/server/src/metric_collection/local_collection_task.rs b/server/src/metric_collection/local_collection_task.rs
new file mode 100644
index 00000000..034b51a3
--- /dev/null
+++ b/server/src/metric_collection/local_collection_task.rs
@@ -0,0 +1,199 @@
+use std::sync::Mutex;
+use std::time::Instant;
+use std::{collections::HashMap, time::Duration};
+
+use anyhow::{Context, Error};
+use tokio::{sync::mpsc::Sender, time::MissedTickBehavior};
+
+use proxmox_disks::Disks;
+use proxmox_log::{debug, error};
+use proxmox_network_api::IpLink;
+use proxmox_procfs::pressure::{PressureData, Resource};
+use proxmox_sys::fs;
+use proxmox_sys::linux::procfs;
+
+use super::rrd_task::RrdStoreRequest;
+
+const HOST_METRIC_COLLECTION_INTERVAL: Duration = Duration::from_secs(10);
+
+/// Task which periodically collects metrics from the PDM host and stores
+/// them in the local metrics database.
+pub(super) struct LocalMetricCollectionTask {
+ metric_data_tx: Sender<RrdStoreRequest>,
+}
+
+impl LocalMetricCollectionTask {
+ /// Create a new metric collection task.
+ pub(super) fn new(metric_data_tx: Sender<RrdStoreRequest>) -> Self {
+ Self { metric_data_tx }
+ }
+
+ /// Run the metric collection task.
+ ///
+ /// This function never returns.
+ pub(super) async fn run(&mut self) {
+ let mut timer = tokio::time::interval(HOST_METRIC_COLLECTION_INTERVAL);
+ timer.set_missed_tick_behavior(MissedTickBehavior::Skip);
+
+ loop {
+ timer.tick().await;
+ self.handle_tick().await;
+ }
+ }
+
+ /// Handle a timer tick.
+ async fn handle_tick(&mut self) {
+ let stats = match tokio::task::spawn_blocking(collect_host_metrics).await {
+ Ok(stats) => stats,
+ Err(err) => {
+ error!("join error while collecting host stats: {err}");
+ return;
+ }
+ };
+
+ let _ = self
+ .metric_data_tx
+ .send(RrdStoreRequest::Host {
+ timestamp: proxmox_time::epoch_i64(),
+ metrics: Box::new(stats),
+ })
+ .await;
+ }
+}
+
+/// Container type for various metrics of a PDM host.
+pub(super) struct PdmHostMetrics {
+ /// CPU statistics from `/proc/stat`.
+ pub proc: Option<procfs::ProcFsStat>,
+ /// Memory statistics from `/proc/meminfo`.
+ pub meminfo: Option<procfs::ProcFsMemInfo>,
+ /// System load stats from `/proc/loadavg`.
+ pub load: Option<procfs::Loadavg>,
+ /// Aggregated network device traffic for all physical NICs.
+ pub netstats: Option<NetDevStats>,
+ /// Block device stats for the root disk.
+ pub root_blockdev_stat: Option<proxmox_disks::BlockDevStat>,
+ /// File system usage for the root disk.
+ pub root_filesystem_info: Option<fs::FileSystemInformation>,
+ /// CPU pressure stall information for the host.
+ pub cpu_pressure: Option<PressureData>,
+ /// Memory pressure stall information for the host.
+ pub memory_pressure: Option<PressureData>,
+ /// IO pressure stall information for the host.
+ pub io_pressure: Option<PressureData>,
+}
+
+/// Aggregated network device traffic for all physical NICs.
+pub(super) struct NetDevStats {
+ /// Aggregate inbound traffic over all physical NICs in bytes.
+ pub netin: u64,
+ /// Aggregate outbound traffic over all physical NICs in bytes.
+ pub netout: u64,
+}
+
+fn collect_host_metrics() -> PdmHostMetrics {
+ let proc = procfs::read_proc_stat()
+ .inspect_err(|err| error!("failed to read '/proc/stat': {err:#}"))
+ .ok();
+
+ let meminfo = procfs::read_meminfo()
+ .inspect_err(|err| error!("failed to read '/proc/meminfo': {err:#}"))
+ .ok();
+
+ let cpu_pressure = PressureData::read_system(Resource::Cpu)
+ .inspect_err(|err| error!("failed to read CPU pressure stall information: {err:#}"))
+ .ok();
+
+ let memory_pressure = PressureData::read_system(Resource::Memory)
+ .inspect_err(|err| error!("failed to read memory pressure stall information: {err:#}"))
+ .ok();
+
+ let io_pressure = PressureData::read_system(Resource::Io)
+ .inspect_err(|err| error!("failed to read IO pressure stall information: {err:#}"))
+ .ok();
+
+ let load = procfs::read_loadavg()
+ .inspect_err(|err| error!("failed to read '/proc/loadavg': {err:#}"))
+ .ok();
+
+ let root_blockdev_stat = Disks::new()
+ .blockdev_stat_for_path("/")
+ .inspect_err(|err| error!("failed to collect blockdev statistics for '/': {err:#}"))
+ .ok();
+
+ let root_filesystem_info = proxmox_sys::fs::fs_info("/")
+ .inspect_err(|err| {
+ error!("failed to query filesystem usage for '/': {err:#}");
+ })
+ .ok();
+
+ let netstats = collect_netdev_metrics()
+ .inspect_err(|err| {
+ error!("failed to collect network device statistics: {err:#}");
+ })
+ .ok();
+
+ PdmHostMetrics {
+ proc,
+ meminfo,
+ load,
+ netstats,
+ root_blockdev_stat,
+ root_filesystem_info,
+ cpu_pressure,
+ memory_pressure,
+ io_pressure,
+ }
+}
+
+struct NetdevCacheEntry {
+ interfaces: HashMap<String, IpLink>,
+ timestamp: Instant,
+}
+
+const NETWORK_INTERFACE_CACHE_MAX_AGE: Duration = Duration::from_secs(300);
+static NETWORK_INTERFACE_CACHE: Mutex<Option<NetdevCacheEntry>> = Mutex::new(None);
+
+fn collect_netdev_metrics() -> Result<NetDevStats, Error> {
+ let net_devs = procfs::read_proc_net_dev()?;
+
+ let mut cache = NETWORK_INTERFACE_CACHE.lock().unwrap();
+
+ let now = Instant::now();
+
+ let needs_refresh = match cache.as_ref() {
+ Some(entry) => now.duration_since(entry.timestamp) > NETWORK_INTERFACE_CACHE_MAX_AGE,
+ None => true,
+ };
+
+ if needs_refresh {
+ cache.replace({
+ debug!("updating cached network devices");
+
+ let interfaces = proxmox_network_api::get_network_interfaces()
+ .context("failed to enumerate network devices")?;
+
+ NetdevCacheEntry {
+ interfaces,
+ timestamp: now,
+ }
+ });
+ }
+
+ // unwrap: at this point we *know* that the Option is Some
+ let ip_links = cache.as_ref().unwrap();
+
+ let mut netin = 0;
+ let mut netout = 0;
+
+ for net_dev in net_devs {
+ if let Some(ip_link) = ip_links.interfaces.get(&net_dev.device) {
+ if ip_link.is_physical() {
+ netin += net_dev.receive;
+ netout += net_dev.send;
+ }
+ }
+ }
+
+ Ok(NetDevStats { netin, netout })
+}
diff --git a/server/src/metric_collection/mod.rs b/server/src/metric_collection/mod.rs
index 3cd58148..8a945fab 100644
--- a/server/src/metric_collection/mod.rs
+++ b/server/src/metric_collection/mod.rs
@@ -10,6 +10,7 @@ use tokio::sync::oneshot;
use pdm_api_types::RemoteMetricCollectionStatus;
use pdm_buildcfg::PDM_STATE_DIR_M;
+mod local_collection_task;
mod remote_collection_task;
pub mod rrd_cache;
mod rrd_task;
@@ -19,6 +20,8 @@ pub mod top_entities;
use remote_collection_task::{ControlMsg, RemoteMetricCollectionTask};
use rrd_cache::RrdCache;
+use crate::metric_collection::local_collection_task::LocalMetricCollectionTask;
+
const RRD_CACHE_BASEDIR: &str = concat!(PDM_STATE_DIR_M!(), "/rrdb");
static CONTROL_MESSAGE_TX: OnceLock<Sender<ControlMsg>> = OnceLock::new();
@@ -39,14 +42,22 @@ pub fn init() -> Result<(), Error> {
pub fn start_task() -> Result<(), Error> {
let (metric_data_tx, metric_data_rx) = mpsc::channel(128);
+ let cache = rrd_cache::get_cache();
+ tokio::spawn(async move {
+ let rrd_task_future = pin!(rrd_task::store_in_rrd_task(cache, metric_data_rx));
+ let abort_future = pin!(proxmox_daemon::shutdown_future());
+ futures::future::select(rrd_task_future, abort_future).await;
+ });
+
let (trigger_collection_tx, trigger_collection_rx) = mpsc::channel(128);
if CONTROL_MESSAGE_TX.set(trigger_collection_tx).is_err() {
bail!("control message sender already set");
}
+ let metric_data_tx_clone = metric_data_tx.clone();
tokio::spawn(async move {
let metric_collection_task_future = pin!(async move {
- match RemoteMetricCollectionTask::new(metric_data_tx, trigger_collection_rx) {
+ match RemoteMetricCollectionTask::new(metric_data_tx_clone, trigger_collection_rx) {
Ok(mut task) => task.run().await,
Err(err) => log::error!("could not start metric collection task: {err}"),
}
@@ -56,12 +67,12 @@ pub fn start_task() -> Result<(), Error> {
futures::future::select(metric_collection_task_future, abort_future).await;
});
- let cache = rrd_cache::get_cache();
-
tokio::spawn(async move {
- let rrd_task_future = pin!(rrd_task::store_in_rrd_task(cache, metric_data_rx));
+ let metric_collection_task_future =
+ pin!(async move { LocalMetricCollectionTask::new(metric_data_tx).run().await });
+
let abort_future = pin!(proxmox_daemon::shutdown_future());
- futures::future::select(rrd_task_future, abort_future).await;
+ futures::future::select(metric_collection_task_future, abort_future).await;
});
Ok(())
diff --git a/server/src/metric_collection/rrd_task.rs b/server/src/metric_collection/rrd_task.rs
index 29137858..4cf18679 100644
--- a/server/src/metric_collection/rrd_task.rs
+++ b/server/src/metric_collection/rrd_task.rs
@@ -8,6 +8,7 @@ use proxmox_rrd::rrd::DataSourceType;
use pbs_api_types::{MetricDataPoint, MetricDataType, Metrics};
use pve_api_types::{ClusterMetrics, ClusterMetricsData, ClusterMetricsDataType};
+use super::local_collection_task::PdmHostMetrics;
use super::rrd_cache::RrdCache;
/// Store request for the RRD task.
@@ -45,6 +46,16 @@ pub(super) enum RrdStoreRequest {
/// Statistics.
stats: CollectionStats,
},
+ /// Store PDM host metrics.
+ Host {
+ /// Timestamp at which the metrics were collected (UNIX epoch).
+ timestamp: i64,
+
+ /// Metric data for this PDM host.
+ // Boxed to avoid a clippy warning regarding large size differences between
+ // enum variants.
+ metrics: Box<PdmHostMetrics>,
+ },
}
/// Result for a [`RrdStoreRequest`].
@@ -117,6 +128,9 @@ pub(super) async fn store_in_rrd_task(
RrdStoreRequest::CollectionStats { timestamp, stats } => {
store_stats(&cache_clone, &stats, timestamp)
}
+ RrdStoreRequest::Host { timestamp, metrics } => {
+ store_pdm_host_metrics(&cache_clone, timestamp, &metrics)
+ }
};
})
.await;
@@ -194,6 +208,177 @@ fn store_stats(cache: &RrdCache, stats: &CollectionStats, timestamp: i64) {
);
}
+fn store_pdm_host_metrics(cache: &RrdCache, timestamp: i64, metrics: &PdmHostMetrics) {
+ if let Some(proc) = &metrics.proc {
+ cache.update_value(
+ "nodes/localhost/cpu-current",
+ proc.cpu,
+ timestamp,
+ DataSourceType::Gauge,
+ );
+ cache.update_value(
+ "nodes/localhost/cpu-iowait",
+ proc.iowait_percent,
+ timestamp,
+ DataSourceType::Gauge,
+ );
+ }
+
+ if let Some(load) = &metrics.load {
+ cache.update_value(
+ "nodes/localhost/cpu-avg1",
+ load.0,
+ timestamp,
+ DataSourceType::Gauge,
+ );
+ cache.update_value(
+ "nodes/localhost/cpu-avg5",
+ load.1,
+ timestamp,
+ DataSourceType::Gauge,
+ );
+ cache.update_value(
+ "nodes/localhost/cpu-avg15",
+ load.2,
+ timestamp,
+ DataSourceType::Gauge,
+ );
+ }
+
+ if let Some(cpu_pressure) = &metrics.cpu_pressure {
+ cache.update_value(
+ "nodes/localhost/cpu-pressure-some-avg10",
+ cpu_pressure.some.average_10,
+ timestamp,
+ DataSourceType::Gauge,
+ );
+
+ // NOTE: On a system level, 'full' CPU pressure is undefined and reported as 0,
+ // so it does not make sense to store it.
+ // https://docs.kernel.org/accounting/psi.html#pressure-interface
+ }
+
+ if let Some(meminfo) = &metrics.meminfo {
+ cache.update_value(
+ "nodes/localhost/mem-total",
+ meminfo.memtotal as f64,
+ timestamp,
+ DataSourceType::Gauge,
+ );
+ cache.update_value(
+ "nodes/localhost/mem-used",
+ meminfo.memused as f64,
+ timestamp,
+ DataSourceType::Gauge,
+ );
+ cache.update_value(
+ "nodes/localhost/swap-total",
+ meminfo.swaptotal as f64,
+ timestamp,
+ DataSourceType::Gauge,
+ );
+ cache.update_value(
+ "nodes/localhost/swap-used",
+ meminfo.swapused as f64,
+ timestamp,
+ DataSourceType::Gauge,
+ );
+ }
+
+ if let Some(memory_pressure) = &metrics.memory_pressure {
+ cache.update_value(
+ "nodes/localhost/mem-pressure-some-avg10",
+ memory_pressure.some.average_10,
+ timestamp,
+ DataSourceType::Gauge,
+ );
+ cache.update_value(
+ "nodes/localhost/mem-pressure-full-avg10",
+ memory_pressure.full.average_10,
+ timestamp,
+ DataSourceType::Gauge,
+ );
+ }
+
+ if let Some(netstats) = &metrics.netstats {
+ cache.update_value(
+ "nodes/localhost/net-in",
+ netstats.netin as f64,
+ timestamp,
+ DataSourceType::Derive,
+ );
+ cache.update_value(
+ "nodes/localhost/net-out",
+ netstats.netout as f64,
+ timestamp,
+ DataSourceType::Derive,
+ );
+ }
+
+ if let Some(disk) = &metrics.root_filesystem_info {
+ cache.update_value(
+ "nodes/localhost/disk-total",
+ disk.total as f64,
+ timestamp,
+ DataSourceType::Gauge,
+ );
+ cache.update_value(
+ "nodes/localhost/disk-used",
+ disk.used as f64,
+ timestamp,
+ DataSourceType::Gauge,
+ );
+ }
+
+ if let Some(stat) = &metrics.root_blockdev_stat {
+ cache.update_value(
+ "nodes/localhost/disk-read-iops",
+ stat.read_ios as f64,
+ timestamp,
+ DataSourceType::Derive,
+ );
+ cache.update_value(
+ "nodes/localhost/disk-write-iops",
+ stat.write_ios as f64,
+ timestamp,
+ DataSourceType::Derive,
+ );
+ cache.update_value(
+ "nodes/localhost/disk-read",
+ (stat.read_sectors * 512) as f64,
+ timestamp,
+ DataSourceType::Derive,
+ );
+ cache.update_value(
+ "nodes/localhost/disk-write",
+ (stat.write_sectors * 512) as f64,
+ timestamp,
+ DataSourceType::Derive,
+ );
+ cache.update_value(
+ "nodes/localhost/disk-io-ticks",
+ (stat.io_ticks as f64) / 1000.0,
+ timestamp,
+ DataSourceType::Derive,
+ );
+ }
+
+ if let Some(io_pressure) = &metrics.io_pressure {
+ cache.update_value(
+ "nodes/localhost/io-pressure-some-avg10",
+ io_pressure.some.average_10,
+ timestamp,
+ DataSourceType::Gauge,
+ );
+ cache.update_value(
+ "nodes/localhost/io-pressure-full-avg10",
+ io_pressure.full.average_10,
+ timestamp,
+ DataSourceType::Gauge,
+ );
+ }
+}
+
#[cfg(test)]
mod tests {
use proxmox_rrd_api_types::{RrdMode, RrdTimeframe};
--
2.47.3
^ permalink raw reply related [flat|nested] 7+ messages in thread* [PATCH datacenter-manager v5 2/6] api: fix /nodes/localhost/rrddata endpoint
2026-04-24 11:34 [PATCH datacenter-manager v5 0/6] metric collection for the PDM host Lukas Wagner
2026-04-24 11:34 ` [PATCH datacenter-manager v5 1/6] metric collection: collect PDM host metrics in a new collection task Lukas Wagner
@ 2026-04-24 11:34 ` Lukas Wagner
2026-04-24 11:34 ` [PATCH datacenter-manager v5 3/6] pdm: node rrd data: rename 'total-time' to 'metric-collection-total-time' Lukas Wagner
` (3 subsequent siblings)
5 siblings, 0 replies; 7+ messages in thread
From: Lukas Wagner @ 2026-04-24 11:34 UTC (permalink / raw)
To: pdm-devel
We hadn't used this existing endpoint so far, which is why these mistakes
were not discovered earlier. First, there was a typo in the API handler path,
and second, the `node` parameter was missing from the handler itself.
Signed-off-by: Lukas Wagner <l.wagner@proxmox.com>
Reviewed-by: Arthur Bied-Charreton <a.bied-charreton@proxmox.com>
Reviewed-by: Michael Köppl <m.koeppl@proxmox.com>
Tested-by: Arthur Bied-Charreton <a.bied-charreton@proxmox.com>
Tested-by: Michael Köppl <m.koeppl@proxmox.com>
---
lib/pdm-client/src/lib.rs | 2 +-
server/src/api/nodes/mod.rs | 2 +-
server/src/api/nodes/rrddata.rs | 18 ++++++++++++++++--
3 files changed, 18 insertions(+), 4 deletions(-)
diff --git a/lib/pdm-client/src/lib.rs b/lib/pdm-client/src/lib.rs
index 8324a27d..00f1b3f9 100644
--- a/lib/pdm-client/src/lib.rs
+++ b/lib/pdm-client/src/lib.rs
@@ -380,7 +380,7 @@ impl<T: HttpApiClient> PdmClient<T> {
&self,
mode: RrdMode,
timeframe: RrdTimeframe,
- ) -> Result<pdm_api_types::rrddata::PdmNodeDatapoint, Error> {
+ ) -> Result<Vec<pdm_api_types::rrddata::PdmNodeDatapoint>, Error> {
let path = ApiPathBuilder::new("/api2/extjs/nodes/localhost/rrddata")
.arg("cf", mode)
.arg("timeframe", timeframe)
diff --git a/server/src/api/nodes/mod.rs b/server/src/api/nodes/mod.rs
index bd1396bc..7903d63a 100644
--- a/server/src/api/nodes/mod.rs
+++ b/server/src/api/nodes/mod.rs
@@ -48,7 +48,7 @@ pub const SUBDIRS: SubdirMap = &sorted!([
("journal", &journal::ROUTER),
("network", &network::ROUTER),
("report", &report::ROUTER),
- ("rrdata", &rrddata::ROUTER),
+ ("rrddata", &rrddata::ROUTER),
("sdn", &sdn::ROUTER),
("subscription", &subscription::ROUTER),
("status", &status::ROUTER),
diff --git a/server/src/api/nodes/rrddata.rs b/server/src/api/nodes/rrddata.rs
index 75900965..4c2302c8 100644
--- a/server/src/api/nodes/rrddata.rs
+++ b/server/src/api/nodes/rrddata.rs
@@ -1,10 +1,11 @@
use anyhow::Error;
use proxmox_rrd_api_types::{RrdMode, RrdTimeframe};
-use proxmox_router::Router;
+use proxmox_router::{http_bail, Router};
use proxmox_schema::api;
use pdm_api_types::rrddata::PdmNodeDatapoint;
+use pdm_api_types::NODE_SCHEMA;
use crate::api::rrd_common::{self, DataPoint};
@@ -36,6 +37,9 @@ impl DataPoint for PdmNodeDatapoint {
cf: {
type: RrdMode,
},
+ node: {
+ schema: NODE_SCHEMA,
+ },
},
},
returns: {
@@ -47,7 +51,17 @@ impl DataPoint for PdmNodeDatapoint {
}
)]
/// Read RRD data for this PDM node.
-fn get_node_rrddata(timeframe: RrdTimeframe, cf: RrdMode) -> Result<Vec<PdmNodeDatapoint>, Error> {
+fn get_node_rrddata(
+ node: String,
+ timeframe: RrdTimeframe,
+ cf: RrdMode,
+) -> Result<Vec<PdmNodeDatapoint>, Error> {
+ if node != "localhost" {
+ http_bail!(
+ BAD_REQUEST,
+ "PDM only supports `localhost` as a `node` parameter"
+ );
+ }
let base = "nodes/localhost";
rrd_common::create_datapoints_from_rrd(base, timeframe, cf)
}
--
2.47.3
^ permalink raw reply related [flat|nested] 7+ messages in thread* [PATCH datacenter-manager v5 3/6] pdm: node rrd data: rename 'total-time' to 'metric-collection-total-time'
2026-04-24 11:34 [PATCH datacenter-manager v5 0/6] metric collection for the PDM host Lukas Wagner
2026-04-24 11:34 ` [PATCH datacenter-manager v5 1/6] metric collection: collect PDM host metrics in a new collection task Lukas Wagner
2026-04-24 11:34 ` [PATCH datacenter-manager v5 2/6] api: fix /nodes/localhost/rrddata endpoint Lukas Wagner
@ 2026-04-24 11:34 ` Lukas Wagner
2026-04-24 11:34 ` [PATCH datacenter-manager v5 4/6] pdm-api-types: add PDM host metric fields Lukas Wagner
` (2 subsequent siblings)
5 siblings, 0 replies; 7+ messages in thread
From: Lukas Wagner @ 2026-04-24 11:34 UTC (permalink / raw)
To: pdm-devel
In the initial version of the remote metric collection series, there was
a separate API endpoint for metric collection rrd data, hence the short
name. Unfortunately we forgot to rename the field when the metric
collection stats were moved to the PDM host stats.
Neither the client tool nor the UI uses this field yet, and we haven't
stabilized PDM's API yet either, so it should be fine to just rename the
field.
Signed-off-by: Lukas Wagner <l.wagner@proxmox.com>
Reviewed-by: Arthur Bied-Charreton <a.bied-charreton@proxmox.com>
Reviewed-by: Michael Köppl <m.koeppl@proxmox.com>
Tested-by: Arthur Bied-Charreton <a.bied-charreton@proxmox.com>
Tested-by: Michael Köppl <m.koeppl@proxmox.com>
---
lib/pdm-api-types/src/rrddata.rs | 2 +-
server/src/api/nodes/rrddata.rs | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/lib/pdm-api-types/src/rrddata.rs b/lib/pdm-api-types/src/rrddata.rs
index 70619233..6eaaff3c 100644
--- a/lib/pdm-api-types/src/rrddata.rs
+++ b/lib/pdm-api-types/src/rrddata.rs
@@ -242,7 +242,7 @@ pub struct PdmNodeDatapoint {
/// Total time in milliseconds needed for full metric collection run.
#[serde(skip_serializing_if = "Option::is_none")]
- pub total_time: Option<f64>,
+ pub metric_collection_total_time: Option<f64>,
}
#[api]
diff --git a/server/src/api/nodes/rrddata.rs b/server/src/api/nodes/rrddata.rs
index 4c2302c8..00c4eee0 100644
--- a/server/src/api/nodes/rrddata.rs
+++ b/server/src/api/nodes/rrddata.rs
@@ -23,7 +23,7 @@ impl DataPoint for PdmNodeDatapoint {
fn set_field(&mut self, name: &str, value: f64) {
if name == "metric-collection-total-time" {
- self.total_time = Some(value);
+ self.metric_collection_total_time = Some(value);
}
}
}
--
2.47.3
^ permalink raw reply related [flat|nested] 7+ messages in thread* [PATCH datacenter-manager v5 4/6] pdm-api-types: add PDM host metric fields
2026-04-24 11:34 [PATCH datacenter-manager v5 0/6] metric collection for the PDM host Lukas Wagner
` (2 preceding siblings ...)
2026-04-24 11:34 ` [PATCH datacenter-manager v5 3/6] pdm: node rrd data: rename 'total-time' to 'metric-collection-total-time' Lukas Wagner
@ 2026-04-24 11:34 ` Lukas Wagner
2026-04-24 11:34 ` [PATCH datacenter-manager v5 5/6] ui: node status: add RRD graphs for PDM host metrics Lukas Wagner
2026-04-24 11:34 ` [PATCH datacenter-manager v5 6/6] ui: lxc/qemu/node: use RRD value render helpers from yew-comp Lukas Wagner
5 siblings, 0 replies; 7+ messages in thread
From: Lukas Wagner @ 2026-04-24 11:34 UTC (permalink / raw)
To: pdm-devel
Signed-off-by: Lukas Wagner <l.wagner@proxmox.com>
Reviewed-by: Arthur Bied-Charreton <a.bied-charreton@proxmox.com>
Reviewed-by: Michael Köppl <m.koeppl@proxmox.com>
Tested-by: Arthur Bied-Charreton <a.bied-charreton@proxmox.com>
Tested-by: Michael Köppl <m.koeppl@proxmox.com>
---
lib/pdm-api-types/src/rrddata.rs | 72 +++++++++++++++++++++++++++++++-
server/src/api/nodes/rrddata.rs | 55 ++++++++++++++++++++++--
2 files changed, 122 insertions(+), 5 deletions(-)
diff --git a/lib/pdm-api-types/src/rrddata.rs b/lib/pdm-api-types/src/rrddata.rs
index 6eaaff3c..452597a8 100644
--- a/lib/pdm-api-types/src/rrddata.rs
+++ b/lib/pdm-api-types/src/rrddata.rs
@@ -233,13 +233,81 @@ pub struct PbsDatastoreDataPoint {
}
#[api]
-#[derive(Serialize, Deserialize, Default)]
+#[derive(Serialize, Deserialize, Default, Debug)]
#[serde(rename_all = "kebab-case")]
/// RRD datapoint for statistics about the metric collection loop.
pub struct PdmNodeDatapoint {
/// Timestamp (UNIX epoch)
pub time: u64,
-
+ /// Current CPU utilization
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub cpu_current: Option<f64>,
+ /// Current IO wait
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub cpu_iowait: Option<f64>,
+ /// CPU utilization, averaged over the last minute
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub cpu_avg1: Option<f64>,
+ /// CPU utilization, averaged over the last five minutes
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub cpu_avg5: Option<f64>,
+ /// CPU utilization, averaged over the last fifteen minutes
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub cpu_avg15: Option<f64>,
+ /// Total root disk size
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub disk_total: Option<f64>,
+ /// Total root disk usage
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub disk_used: Option<f64>,
+ /// Root disk read IOPS
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub disk_read_iops: Option<f64>,
+ /// Root disk write IOPS
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub disk_write_iops: Option<f64>,
+ /// Root disk read rate
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub disk_read: Option<f64>,
+ /// Root disk write rate
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub disk_write: Option<f64>,
+ /// Root disk IO ticks
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub disk_io_ticks: Option<f64>,
+ /// Total memory size
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub mem_total: Option<f64>,
+ /// Currently used memory
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub mem_used: Option<f64>,
+ /// Total swap size
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub swap_total: Option<f64>,
+ /// Current swap usage
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub swap_used: Option<f64>,
+ /// Inbound network data rate
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub net_in: Option<f64>,
+ /// Outbound network data rate
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub net_out: Option<f64>,
+ /// Average 'some' CPU pressure over the last 10 minutes.
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub cpu_pressure_some_avg10: Option<f64>,
+ /// Average 'some' memory pressure over the last 10 minutes.
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub mem_pressure_some_avg10: Option<f64>,
+ /// Average 'full' memory pressure over the last 10 minutes.
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub mem_pressure_full_avg10: Option<f64>,
+ /// Average 'some' IO pressure over the last 10 minutes.
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub io_pressure_some_avg10: Option<f64>,
+ /// Average 'full' IO pressure over the last 10 minutes.
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub io_pressure_full_avg10: Option<f64>,
/// Total time in milliseconds needed for full metric collection run.
#[serde(skip_serializing_if = "Option::is_none")]
pub metric_collection_total_time: Option<f64>,
diff --git a/server/src/api/nodes/rrddata.rs b/server/src/api/nodes/rrddata.rs
index 00c4eee0..8ba11a5f 100644
--- a/server/src/api/nodes/rrddata.rs
+++ b/server/src/api/nodes/rrddata.rs
@@ -18,12 +18,61 @@ impl DataPoint for PdmNodeDatapoint {
}
fn fields() -> &'static [&'static str] {
- &["metric-collection-total-time"]
+ &[
+ "cpu-current",
+ "cpu-iowait",
+ "cpu-avg1",
+ "cpu-avg5",
+ "cpu-avg15",
+ "cpu-pressure-some-avg10",
+ "disk-total",
+ "disk-used",
+ "disk-read-iops",
+ "disk-write-iops",
+ "disk-read",
+ "disk-write",
+ "disk-io-ticks",
+ "io-pressure-some-avg10",
+ "io-pressure-full-avg10",
+ "mem-total",
+ "mem-used",
+ "mem-pressure-some-avg10",
+ "mem-pressure-full-avg10",
+ "swap-total",
+ "swap-used",
+ "net-in",
+ "net-out",
+ "metric-collection-total-time",
+ ]
}
fn set_field(&mut self, name: &str, value: f64) {
- if name == "metric-collection-total-time" {
- self.metric_collection_total_time = Some(value);
+ match name {
+ "cpu-current" => self.cpu_current = Some(value),
+ "cpu-iowait" => self.cpu_iowait = Some(value),
+ "cpu-avg1" => self.cpu_avg1 = Some(value),
+ "cpu-avg5" => self.cpu_avg5 = Some(value),
+ "cpu-avg15" => self.cpu_avg15 = Some(value),
+ "cpu-pressure-some-avg10" => self.cpu_pressure_some_avg10 = Some(value),
+ "disk-total" => self.disk_total = Some(value),
+ "disk-used" => self.disk_used = Some(value),
+ "disk-read-iops" => self.disk_read_iops = Some(value),
+ "disk-write-iops" => self.disk_write_iops = Some(value),
+ "disk-read" => self.disk_read = Some(value),
+ "disk-write" => self.disk_write = Some(value),
+ "disk-io-ticks" => self.disk_io_ticks = Some(value),
+ "io-pressure-some-avg10" => self.io_pressure_some_avg10 = Some(value),
+ "io-pressure-full-avg10" => self.io_pressure_full_avg10 = Some(value),
+ "mem-total" => self.mem_total = Some(value),
+ "mem-used" => self.mem_used = Some(value),
+ "mem-pressure-some-avg10" => self.mem_pressure_some_avg10 = Some(value),
+ "mem-pressure-full-avg10" => self.mem_pressure_full_avg10 = Some(value),
+ "swap-total" => self.swap_total = Some(value),
+ "swap-used" => self.swap_used = Some(value),
+ "net-in" => self.net_in = Some(value),
+ "net-out" => self.net_out = Some(value),
+ "metric-collection-total-time" => self.metric_collection_total_time = Some(value),
+ _ => log::error!("setting invalid field '{name}' in PdmNodeDatapoint"),
}
}
}
--
2.47.3
^ permalink raw reply related [flat|nested] 7+ messages in thread* [PATCH datacenter-manager v5 5/6] ui: node status: add RRD graphs for PDM host metrics
2026-04-24 11:34 [PATCH datacenter-manager v5 0/6] metric collection for the PDM host Lukas Wagner
` (3 preceding siblings ...)
2026-04-24 11:34 ` [PATCH datacenter-manager v5 4/6] pdm-api-types: add PDM host metric fields Lukas Wagner
@ 2026-04-24 11:34 ` Lukas Wagner
2026-04-24 11:34 ` [PATCH datacenter-manager v5 6/6] ui: lxc/qemu/node: use RRD value render helpers from yew-comp Lukas Wagner
5 siblings, 0 replies; 7+ messages in thread
From: Lukas Wagner @ 2026-04-24 11:34 UTC (permalink / raw)
To: pdm-devel
This adds RRD graphs in the existing node status panel. We add graphs
for
- CPU/IOWait
- Load-Avg
- Memory usage
- Network utilization
- Pressure (CPU, memory, IO)
Signed-off-by: Lukas Wagner <l.wagner@proxmox.com>
Reviewed-by: Arthur Bied-Charreton <a.bied-charreton@proxmox.com>
Reviewed-by: Michael Köppl <m.koeppl@proxmox.com>
Tested-by: Arthur Bied-Charreton <a.bied-charreton@proxmox.com>
Tested-by: Michael Köppl <m.koeppl@proxmox.com>
---
ui/src/administration/node_status.rs | 312 ++++++++++++++++++++++++++-
1 file changed, 305 insertions(+), 7 deletions(-)
diff --git a/ui/src/administration/node_status.rs b/ui/src/administration/node_status.rs
index a61f25a2..8d4d5a53 100644
--- a/ui/src/administration/node_status.rs
+++ b/ui/src/administration/node_status.rs
@@ -7,17 +7,28 @@ use proxmox_node_status::NodePowerCommand;
use proxmox_time::epoch_i64;
use proxmox_yew_comp::percent_encoding::percent_encode_component;
use proxmox_yew_comp::utils::{copy_text_to_clipboard, render_epoch};
-use proxmox_yew_comp::{http_post, ConfirmButton, NodeStatusPanel};
-use pwt::prelude::*;
+use proxmox_yew_comp::{
+ http_post, rrd_value_renderer, ConfirmButton, NodeStatusPanel, RRDGraph, RRDGrid, RRDTimeframe,
+ RRDTimeframeSelector, Series,
+};
+use pwt::css::JustifyContent;
use pwt::widget::{Button, Column, Container, Row};
use pwt::AsyncAbortGuard;
+use pwt::{prelude::*, AsyncPool};
-use crate::get_nodename;
+use pdm_api_types::rrddata::PdmNodeDatapoint;
+
+use crate::{get_nodename, renderer};
#[derive(Properties, Clone, PartialEq)]
-pub(crate) struct NodeStatus {}
+pub(crate) struct NodeStatus {
+ #[prop_or(60_000)]
+ /// The interval for refreshing the rrd data
+ pub rrd_interval: u32,
+}
impl NodeStatus {
+ /// Create new [`NodeStatus`] panel.
pub(crate) fn new() -> Self {
yew::props!(Self {})
}
@@ -31,20 +42,58 @@ impl From<NodeStatus> for VNode {
enum Msg {
Reload,
+ ReloadRrd,
+ UpdateRrdTimeframe(RRDTimeframe),
Error(Error),
RebootOrShutdown(NodePowerCommand),
ShowSystemReport(bool),
ShowPackageVersions(bool),
+ RrdLoadFinished(Result<Vec<PdmNodeDatapoint>, proxmox_client::Error>),
}
struct PdmNodeStatus {
+ time_data: Rc<Vec<i64>>,
+
+ cpu_data: Rc<Series>,
+ iowait_data: Rc<Series>,
+ load_data: Rc<Series>,
+ mem_data: Rc<Series>,
+ mem_total_data: Rc<Series>,
+ swap_data: Rc<Series>,
+ swap_total_data: Rc<Series>,
+ disk_usage_data: Rc<Series>,
+ disk_total_data: Rc<Series>,
+ disk_transfer_read_data: Rc<Series>,
+ disk_transfer_write_data: Rc<Series>,
+ disk_iops_read_data: Rc<Series>,
+ disk_iops_write_data: Rc<Series>,
+ cpu_pressure_some_data: Rc<Series>,
+ mem_pressure_some_data: Rc<Series>,
+ mem_pressure_full_data: Rc<Series>,
+ io_pressure_some_data: Rc<Series>,
+ io_pressure_full_data: Rc<Series>,
+ net_in: Rc<Series>,
+ net_out: Rc<Series>,
+
+ rrd_time_frame: RRDTimeframe,
error: Option<Error>,
abort_guard: Option<AsyncAbortGuard>,
show_system_report: bool,
show_package_versions: bool,
+
+ async_pool: AsyncPool,
+ _timeout: Option<gloo_timers::callback::Timeout>,
}
impl PdmNodeStatus {
+ async fn reload_rrd(rrd_time_frame: RRDTimeframe) -> Msg {
+ let res = crate::pdm_client()
+ .get_pdm_node_rrddata(rrd_time_frame.mode, rrd_time_frame.timeframe)
+ .await;
+
+ Msg::RrdLoadFinished(res)
+ }
+
fn change_power_state(&mut self, ctx: &yew::Context<Self>, command: NodePowerCommand) {
let link = ctx.link().clone();
self.abort_guard.replace(AsyncAbortGuard::spawn(async move {
@@ -184,8 +233,37 @@ impl Component for PdmNodeStatus {
type Message = Msg;
type Properties = NodeStatus;
- fn create(_ctx: &yew::Context<Self>) -> Self {
+ fn create(ctx: &yew::Context<Self>) -> Self {
+ ctx.link().send_message(Msg::ReloadRrd);
+
Self {
+ time_data: Rc::new(Vec::new()),
+
+ cpu_data: empty_series(),
+ cpu_pressure_some_data: empty_series(),
+ mem_pressure_some_data: empty_series(),
+ mem_pressure_full_data: empty_series(),
+ io_pressure_some_data: empty_series(),
+ io_pressure_full_data: empty_series(),
+ iowait_data: empty_series(),
+ load_data: empty_series(),
+ mem_data: empty_series(),
+ mem_total_data: empty_series(),
+ swap_data: empty_series(),
+ swap_total_data: empty_series(),
+ net_in: empty_series(),
+ net_out: empty_series(),
+ disk_usage_data: empty_series(),
+ disk_total_data: empty_series(),
+ disk_transfer_read_data: empty_series(),
+ disk_transfer_write_data: empty_series(),
+ disk_iops_read_data: empty_series(),
+ disk_iops_write_data: empty_series(),
+
+ async_pool: AsyncPool::new(),
+ _timeout: None,
+
+ rrd_time_frame: RRDTimeframe::load(),
error: None,
abort_guard: None,
show_system_report: false,
@@ -212,6 +290,121 @@ impl Component for PdmNodeStatus {
self.show_package_versions = show_package_versions;
true
}
+ Msg::ReloadRrd => {
+ self._timeout = None;
+ let timeframe = self.rrd_time_frame;
+ self.async_pool.send_future(ctx.link().clone(), async move {
+ Self::reload_rrd(timeframe).await
+ });
+ true
+ }
+ Msg::RrdLoadFinished(res) => match res {
+ Ok(data_points) => {
+ self.error = None;
+ let mut cpu_vec = Vec::with_capacity(data_points.len());
+ let mut cpu_pressure_some_vec = Vec::with_capacity(data_points.len());
+ let mut iowait_vec = Vec::with_capacity(data_points.len());
+ let mut load_vec = Vec::with_capacity(data_points.len());
+ let mut mem_vec = Vec::with_capacity(data_points.len());
+ let mut mem_total_vec = Vec::with_capacity(data_points.len());
+ let mut swap_vec = Vec::with_capacity(data_points.len());
+ let mut swap_total_vec = Vec::with_capacity(data_points.len());
+ let mut mem_pressure_some_vec = Vec::with_capacity(data_points.len());
+ let mut mem_pressure_full_vec = Vec::with_capacity(data_points.len());
+ let mut io_pressure_some_vec = Vec::with_capacity(data_points.len());
+ let mut io_pressure_full_vec = Vec::with_capacity(data_points.len());
+ let mut time_vec = Vec::with_capacity(data_points.len());
+ let mut net_in_vec = Vec::with_capacity(data_points.len());
+ let mut net_out_vec = Vec::with_capacity(data_points.len());
+ let mut disk_usage_vec = Vec::with_capacity(data_points.len());
+ let mut disk_total_vec = Vec::with_capacity(data_points.len());
+ let mut disk_transfer_read_vec = Vec::with_capacity(data_points.len());
+ let mut disk_transfer_write_vec = Vec::with_capacity(data_points.len());
+ let mut disk_iops_read_vec = Vec::with_capacity(data_points.len());
+ let mut disk_iops_write_vec = Vec::with_capacity(data_points.len());
+
+ for data in data_points {
+ cpu_vec.push(data.cpu_current.unwrap_or(f64::NAN));
+ iowait_vec.push(data.cpu_iowait.unwrap_or(f64::NAN));
+ load_vec.push(data.cpu_avg1.unwrap_or(f64::NAN));
+ cpu_pressure_some_vec
+ .push(data.cpu_pressure_some_avg10.unwrap_or(f64::NAN));
+ mem_vec.push(data.mem_used.unwrap_or(f64::NAN));
+ mem_total_vec.push(data.mem_total.unwrap_or(f64::NAN));
+ swap_vec.push(data.swap_used.unwrap_or(f64::NAN));
+ swap_total_vec.push(data.swap_total.unwrap_or(f64::NAN));
+ mem_pressure_some_vec
+ .push(data.mem_pressure_some_avg10.unwrap_or(f64::NAN));
+ mem_pressure_full_vec
+ .push(data.mem_pressure_full_avg10.unwrap_or(f64::NAN));
+ net_in_vec.push(data.net_in.unwrap_or(f64::NAN));
+ net_out_vec.push(data.net_out.unwrap_or(f64::NAN));
+ io_pressure_some_vec.push(data.io_pressure_some_avg10.unwrap_or(f64::NAN));
+ io_pressure_full_vec.push(data.io_pressure_full_avg10.unwrap_or(f64::NAN));
+
+ disk_total_vec.push(data.disk_total.unwrap_or(f64::NAN));
+ disk_usage_vec.push(data.disk_used.unwrap_or(f64::NAN));
+ disk_transfer_read_vec.push(data.disk_read.unwrap_or(f64::NAN));
+ disk_transfer_write_vec.push(data.disk_write.unwrap_or(f64::NAN));
+
+ disk_iops_read_vec.push(data.disk_read_iops.unwrap_or(f64::NAN));
+ disk_iops_write_vec.push(data.disk_write_iops.unwrap_or(f64::NAN));
+
+ time_vec.push(data.time as i64);
+ }
+
+ self.cpu_data = Rc::new(Series::new(tr!("CPU usage"), cpu_vec));
+ self.iowait_data = Rc::new(Series::new(tr!("IO delay"), iowait_vec));
+ self.load_data = Rc::new(Series::new(tr!("Server Load"), load_vec));
+ self.cpu_pressure_some_data =
+ Rc::new(Series::new(tr!("Some"), cpu_pressure_some_vec));
+ self.mem_data = Rc::new(Series::new(tr!("Used Memory"), mem_vec));
+ self.mem_total_data = Rc::new(Series::new(tr!("Total Memory"), mem_total_vec));
+ self.swap_data = Rc::new(Series::new(tr!("Used Swap"), swap_vec));
+ self.swap_total_data = Rc::new(Series::new(tr!("Total Swap"), swap_total_vec));
+ self.mem_pressure_some_data =
+ Rc::new(Series::new(tr!("Some"), mem_pressure_some_vec));
+ self.mem_pressure_full_data =
+ Rc::new(Series::new(tr!("Full"), mem_pressure_full_vec));
+ self.io_pressure_some_data =
+ Rc::new(Series::new(tr!("Some"), io_pressure_some_vec));
+ self.io_pressure_full_data =
+ Rc::new(Series::new(tr!("Full"), io_pressure_full_vec));
+
+ self.net_in = Rc::new(Series::new(tr!("Incoming"), net_in_vec));
+ self.net_out = Rc::new(Series::new(tr!("Outgoing"), net_out_vec));
+
+ self.disk_usage_data = Rc::new(Series::new(tr!("Used Disk"), disk_usage_vec));
+ self.disk_total_data = Rc::new(Series::new(tr!("Total Disk"), disk_total_vec));
+ self.disk_transfer_read_data =
+ Rc::new(Series::new(tr!("Read"), disk_transfer_read_vec));
+ self.disk_transfer_write_data =
+ Rc::new(Series::new(tr!("Write"), disk_transfer_write_vec));
+ self.disk_iops_read_data =
+ Rc::new(Series::new(tr!("Read"), disk_iops_read_vec));
+ self.disk_iops_write_data =
+ Rc::new(Series::new(tr!("Write"), disk_iops_write_vec));
+
+ self.time_data = Rc::new(time_vec);
+
+ let link = ctx.link().clone();
+ self._timeout = Some(gloo_timers::callback::Timeout::new(
+ ctx.props().rrd_interval,
+ move || link.send_message(Msg::ReloadRrd),
+ ));
+
+ true
+ }
+ Err(err) => {
+ self.error = Some(err.into());
+ true
+ }
+ },
+ Msg::UpdateRrdTimeframe(rrd_time_frame) => {
+ self.rrd_time_frame = rrd_time_frame;
+ ctx.link().send_message(Msg::ReloadRrd);
+ false
+ }
}
}
@@ -267,12 +460,113 @@ impl Component for PdmNodeStatus {
),
)
.with_child(
- Row::new()
+ Column::new()
.class("pwt-content-spacer-padding")
.class("pwt-content-spacer-colors")
.class("pwt-default-colors")
.class(pwt::css::FlexFit)
- .with_child(NodeStatusPanel::new().status_base_url("/nodes/localhost/status")),
+ .with_child(
+ NodeStatusPanel::new()
+ .status_base_url("/nodes/localhost/status")
+ .with_child(renderer::separator().padding_x(4))
+ .with_optional_child(
+ self.error
+ .as_ref()
+ .map(|err| pwt::widget::error_message(&err.to_string())),
+ )
+ .with_child(
+ Row::new()
+ .padding_x(4)
+ .padding_y(1)
+ .class(JustifyContent::FlexEnd)
+ .with_child(
+ RRDTimeframeSelector::new().on_change(
+ ctx.link().callback(Msg::UpdateRrdTimeframe),
+ ),
+ ),
+ )
+ .with_child(
+ RRDGrid::new()
+ .with_child(
+ RRDGraph::new(self.time_data.clone())
+ .title(tr!("CPU Usage"))
+ .render_value(rrd_value_renderer::render_cpu_usage)
+ .serie0(Some(self.cpu_data.clone()))
+ .serie1(Some(self.iowait_data.clone())),
+ )
+ .with_child(
+ RRDGraph::new(self.time_data.clone())
+ .title(tr!("Server Load"))
+ .render_value(rrd_value_renderer::render_load)
+ .serie0(Some(self.load_data.clone())),
+ )
+ .with_child(
+ RRDGraph::new(self.time_data.clone())
+ .title(tr!("Memory Usage"))
+ .binary(true)
+ .render_value(rrd_value_renderer::render_bytes)
+ .serie0(Some(self.mem_total_data.clone()))
+ .serie1(Some(self.mem_data.clone())),
+ )
+ .with_child(
+ RRDGraph::new(self.time_data.clone())
+ .title(tr!("Swap Usage"))
+ .binary(true)
+ .render_value(rrd_value_renderer::render_bytes)
+ .serie0(Some(self.swap_total_data.clone()))
+ .serie1(Some(self.swap_data.clone())),
+ )
+ .with_child(
+ RRDGraph::new(self.time_data.clone())
+ .title(tr!("Network Traffic"))
+ .binary(true)
+ .render_value(rrd_value_renderer::render_bandwidth)
+ .serie0(Some(self.net_in.clone()))
+ .serie1(Some(self.net_out.clone())),
+ )
+ .with_child(
+ RRDGraph::new(self.time_data.clone())
+ .title(tr!("CPU Pressure Stall"))
+ .render_value(rrd_value_renderer::render_pressure)
+ .serie0(Some(self.cpu_pressure_some_data.clone())),
+ )
+ .with_child(
+ RRDGraph::new(self.time_data.clone())
+ .title(tr!("Memory Pressure Stall"))
+ .render_value(rrd_value_renderer::render_pressure)
+ .serie0(Some(self.mem_pressure_some_data.clone()))
+ .serie1(Some(self.mem_pressure_full_data.clone())),
+ )
+ .with_child(
+ RRDGraph::new(self.time_data.clone())
+ .title(tr!("IO Pressure Stall"))
+ .render_value(rrd_value_renderer::render_pressure)
+ .serie0(Some(self.io_pressure_some_data.clone()))
+ .serie1(Some(self.io_pressure_full_data.clone())),
+ )
+ .with_child(
+ RRDGraph::new(self.time_data.clone())
+ .title(tr!("Root Disk Usage"))
+ .render_value(rrd_value_renderer::render_bytes)
+ .serie0(Some(self.disk_usage_data.clone()))
+ .serie1(Some(self.disk_total_data.clone())),
+ )
+ .with_child(
+ RRDGraph::new(self.time_data.clone())
+ .title(tr!("Root Disk Transfer Rate"))
+ .binary(true)
+ .render_value(rrd_value_renderer::render_bandwidth)
+ .serie0(Some(self.disk_transfer_read_data.clone()))
+ .serie1(Some(self.disk_transfer_write_data.clone())),
+ )
+ .with_child(
+ RRDGraph::new(self.time_data.clone())
+ .title(tr!("Root Disk IOPS"))
+ .serie0(Some(self.disk_iops_read_data.clone()))
+ .serie1(Some(self.disk_iops_write_data.clone())),
+ ),
+ ),
+ ),
)
.with_optional_child(
self.show_system_report
@@ -285,3 +579,7 @@ impl Component for PdmNodeStatus {
.into()
}
}
+
+fn empty_series() -> Rc<Series> {
+ Rc::new(Series::new("", Vec::new()))
+}
--
2.47.3
^ permalink raw reply related [flat|nested] 7+ messages in thread* [PATCH datacenter-manager v5 6/6] ui: lxc/qemu/node: use RRD value render helpers from yew-comp
2026-04-24 11:34 [PATCH datacenter-manager v5 0/6] metric collection for the PDM host Lukas Wagner
` (4 preceding siblings ...)
2026-04-24 11:34 ` [PATCH datacenter-manager v5 5/6] ui: node status: add RRD graphs for PDM host metrics Lukas Wagner
@ 2026-04-24 11:34 ` Lukas Wagner
5 siblings, 0 replies; 7+ messages in thread
From: Lukas Wagner @ 2026-04-24 11:34 UTC (permalink / raw)
To: pdm-devel
This changes the precision of CPU usage labels a tiny bit: before, there
were two decimal places (24.42%), while now there is only one (24.4%).
Using one decimal place here seems a bit cleaner in the UI and the
additional precision is not very useful for these kinds of values.
Signed-off-by: Lukas Wagner <l.wagner@proxmox.com>
Reviewed-by: Arthur Bied-Charreton <a.bied-charreton@proxmox.com>
Reviewed-by: Michael Köppl <m.koeppl@proxmox.com>
Tested-by: Arthur Bied-Charreton <a.bied-charreton@proxmox.com>
Tested-by: Michael Köppl <m.koeppl@proxmox.com>
---
ui/src/pbs/node/overview.rs | 28 ++++++----------------------
ui/src/pve/lxc/overview.rs | 34 +++++-----------------------------
ui/src/pve/node/overview.rs | 28 ++++++----------------------
ui/src/pve/qemu/overview.rs | 34 +++++-----------------------------
4 files changed, 22 insertions(+), 102 deletions(-)
diff --git a/ui/src/pbs/node/overview.rs b/ui/src/pbs/node/overview.rs
index b63d45f2..9aef2851 100644
--- a/ui/src/pbs/node/overview.rs
+++ b/ui/src/pbs/node/overview.rs
@@ -5,7 +5,9 @@ use yew::{
Context,
};
-use proxmox_yew_comp::{node_info, RRDGraph, RRDTimeframe, RRDTimeframeSelector, Series};
+use proxmox_yew_comp::{
+ node_info, rrd_value_renderer, RRDGraph, RRDTimeframe, RRDTimeframeSelector, Series,
+};
use pwt::{
css::{ColorScheme, FlexFit, JustifyContent},
prelude::*,
@@ -232,38 +234,20 @@ impl yew::Component for PbsNodeOverviewPanelComp {
.with_child(
RRDGraph::new(self.time_data.clone())
.title(tr!("CPU Usage"))
- .render_value(|v: &f64| {
- if v.is_finite() {
- format!("{:.2}%", v * 100.0)
- } else {
- v.to_string()
- }
- })
+ .render_value(rrd_value_renderer::render_cpu_usage)
.serie0(Some(self.cpu_data.clone())),
)
.with_child(
RRDGraph::new(self.time_data.clone())
.title(tr!("Server Load"))
- .render_value(|v: &f64| {
- if v.is_finite() {
- format!("{:.2}", v)
- } else {
- v.to_string()
- }
- })
+ .render_value(rrd_value_renderer::render_load)
.serie0(Some(self.load_data.clone())),
)
.with_child(
RRDGraph::new(self.time_data.clone())
.title(tr!("Memory Usage"))
.binary(true)
- .render_value(|v: &f64| {
- if v.is_finite() {
- proxmox_human_byte::HumanByte::from(*v as u64).to_string()
- } else {
- v.to_string()
- }
- })
+ .render_value(rrd_value_renderer::render_bytes)
.serie0(Some(self.mem_data.clone()))
.serie1(Some(self.mem_total_data.clone())),
),
diff --git a/ui/src/pve/lxc/overview.rs b/ui/src/pve/lxc/overview.rs
index 8c0196b3..2d8cc670 100644
--- a/ui/src/pve/lxc/overview.rs
+++ b/ui/src/pve/lxc/overview.rs
@@ -13,7 +13,7 @@ use pwt::props::WidgetBuilder;
use pwt::widget::{Column, Container, Panel, Progress, Row};
use pwt::AsyncPool;
-use proxmox_yew_comp::{RRDGraph, RRDTimeframe, RRDTimeframeSelector, Series};
+use proxmox_yew_comp::{rrd_value_renderer, RRDGraph, RRDTimeframe, RRDTimeframeSelector, Series};
use pdm_api_types::{resource::PveLxcResource, rrddata::LxcDataPoint};
use pdm_client::types::{IsRunning, LxcStatus};
@@ -338,25 +338,13 @@ impl yew::Component for LxcanelComp {
.with_child(
RRDGraph::new(self.time.clone())
.title(tr!("CPU Usage"))
- .render_value(|v: &f64| {
- if v.is_finite() {
- format!("{:.2}%", v * 100.0)
- } else {
- v.to_string()
- }
- })
+ .render_value(rrd_value_renderer::render_cpu_usage)
.serie0(Some(self.cpu.clone())),
)
.with_child(
RRDGraph::new(self.time.clone())
.title(tr!("Memory usage"))
- .render_value(|v: &f64| {
- if v.is_finite() {
- proxmox_human_byte::HumanByte::from(*v as u64).to_string()
- } else {
- v.to_string()
- }
- })
+ .render_value(rrd_value_renderer::render_bytes)
.serie0(Some(self.memory.clone()))
.serie1(Some(self.memory_max.clone())),
)
@@ -364,13 +352,7 @@ impl yew::Component for LxcanelComp {
RRDGraph::new(self.time.clone())
.title(tr!("Network Traffic"))
.binary(true)
- .render_value(|v: &f64| {
- if v.is_finite() {
- proxmox_human_byte::HumanByte::from(*v as u64).to_string()
- } else {
- v.to_string()
- }
- })
+ .render_value(rrd_value_renderer::render_bandwidth)
.serie0(Some(self.netin.clone()))
.serie1(Some(self.netout.clone())),
)
@@ -378,13 +360,7 @@ impl yew::Component for LxcanelComp {
RRDGraph::new(self.time.clone())
.title(tr!("Disk I/O"))
.binary(true)
- .render_value(|v: &f64| {
- if v.is_finite() {
- proxmox_human_byte::HumanByte::from(*v as u64).to_string()
- } else {
- v.to_string()
- }
- })
+ .render_value(rrd_value_renderer::render_bandwidth)
.serie0(Some(self.diskread.clone()))
.serie1(Some(self.diskwrite.clone())),
),
diff --git a/ui/src/pve/node/overview.rs b/ui/src/pve/node/overview.rs
index c07180b0..9df79c42 100644
--- a/ui/src/pve/node/overview.rs
+++ b/ui/src/pve/node/overview.rs
@@ -5,7 +5,9 @@ use yew::{
Context,
};
-use proxmox_yew_comp::{node_info, RRDGraph, RRDTimeframe, RRDTimeframeSelector, Series};
+use proxmox_yew_comp::{
+ node_info, rrd_value_renderer, RRDGraph, RRDTimeframe, RRDTimeframeSelector, Series,
+};
use pwt::{
css::{ColorScheme, FlexFit, JustifyContent},
prelude::*,
@@ -236,38 +238,20 @@ impl yew::Component for PveNodeOverviewPanelComp {
.with_child(
RRDGraph::new(self.time_data.clone())
.title(tr!("CPU Usage"))
- .render_value(|v: &f64| {
- if v.is_finite() {
- format!("{:.2}%", v * 100.0)
- } else {
- v.to_string()
- }
- })
+ .render_value(rrd_value_renderer::render_cpu_usage)
.serie0(Some(self.cpu_data.clone())),
)
.with_child(
RRDGraph::new(self.time_data.clone())
.title(tr!("Server Load"))
- .render_value(|v: &f64| {
- if v.is_finite() {
- format!("{:.2}", v)
- } else {
- v.to_string()
- }
- })
+ .render_value(rrd_value_renderer::render_load)
.serie0(Some(self.load_data.clone())),
)
.with_child(
RRDGraph::new(self.time_data.clone())
.title(tr!("Memory Usage"))
.binary(true)
- .render_value(|v: &f64| {
- if v.is_finite() {
- proxmox_human_byte::HumanByte::from(*v as u64).to_string()
- } else {
- v.to_string()
- }
- })
+ .render_value(rrd_value_renderer::render_bytes)
.serie0(Some(self.mem_total_data.clone()))
.serie1(Some(self.mem_data.clone())),
),
diff --git a/ui/src/pve/qemu/overview.rs b/ui/src/pve/qemu/overview.rs
index 6e601d00..7592de11 100644
--- a/ui/src/pve/qemu/overview.rs
+++ b/ui/src/pve/qemu/overview.rs
@@ -5,7 +5,7 @@ use serde_json::json;
use yew::virtual_dom::{VComp, VNode};
use proxmox_human_byte::HumanByte;
-use proxmox_yew_comp::{RRDGraph, RRDTimeframe, RRDTimeframeSelector, Series};
+use proxmox_yew_comp::{rrd_value_renderer, RRDGraph, RRDTimeframe, RRDTimeframeSelector, Series};
use pwt::prelude::*;
use pwt::props::WidgetBuilder;
@@ -347,25 +347,13 @@ impl yew::Component for QemuOverviewPanelComp {
.with_child(
RRDGraph::new(self.time.clone())
.title(tr!("CPU Usage"))
- .render_value(|v: &f64| {
- if v.is_finite() {
- format!("{:.2}%", v * 100.0)
- } else {
- v.to_string()
- }
- })
+ .render_value(rrd_value_renderer::render_cpu_usage)
.serie0(Some(self.cpu.clone())),
)
.with_child(
RRDGraph::new(self.time.clone())
.title(tr!("Memory usage"))
- .render_value(|v: &f64| {
- if v.is_finite() {
- proxmox_human_byte::HumanByte::from(*v as u64).to_string()
- } else {
- v.to_string()
- }
- })
+ .render_value(rrd_value_renderer::render_bytes)
.serie0(Some(self.memory.clone()))
.serie1(Some(self.memory_max.clone())),
)
@@ -373,13 +361,7 @@ impl yew::Component for QemuOverviewPanelComp {
RRDGraph::new(self.time.clone())
.title(tr!("Network Traffic"))
.binary(true)
- .render_value(|v: &f64| {
- if v.is_finite() {
- proxmox_human_byte::HumanByte::from(*v as u64).to_string()
- } else {
- v.to_string()
- }
- })
+ .render_value(rrd_value_renderer::render_bandwidth)
.serie0(Some(self.netin.clone()))
.serie1(Some(self.netout.clone())),
)
@@ -387,13 +369,7 @@ impl yew::Component for QemuOverviewPanelComp {
RRDGraph::new(self.time.clone())
.title(tr!("Disk I/O"))
.binary(true)
- .render_value(|v: &f64| {
- if v.is_finite() {
- proxmox_human_byte::HumanByte::from(*v as u64).to_string()
- } else {
- v.to_string()
- }
- })
+ .render_value(rrd_value_renderer::render_bandwidth)
.serie0(Some(self.diskread.clone()))
.serie1(Some(self.diskwrite.clone())),
),
--
2.47.3
^ permalink raw reply related [flat|nested] 7+ messages in thread