public inbox for pdm-devel@lists.proxmox.com
 help / color / mirror / Atom feed
From: Dominik Csapak <d.csapak@proxmox.com>
To: pdm-devel@lists.proxmox.com
Subject: [PATCH datacenter-manager 1/4] api: return global cpu/memory/storage statistics
Date: Mon, 23 Mar 2026 12:03:38 +0100	[thread overview]
Message-ID: <20260323110728.1500528-2-d.csapak@proxmox.com> (raw)
In-Reply-To: <20260323110728.1500528-1-d.csapak@proxmox.com>

Global CPU/memory/storage usage (per remote type) is useful and
interesting from an administration POV. Calculate and return these so
we can use them on the dashboards.

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
 lib/pdm-api-types/src/lib.rs      |  2 +-
 lib/pdm-api-types/src/resource.rs | 27 +++++++++++++
 server/src/api/resources.rs       | 65 ++++++++++++++++++++++++-------
 3 files changed, 80 insertions(+), 14 deletions(-)

diff --git a/lib/pdm-api-types/src/lib.rs b/lib/pdm-api-types/src/lib.rs
index d4cc7ef0..9bccd50f 100644
--- a/lib/pdm-api-types/src/lib.rs
+++ b/lib/pdm-api-types/src/lib.rs
@@ -191,7 +191,7 @@ pub const PVE_STORAGE_ID_SCHEMA: Schema = StringSchema::new("Storage ID.")
 // Complex type definitions
 
 #[api()]
-#[derive(Default, Serialize, Deserialize)]
+#[derive(Default, Serialize, Deserialize, PartialEq, Clone)]
 /// Storage space usage information.
 pub struct StorageStatus {
     /// Total space (bytes).
diff --git a/lib/pdm-api-types/src/resource.rs b/lib/pdm-api-types/src/resource.rs
index d2db3b5a..1f74e09c 100644
--- a/lib/pdm-api-types/src/resource.rs
+++ b/lib/pdm-api-types/src/resource.rs
@@ -6,6 +6,8 @@ use serde::{Deserialize, Serialize};
 use proxmox_schema::{api, ApiStringFormat, ApiType, EnumEntry, OneOfSchema, Schema, StringSchema};
 
 use super::remotes::{RemoteType, REMOTE_ID_SCHEMA};
+use super::StorageStatus;
+
 use pve_api_types::ClusterResourceNetworkType;
 
 /// High PBS datastore usage threshold
@@ -666,6 +668,18 @@ pub struct SdnZoneCount {
     pub unknown: u64,
 }
 
+#[api]
+#[derive(Default, Serialize, Deserialize, Clone, PartialEq)]
+/// Statistics for CPU utilization
+pub struct CpuStatistics {
+    /// Number of CPU threads currently utilized
+    pub used: f64,
+    /// Number of physically available CPU threads
+    pub max: f64,
+    /// Number of cores currently allocated to running guests (only on PVE)
+    pub allocated: Option<f64>,
+}
+
 #[api(
     properties: {
         "failed_remotes_list": {
@@ -697,6 +711,19 @@ pub struct ResourcesStatus {
     pub pbs_nodes: NodeStatusCount,
     /// Status of PBS Datastores
     pub pbs_datastores: PbsDatastoreStatusCount,
+    /// Combined CPU statistics for all PVE remotes
+    pub pve_cpu_stats: CpuStatistics,
+    /// Combined CPU statistics for all PBS remotes
+    pub pbs_cpu_stats: CpuStatistics,
+    /// Combined Memory statistics for all PVE remotes
+    pub pve_memory_stats: StorageStatus,
+    /// Combined Memory statistics for all PBS remotes
+    pub pbs_memory_stats: StorageStatus,
+    /// Combined Storage statistics for all PVE remotes (shared storages are only counted once per
+    /// remote).
+    pub pve_storage_stats: StorageStatus,
+    /// Combined Storage statistics for all PBS remotes
+    pub pbs_storage_stats: StorageStatus,
     /// List of the failed remotes including type and error
     #[serde(default, skip_serializing_if = "Vec::is_empty")]
     pub failed_remotes_list: Vec<FailedRemote>,
diff --git a/server/src/api/resources.rs b/server/src/api/resources.rs
index 5cb67bf5..04628a81 100644
--- a/server/src/api/resources.rs
+++ b/server/src/api/resources.rs
@@ -1,4 +1,4 @@
-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};
 use std::str::FromStr;
 use std::sync::{LazyLock, RwLock};
 
@@ -468,6 +468,7 @@ pub async fn get_status(
     let remotes_with_resources =
         get_resources_impl(max_age, None, None, view.as_deref(), Some(rpcenv)).await?;
     let mut counts = ResourcesStatus::default();
+    let mut pve_cpu_allocated = 0.0;
     for remote_with_resources in remotes_with_resources {
         if let Some(err) = remote_with_resources.error {
             counts.failed_remotes += 1;
@@ -479,29 +480,52 @@ pub async fn get_status(
         } else {
             counts.remotes += 1;
         }
+        let mut seen_storages = HashSet::new();
         for resource in remote_with_resources.resources {
             match resource {
-                Resource::PveStorage(r) => match r.status.as_str() {
-                    "available" => counts.storages.available += 1,
-                    _ => counts.storages.unknown += 1,
-                },
+                Resource::PveStorage(r) => {
+                    match r.status.as_str() {
+                        "available" => counts.storages.available += 1,
+                        _ => counts.storages.unknown += 1,
+                    }
+                    if !r.shared || !seen_storages.contains(&r.storage) {
+                        counts.pve_storage_stats.total += r.maxdisk;
+                        counts.pve_storage_stats.used += r.disk;
+                        counts.pve_storage_stats.avail += r.maxdisk - r.disk;
+                        seen_storages.insert(r.storage);
+                    }
+                }
                 Resource::PveQemu(r) => match r.status.as_str() {
                     _ if r.template => counts.qemu.template += 1,
-                    "running" => counts.qemu.running += 1,
+                    "running" => {
+                        counts.qemu.running += 1;
+                        pve_cpu_allocated += r.maxcpu;
+                    }
                     "stopped" => counts.qemu.stopped += 1,
                     _ => counts.qemu.unknown += 1,
                 },
                 Resource::PveLxc(r) => match r.status.as_str() {
                     _ if r.template => counts.lxc.template += 1,
-                    "running" => counts.lxc.running += 1,
+                    "running" => {
+                        counts.lxc.running += 1;
+                        pve_cpu_allocated += r.maxcpu;
+                    }
                     "stopped" => counts.lxc.stopped += 1,
                     _ => counts.lxc.unknown += 1,
                 },
-                Resource::PveNode(r) => match r.status.as_str() {
-                    "online" => counts.pve_nodes.online += 1,
-                    "offline" => counts.pve_nodes.offline += 1,
-                    _ => counts.pve_nodes.unknown += 1,
-                },
+                Resource::PveNode(r) => {
+                    match r.status.as_str() {
+                        "online" => counts.pve_nodes.online += 1,
+                        "offline" => counts.pve_nodes.offline += 1,
+                        _ => counts.pve_nodes.unknown += 1,
+                    }
+                    counts.pve_cpu_stats.used += r.cpu * r.maxcpu;
+                    counts.pve_cpu_stats.max += r.maxcpu;
+
+                    counts.pve_memory_stats.total += r.maxmem;
+                    counts.pve_memory_stats.used += r.mem;
+                    counts.pve_memory_stats.avail += r.maxmem - r.mem;
+                }
                 Resource::PveNetwork(r) => {
                     if let PveNetworkResource::Zone(zone) = r {
                         match zone.status() {
@@ -521,7 +545,16 @@ pub async fn get_status(
                     }
                 }
                 // FIXME better status for pbs/datastores
-                Resource::PbsNode(_) => counts.pbs_nodes.online += 1,
+                Resource::PbsNode(r) => {
+                    counts.pbs_nodes.online += 1;
+
+                    counts.pbs_cpu_stats.used += r.cpu * r.maxcpu;
+                    counts.pbs_cpu_stats.max += r.maxcpu;
+
+                    counts.pbs_memory_stats.total += r.maxmem;
+                    counts.pbs_memory_stats.used += r.mem;
+                    counts.pbs_memory_stats.avail += r.maxmem - r.mem;
+                }
                 Resource::PbsDatastore(r) => {
                     if r.maintenance.is_none() {
                         counts.pbs_datastores.online += 1;
@@ -546,11 +579,17 @@ pub async fn get_status(
                         }
                         _ => (),
                     }
+
+                    counts.pbs_storage_stats.total += r.maxdisk;
+                    counts.pbs_storage_stats.used += r.disk;
+                    counts.pbs_storage_stats.avail += r.maxdisk - r.disk;
                 }
             }
         }
     }
 
+    counts.pve_cpu_stats.allocated = Some(pve_cpu_allocated);
+
     Ok(counts)
 }
 
-- 
2.47.3





  reply	other threads:[~2026-03-23 11:07 UTC|newest]

Thread overview: 11+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-03-23 11:03 [PATCH datacenter-manager 0/4] add resource gauge panels to dashboard/views Dominik Csapak
2026-03-23 11:03 ` Dominik Csapak [this message]
2026-03-25 16:49   ` [PATCH datacenter-manager 1/4] api: return global cpu/memory/storage statistics Thomas Lamprecht
2026-03-26  7:59     ` Dominik Csapak
2026-03-23 11:03 ` [PATCH datacenter-manager 2/4] ui: css: use mask for svg icons Dominik Csapak
2026-03-23 11:03 ` [PATCH datacenter-manager 3/4] ui: dashboard: add new gauge panels widget type Dominik Csapak
2026-03-23 11:03 ` [PATCH datacenter-manager 4/4] ui: dashboard: add resource gauges to default dashboard Dominik Csapak
2026-03-24 10:25 ` [PATCH datacenter-manager 0/4] add resource gauge panels to dashboard/views Lukas Wagner
2026-03-25 11:48   ` Thomas Lamprecht
2026-03-25 13:12     ` Dominik Csapak
2026-03-30 13:16 ` superseded: " Dominik Csapak

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260323110728.1500528-2-d.csapak@proxmox.com \
    --to=d.csapak@proxmox.com \
    --cc=pdm-devel@lists.proxmox.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox
Service provided by Proxmox Server Solutions GmbH | Privacy | Legal