From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from firstgate.proxmox.com (firstgate.proxmox.com [IPv6:2a01:7e0:0:424::9]) by lore.proxmox.com (Postfix) with ESMTPS id 0976F1FF142 for ; Mon, 02 Mar 2026 14:44:42 +0100 (CET) Received: from firstgate.proxmox.com (localhost [127.0.0.1]) by firstgate.proxmox.com (Proxmox) with ESMTP id E716D34049; Mon, 2 Mar 2026 14:45:42 +0100 (CET) From: Lukas Sichert To: pdm-devel@lists.proxmox.com Subject: [PATCH datacenter-manager 4/4] fix #7135: ui: correct calculations for shared storages Date: Mon, 2 Mar 2026 14:45:37 +0100 Message-ID: <20260302134537.108696-5-l.sichert@proxmox.com> X-Mailer: git-send-email 2.47.3 In-Reply-To: <20260302134537.108696-1-l.sichert@proxmox.com> References: <20260302134537.108696-1-l.sichert@proxmox.com> MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-SPAM-LEVEL: Spam detection results: 0 AWL 0.498 Adjusted score from AWL reputation of From: address BAYES_00 -1.9 Bayes spam probability is 0 to 1% DMARC_MISSING 0.1 Missing DMARC policy KAM_DMARC_STATUS 0.01 Test Rule for DKIM or SPF Failure with Strict Alignment KAM_LAZY_DOMAIN_SECURITY 1 Sending domain does not have any anti-forgery methods RDNS_NONE 0.793 Delivered to internal network by a host with no rDNS SPF_HELO_NONE 0.001 SPF: HELO does not publish an SPF Record SPF_NONE 0.001 SPF: sender does not publish an SPF Record Message-ID-Hash: GL2G63A6AJRABM6N7OHMBDHPRWSGSDBU X-Message-ID-Hash: GL2G63A6AJRABM6N7OHMBDHPRWSGSDBU X-MailFrom: lsichert@pve.proxmox.com X-Mailman-Rule-Misses: dmarc-mitigation; no-senders; approved; loop; banned-address; emergency; member-moderation; nonmember-moderation; administrivia; implicit-dest; max-recipients; max-size; news-moderation; no-subject; digests; suspicious-header CC: Lukas Sichert X-Mailman-Version: 3.3.10 Precedence: list List-Id: Proxmox Datacenter Manager development discussion List-Help: List-Owner: List-Post: List-Subscribe: List-Unsubscribe: Currently the storage calculation for a 
cluster in the datacenter-manager is incorrect when a shared storage is involved. Each node reports the shared storage separately, causing it to be counted multiple times in the total capacity. To be able to handle shared storages correctly, introduce a `shared` field to PveStorageResource and parse it from the API-Response. After that use the field and the storage name to only count the shared storage once. This also aligns pdm storage calculation with the one in the pve-frontend. Signed-off-by: Lukas Sichert --- cli/client/src/resources.rs | 1 + lib/pdm-api-types/src/resource.rs | 2 ++ server/src/api/resources.rs | 1 + server/src/views/tests.rs | 1 + ui/src/pve/remote_overview.rs | 9 +++++++-- 5 files changed, 12 insertions(+), 2 deletions(-) diff --git a/cli/client/src/resources.rs b/cli/client/src/resources.rs index 9b91a4b..74763bf 100644 --- a/cli/client/src/resources.rs +++ b/cli/client/src/resources.rs @@ -85,6 +85,7 @@ impl fmt::Display for PrintResource { disk, maxdisk, id: _, + shared: _, ref storage, ref node, ref status, diff --git a/lib/pdm-api-types/src/resource.rs b/lib/pdm-api-types/src/resource.rs index 93f8bd2..b68777c 100644 --- a/lib/pdm-api-types/src/resource.rs +++ b/lib/pdm-api-types/src/resource.rs @@ -355,6 +355,8 @@ pub struct PveStorageResource { pub node: String, /// Storage status pub status: String, + /// shared flag + pub shared: bool, } #[api] diff --git a/server/src/api/resources.rs b/server/src/api/resources.rs index 5d5fe2c..a966109 100644 --- a/server/src/api/resources.rs +++ b/server/src/api/resources.rs @@ -1135,6 +1135,7 @@ pub(super) fn map_pve_storage( storage: resource.storage.unwrap_or_default(), node: resource.node.unwrap_or_default(), status: resource.status.unwrap_or_default(), + shared: resource.shared.unwrap_or_default(), }), _ => None, } diff --git a/server/src/views/tests.rs b/server/src/views/tests.rs index 9f49620..b8c63b4 100644 --- a/server/src/views/tests.rs +++ b/server/src/views/tests.rs @@ -14,6 +14,7 @@ fn 
make_storage_resource(remote: &str, node: &str, storage_name: &str) -> Resour storage: storage_name.into(), node: node.into(), status: "available".into(), + shared: false, }) } diff --git a/ui/src/pve/remote_overview.rs b/ui/src/pve/remote_overview.rs index e452131..5b63539 100644 --- a/ui/src/pve/remote_overview.rs +++ b/ui/src/pve/remote_overview.rs @@ -1,3 +1,4 @@ +use std::collections::HashSet; use std::rc::Rc; use proxmox_yew_comp::{Status, StatusRow}; @@ -72,12 +73,16 @@ impl RemotePanelComp { let mut nodes = 0; let mut cpu_usage = 0.0; let mut level = None; + let mut seen_shared_storages: HashSet = HashSet::new(); for res in ctx.props().resources.iter() { match res { PveResource::Storage(store) => { - storage += store.disk; - max_storage += store.maxdisk; + let storage_name = store.storage.clone(); + if seen_shared_storages.insert(storage_name) { + storage += store.disk; + max_storage += store.maxdisk; + } } PveResource::Qemu(qemu) => { guests += 1; -- 2.47.3