From: Dominik Csapak <d.csapak@proxmox.com>
To: pdm-devel@lists.proxmox.com
Subject: [pdm-devel] [PATCH datacenter-manager 6/7] ui: pve: add storage to tree and show basic panel
Date: Mon, 8 Sep 2025 16:04:17 +0200
Message-ID: <20250908140424.3376082-11-d.csapak@proxmox.com>
In-Reply-To: <20250908140424.3376082-1-d.csapak@proxmox.com>
Add a storage panel similar to the one we already have in PVE.

The storage and content type strings were copied from pve-manager's
Utils.js.

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
ui/src/pve/mod.rs | 5 +
ui/src/pve/storage.rs | 360 ++++++++++++++++++++++++++++++++++++++++++
ui/src/pve/tree.rs | 37 ++++-
ui/src/pve/utils.rs | 41 ++++-
4 files changed, 440 insertions(+), 3 deletions(-)
create mode 100644 ui/src/pve/storage.rs
diff --git a/ui/src/pve/mod.rs b/ui/src/pve/mod.rs
index 496cbc6..7313d5e 100644
--- a/ui/src/pve/mod.rs
+++ b/ui/src/pve/mod.rs
@@ -26,6 +26,7 @@ pub mod lxc;
pub mod node;
pub mod qemu;
pub mod remote;
+pub mod storage;
pub mod utils;
mod tree;
@@ -180,6 +181,10 @@ impl LoadableComponent for PveRemoteComp {
PveTreeNode::Lxc(lxc) => {
lxc::LxcPanel::new(remote.clone(), lxc.node.clone(), lxc.clone()).into()
}
+ PveTreeNode::Storage(storage) => {
+ storage::StoragePanel::new(remote.clone(), storage.node.clone(), storage.clone())
+ .into()
+ }
};
let link = ctx.link();
diff --git a/ui/src/pve/storage.rs b/ui/src/pve/storage.rs
new file mode 100644
index 0000000..4602397
--- /dev/null
+++ b/ui/src/pve/storage.rs
@@ -0,0 +1,360 @@
+use core::f64;
+use std::rc::Rc;
+
+use gloo_timers::callback::Timeout;
+use yew::{
+ virtual_dom::{VComp, VNode},
+ Properties,
+};
+
+use proxmox_human_byte::HumanByte;
+use proxmox_yew_comp::{RRDGraph, RRDTimeframe, RRDTimeframeSelector, Series, Status};
+use pwt::{
+ css::{AlignItems, ColorScheme, FlexFit, JustifyContent},
+ prelude::*,
+ props::WidgetBuilder,
+ widget::{Column, Container, Fa, Panel, Progress, Row},
+ AsyncPool,
+};
+
+use pdm_api_types::{resource::PveStorageResource, rrddata::PveStorageDataPoint};
+use pdm_client::types::PveStorageStatus;
+
+use crate::{
+ pve::utils::{render_content_type, render_storage_type},
+ renderer::separator,
+};
+
+#[derive(Clone, Debug, Properties)]
+pub struct StoragePanel {
+ remote: String,
+ node: String,
+ info: PveStorageResource,
+
+ #[prop_or(60_000)]
+ /// The interval in milliseconds for refreshing the RRD data
+ pub rrd_interval: u32,
+
+ #[prop_or(10_000)]
+ /// The interval in milliseconds for refreshing the status data
+ pub status_interval: u32,
+}
+
+impl PartialEq for StoragePanel {
+ fn eq(&self, other: &Self) -> bool {
+ if self.remote == other.remote && self.node == other.node {
+ // only check some fields, so we don't update when e.g. only the cpu changes
+ self.info.storage == other.info.storage
+ && self.info.id == other.info.id
+ && self.info.node == other.info.node
+ } else {
+ false
+ }
+ }
+}
+impl Eq for StoragePanel {}
+
+impl StoragePanel {
+ pub fn new(remote: String, node: String, info: PveStorageResource) -> Self {
+ yew::props!(Self { remote, node, info })
+ }
+}
+
+impl From<StoragePanel> for VNode {
+ fn from(val: StoragePanel) -> Self {
+ VComp::new::<StoragePanelComp>(Rc::new(val), None).into()
+ }
+}
+
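+/// Messages driving the component's periodic status and RRD reloads.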
+pub enum Msg {
+ ReloadStatus,
+ ReloadRrd,
+ StatusResult(Result<PveStorageStatus, proxmox_client::Error>),
+ RrdResult(Result<Vec<PveStorageDataPoint>, proxmox_client::Error>),
+ UpdateRrdTimeframe(RRDTimeframe),
+}
+
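+/// View state of the storage panel; the timeout and async pool handles are
+/// kept so pending reloads are dropped together with the component.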
+pub struct StoragePanelComp {
+ status: Option<PveStorageStatus>,
+ last_status_error: Option<proxmox_client::Error>,
+ last_rrd_error: Option<proxmox_client::Error>,
+ _status_timeout: Option<Timeout>,
+ _rrd_timeout: Option<Timeout>,
+ _async_pool: AsyncPool,
+
+ rrd_time_frame: RRDTimeframe,
+
+ time: Rc<Vec<i64>>,
+ disk: Rc<Series>,
+ disk_max: Rc<Series>,
+}
+
+impl StoragePanelComp {
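+ /// Fetches the current storage status via the PDM client.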
+ async fn reload_status(
+ remote: &str,
+ node: &str,
+ id: &str,
+ ) -> Result<PveStorageStatus, proxmox_client::Error> {
+ let status = crate::pdm_client()
+ .pve_storage_status(remote, node, id)
+ .await?;
+ Ok(status)
+ }
+
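+ /// Fetches the RRD data for the given timeframe via the PDM client.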
+ async fn reload_rrd(
+ remote: &str,
+ node: &str,
+ id: &str,
+ rrd_time_frame: RRDTimeframe,
+ ) -> Result<Vec<PveStorageDataPoint>, proxmox_client::Error> {
+ let rrd = crate::pdm_client()
+ .pve_storage_rrddata(
+ remote,
+ node,
+ id,
+ rrd_time_frame.mode,
+ rrd_time_frame.timeframe,
+ )
+ .await?;
+ Ok(rrd)
+ }
+}
+
+impl yew::Component for StoragePanelComp {
+ type Message = Msg;
+
+ type Properties = StoragePanel;
+
+ fn create(ctx: &yew::Context<Self>) -> Self {
+ ctx.link()
+ .send_message_batch(vec![Msg::ReloadStatus, Msg::ReloadRrd]);
+ Self {
+ status: None,
+ _status_timeout: None,
+ _rrd_timeout: None,
+ _async_pool: AsyncPool::new(),
+ last_rrd_error: None,
+ last_status_error: None,
+
+ rrd_time_frame: RRDTimeframe::load(),
+
+ time: Rc::new(Vec::new()),
+ disk: Rc::new(Series::new("", Vec::new())),
+ disk_max: Rc::new(Series::new("", Vec::new())),
+ }
+ }
+
+ fn update(&mut self, ctx: &Context<Self>, msg: Self::Message) -> bool {
+ let link = ctx.link().clone();
+ let props = ctx.props();
+ let remote = props.remote.clone();
+ let node = props.node.clone();
+ let id = props.info.storage.clone();
+ match msg {
+ Msg::ReloadStatus => {
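+ // fetch in the background; the view re-renders once StatusResult arrives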
+ self._async_pool.send_future(link, async move {
+ Msg::StatusResult(Self::reload_status(&remote, &node, &id).await)
+ });
+ false
+ }
+ Msg::ReloadRrd => {
+ let timeframe = self.rrd_time_frame;
+ self._async_pool.send_future(link, async move {
+ Msg::RrdResult(Self::reload_rrd(&remote, &node, &id, timeframe).await)
+ });
+ false
+ }
+ Msg::StatusResult(res) => {
+ match res {
+ Ok(status) => {
+ self.last_status_error = None;
+ self.status = Some(status);
+ }
+ Err(err) => {
+ self.last_status_error = Some(err);
+ }
+ }
+
+ self._status_timeout = Some(Timeout::new(props.status_interval, move || {
+ link.send_message(Msg::ReloadStatus)
+ }));
+ true
+ }
+ Msg::RrdResult(res) => {
+ match res {
+ Ok(rrd) => {
+ self.last_rrd_error = None;
+
+ let mut disk = Vec::new();
+ let mut disk_max = Vec::new();
+ let mut time = Vec::new();
+ for data in rrd {
+ disk.push(data.disk_used.unwrap_or(f64::NAN));
+ disk_max.push(data.disk_total.unwrap_or(f64::NAN));
+ time.push(data.time as i64);
+ }
+
+ self.disk = Rc::new(Series::new(tr!("Usage"), disk));
+ self.disk_max = Rc::new(Series::new(tr!("Total"), disk_max));
+ self.time = Rc::new(time);
+ }
+ Err(err) => self.last_rrd_error = Some(err),
+ }
+ self._rrd_timeout = Some(Timeout::new(props.rrd_interval, move || {
+ link.send_message(Msg::ReloadRrd)
+ }));
+ true
+ }
+ Msg::UpdateRrdTimeframe(rrd_time_frame) => {
+ self.rrd_time_frame = rrd_time_frame;
+ ctx.link().send_message(Msg::ReloadRrd);
+ false
+ }
+ }
+ }
+
+ fn changed(&mut self, ctx: &Context<Self>, old_props: &Self::Properties) -> bool {
+ let props = ctx.props();
+
+ if props.remote != old_props.remote || props.info != old_props.info {
+ self.status = None;
+ self.last_status_error = None;
+ self.last_rrd_error = None;
+
+ self.time = Rc::new(Vec::new());
+ self.disk = Rc::new(Series::new("", Vec::new()));
+ self.disk_max = Rc::new(Series::new("", Vec::new()));
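+ // a fresh pool drops any requests still in flight for the old storage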
+ self._async_pool = AsyncPool::new();
+ ctx.link()
+ .send_message_batch(vec![Msg::ReloadStatus, Msg::ReloadRrd]);
+ true
+ } else {
+ false
+ }
+ }
+
+ fn view(&self, ctx: &yew::Context<Self>) -> yew::Html {
+ let props = ctx.props();
+ let title: Html = Row::new()
+ .gap(2)
+ .class(AlignItems::Baseline)
+ .with_child(Fa::new("database"))
+ .with_child(tr!("Storage '{0}'", props.info.storage))
+ .into();
+
+ let mut status_comp = Column::new().gap(2).padding(4);
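+ // until the first status response arrives, fall back to the usage
+ // numbers from the resource listing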
+ let status = match &self.status {
+ Some(status) => status,
+ None => &PveStorageStatus {
+ active: None,
+ avail: Some(props.info.maxdisk as i64 - props.info.disk as i64),
+ content: vec![],
+ enabled: None,
+ shared: None,
+ total: Some(props.info.maxdisk as i64),
+ ty: String::new(),
+ used: Some(props.info.disk as i64),
+ },
+ };
+
+ status_comp = status_comp
+ .with_child(make_row(
+ tr!("Enabled"),
+ Fa::new(if status.enabled.unwrap_or_default() {
+ "toggle-on"
+ } else {
+ "toggle-off"
+ }),
+ String::new(),
+ ))
+ .with_child(make_row(
+ tr!("Active"),
+ Fa::from(if status.active.unwrap_or_default() {
+ Status::Success
+ } else {
+ Status::Error
+ }),
+ String::new(),
+ ))
+ .with_child(make_row(
+ tr!("Content"),
+ Fa::new("list"),
+ status
+ .content
+ .iter()
+ .map(render_content_type)
+ .collect::<Vec<_>>()
+ .join(", "),
+ ))
+ .with_child(make_row(
+ tr!("Type"),
+ Fa::new("database"),
+ render_storage_type(&status.ty),
+ ));
+
+ status_comp.add_child(Container::new().padding(1)); // spacer
+
+ let disk = status.used.unwrap_or_default();
+ let maxdisk = status.total.unwrap_or_default();
+ // guard against a division by zero when the total is unknown
+ let disk_usage = if maxdisk > 0 {
+ disk as f64 / maxdisk as f64
+ } else {
+ 0.0
+ };
+ status_comp.add_child(crate::renderer::status_row(
+ tr!("Usage"),
+ Fa::new("database"),
+ tr!(
+ "{0}% ({1} of {2})",
+ format!("{:.2}", disk_usage * 100.0),
+ HumanByte::from(disk as u64),
+ HumanByte::from(maxdisk as u64),
+ ),
+ Some(disk_usage as f32),
+ false,
+ ));
+
+ let loading = self.status.is_none() && self.last_status_error.is_none();
+
+ Panel::new()
+ .class(FlexFit)
+ .title(title)
+ .class(ColorScheme::Neutral)
+ .with_child(
+ // FIXME: add some 'visible' or 'active' property to the progress
+ Progress::new()
+ .value((!loading).then_some(0.0))
+ .style("opacity", (!loading).then_some("0")),
+ )
+ .with_child(status_comp)
+ .with_child(separator().padding_x(4))
+ .with_child(
+ Row::new()
+ .padding_x(4)
+ .padding_y(1)
+ .class(JustifyContent::FlexEnd)
+ .with_child(
+ RRDTimeframeSelector::new()
+ .on_change(ctx.link().callback(Msg::UpdateRrdTimeframe)),
+ ),
+ )
+ .with_child(
+ Container::new().class(FlexFit).with_child(
+ Column::new().padding(4).gap(4).with_child(
+ RRDGraph::new(self.time.clone())
+ .title(tr!("Usage"))
+ .render_value(|v: &f64| {
+ if v.is_finite() {
+ proxmox_human_byte::HumanByte::from(*v as u64).to_string()
+ } else {
+ v.to_string()
+ }
+ })
+ .serie0(Some(self.disk.clone()))
+ .serie1(Some(self.disk_max.clone())),
+ ),
+ ),
+ )
+ .into()
+ }
+}
+
+fn make_row(title: String, icon: Fa, text: String) -> Column {
+ crate::renderer::status_row(title, icon, text, None, true)
+}
diff --git a/ui/src/pve/tree.rs b/ui/src/pve/tree.rs
index c4d1322..168e322 100644
--- a/ui/src/pve/tree.rs
+++ b/ui/src/pve/tree.rs
@@ -22,7 +22,7 @@ use pwt::widget::{
use pwt::{prelude::*, widget::Button};
use pdm_api_types::{
- resource::{PveLxcResource, PveNodeResource, PveQemuResource, PveResource},
+ resource::{PveLxcResource, PveNodeResource, PveQemuResource, PveResource, PveStorageResource},
RemoteUpid,
};
@@ -39,6 +39,7 @@ pub enum PveTreeNode {
Node(PveNodeResource),
Lxc(PveLxcResource),
Qemu(PveQemuResource),
+ Storage(PveStorageResource),
}
impl ExtractPrimaryKey for PveTreeNode {
@@ -48,6 +49,7 @@ impl ExtractPrimaryKey for PveTreeNode {
PveTreeNode::Node(node) => node.id.as_str(),
PveTreeNode::Lxc(lxc) => lxc.id.as_str(),
PveTreeNode::Qemu(qemu) => qemu.id.as_str(),
+ PveTreeNode::Storage(storage) => storage.id.as_str(),
})
}
}
@@ -59,6 +61,9 @@ impl PveTreeNode {
PveTreeNode::Node(node) => format!("node+{}", node.node),
PveTreeNode::Lxc(lxc) => format!("guest+{}", lxc.vmid),
PveTreeNode::Qemu(qemu) => format!("guest+{}", qemu.vmid),
+ PveTreeNode::Storage(storage) => {
+ format!("storage+{}+{}", storage.node, storage.storage)
+ }
}
}
}
@@ -181,7 +186,19 @@ impl PveTreeComp {
}
node.append(PveTreeNode::Lxc(lxc_info.clone()));
}
- _ => {} //PveResource::Storage(pve_storage_resource) => todo!(),
+ PveResource::Storage(storage) => {
+ let node_id = format!("remote/{}/node/{}", remote, storage.node);
+ let key = Key::from(node_id.as_str());
+ let mut node = match root.find_node_by_key_mut(&key) {
+ Some(node) => node,
+ None => root.append(create_empty_node(node_id)),
+ };
+
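+ // expand on the initial load so storages show up right away,
+ // matching the guest handling above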
+ if !self.loaded {
+ node.set_expanded(true);
+ }
+ node.append(PveTreeNode::Storage(storage.clone()));
+ }
}
}
if !self.loaded {
@@ -212,6 +229,13 @@ impl PveTreeComp {
(PveTreeNode::Qemu(a), PveTreeNode::Qemu(b)) => {
cmp_guests(a.template, b.template, a.vmid, b.vmid)
}
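+ // guests sort before storages; storages sort among themselves by id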
+ (PveTreeNode::Lxc(_) | PveTreeNode::Qemu(_), PveTreeNode::Storage(_)) => {
+ std::cmp::Ordering::Less
+ }
+ (PveTreeNode::Storage(_), PveTreeNode::Lxc(_) | PveTreeNode::Qemu(_)) => {
+ std::cmp::Ordering::Greater
+ }
+ (PveTreeNode::Storage(a), PveTreeNode::Storage(b)) => a.id.cmp(&b.id),
});
let first_id = root
.children()
@@ -549,6 +573,9 @@ fn columns(
PveTreeNode::Lxc(r) => {
(utils::render_lxc_status_icon(r), render_lxc_name(r, true))
}
+ PveTreeNode::Storage(r) => {
+ (utils::render_storage_status_icon(r), r.storage.clone())
+ }
};
Row::new()
@@ -604,6 +631,12 @@ fn columns(
None,
Some(r.node.clone()),
),
+ PveTreeNode::Storage(r) => (
+ r.id.as_str(),
+ format!("storage/{}/{}", r.node, r.storage),
+ None,
+ Some(r.node.clone()),
+ ),
};
Row::new()
diff --git a/ui/src/pve/utils.rs b/ui/src/pve/utils.rs
index d0c8ccc..7663734 100644
--- a/ui/src/pve/utils.rs
+++ b/ui/src/pve/utils.rs
@@ -4,13 +4,14 @@ use pdm_api_types::resource::{
};
use pdm_client::types::{
LxcConfig, LxcConfigMp, LxcConfigRootfs, LxcConfigUnused, PveQmIde, QemuConfig, QemuConfigSata,
- QemuConfigScsi, QemuConfigUnused, QemuConfigVirtio,
+ QemuConfigScsi, QemuConfigUnused, QemuConfigVirtio, StorageContent,
};
use proxmox_schema::property_string::PropertyString;
use proxmox_yew_comp::{GuestState, NodeState, StorageState};
use pwt::{
css::Opacity,
props::{ContainerBuilder, WidgetBuilder, WidgetStyleBuilder},
+ tr,
widget::{Container, Fa, Row},
};
@@ -238,3 +239,41 @@ where
f(&key, res.map_err(Error::from));
}
}
+
+/// Renders a PVE storage backend type as a human-readable name.
+pub(crate) fn render_storage_type(ty: &str) -> String {
+ if ty == "dir" {
+ return tr!("Directory");
+ }
+ String::from(match ty {
+ "lvm" => "LVM",
+ "lvmthin" => "LVM-Thin",
+ "btrfs" => "BTRFS",
+ "nfs" => "NFS",
+ "cifs" => "SMB/CIFS",
+ "iscsi" => "iSCSI",
+ "cephfs" => "CephFS",
+ "pvecephfs" => "CephFS (PVE)",
+ "rbd" => "RBD",
+ "pveceph" => "RBD (PVE)",
+ "zfs" => "ZFS over iSCSI",
+ "zfspool" => "ZFS",
+ "pbs" => "Proxmox Backup Server",
+ "esxi" => "ESXi",
+ _ => ty,
+ })
+}
+
+/// Renders a PVE storage content type as a human-readable name.
+pub(crate) fn render_content_type(ty: &StorageContent) -> String {
+ match ty {
+ StorageContent::Backup => tr!("Backup"),
+ StorageContent::Images => tr!("Disk image"),
+ StorageContent::Import => tr!("Import"),
+ StorageContent::Iso => tr!("ISO image"),
+ StorageContent::Rootdir => tr!("Container"),
+ StorageContent::Snippets => tr!("Snippets"),
+ StorageContent::Vztmpl => tr!("Container template"),
+ StorageContent::None => tr!("None"),
+ }
+}
--
2.47.2