From: Dominik Csapak <d.csapak@proxmox.com>
To: pdm-devel@lists.proxmox.com
Date: Mon, 8 Sep 2025 16:04:17 +0200
Message-ID: <20250908140424.3376082-11-d.csapak@proxmox.com>
X-Mailer: git-send-email 2.47.2
In-Reply-To: <20250908140424.3376082-1-d.csapak@proxmox.com>
References: <20250908140424.3376082-1-d.csapak@proxmox.com>
Subject: [pdm-devel] [PATCH datacenter-manager 6/7] ui: pve: add storage to tree and show basic panel

Similar to the panel we already have in PVE.

The storage and content type strings were copied from pve-manager's
Utils.js.

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
 ui/src/pve/mod.rs     |   5 +
 ui/src/pve/storage.rs | 360 ++++++++++++++++++++++++++++++++++++++++++
 ui/src/pve/tree.rs    |  37 ++++-
 ui/src/pve/utils.rs   |  41 ++++-
 4 files changed, 440 insertions(+), 3 deletions(-)
 create mode 100644 ui/src/pve/storage.rs

diff --git a/ui/src/pve/mod.rs b/ui/src/pve/mod.rs
index 496cbc6..7313d5e 100644
--- a/ui/src/pve/mod.rs
+++ b/ui/src/pve/mod.rs
@@ -26,6 +26,7 @@ pub mod lxc;
 pub mod node;
 pub mod qemu;
 pub mod remote;
+pub mod storage;
 pub mod utils;
 
 mod tree;
@@ -180,6 +181,10 @@ impl LoadableComponent for PveRemoteComp {
             PveTreeNode::Lxc(lxc) => {
                 lxc::LxcPanel::new(remote.clone(), lxc.node.clone(), lxc.clone()).into()
             }
+            PveTreeNode::Storage(storage) => {
+                storage::StoragePanel::new(remote.clone(), storage.node.clone(), storage.clone())
+                    .into()
+            }
         };
 
         let link = ctx.link();
diff --git a/ui/src/pve/storage.rs b/ui/src/pve/storage.rs
new file mode 100644
index 0000000..4602397
--- /dev/null
+++ b/ui/src/pve/storage.rs
@@ -0,0 +1,360 @@
+use core::f64;
+use std::rc::Rc;
+
+use gloo_timers::callback::Timeout;
+use yew::{
+    virtual_dom::{VComp, VNode},
+    Properties,
+};
+
+use proxmox_human_byte::HumanByte;
+use proxmox_yew_comp::{RRDGraph, RRDTimeframe, RRDTimeframeSelector, Series, Status};
+use pwt::{
+    css::{AlignItems, ColorScheme, FlexFit, JustifyContent},
+    prelude::*,
+    props::WidgetBuilder,
+    widget::{Column, Container, Fa, Panel, Progress, Row},
+    AsyncPool,
+};
+
+use pdm_api_types::{resource::PveStorageResource, rrddata::PveStorageDataPoint};
+use pdm_client::types::PveStorageStatus;
+
+use crate::{
+    pve::utils::{render_content_type, render_storage_type},
+    renderer::separator,
+};
+
+#[derive(Clone, Debug, Properties)]
+pub struct StoragePanel {
+    remote: String,
+    node: String,
+    info: PveStorageResource,
+
+    #[prop_or(60_000)]
+    /// The interval for refreshing the rrd data
+    pub rrd_interval: u32,
+
+    #[prop_or(10_000)]
+    /// The interval for refreshing the status data
+    pub status_interval: u32,
+}
+
+impl PartialEq for StoragePanel {
+    fn eq(&self, other: &Self) -> bool {
+        if self.remote == other.remote && self.node == other.node {
+            // only check some fields, so we don't update when e.g. only the cpu changes
+            self.info.storage == other.info.storage
+                && self.info.id == other.info.id
+                && self.info.node == other.node
+        } else {
+            false
+        }
+    }
+}
+impl Eq for StoragePanel {}
+
+impl StoragePanel {
+    pub fn new(remote: String, node: String, info: PveStorageResource) -> Self {
+        yew::props!(Self { remote, node, info })
+    }
+}
+
+impl Into<VNode> for StoragePanel {
+    fn into(self) -> VNode {
+        VComp::new::<StoragePanelComp>(Rc::new(self), None).into()
+    }
+}
+
+pub enum Msg {
+    ReloadStatus,
+    ReloadRrd,
+    StatusResult(Result<PveStorageStatus, proxmox_client::Error>),
+    RrdResult(Result<Vec<PveStorageDataPoint>, proxmox_client::Error>),
+    UpdateRrdTimeframe(RRDTimeframe),
+}
+
+pub struct StoragePanelComp {
+    status: Option<PveStorageStatus>,
+    last_status_error: Option<proxmox_client::Error>,
+    last_rrd_error: Option<proxmox_client::Error>,
+    _status_timeout: Option<Timeout>,
+    _rrd_timeout: Option<Timeout>,
+    _async_pool: AsyncPool,
+
+    rrd_time_frame: RRDTimeframe,
+
+    time: Rc<Vec<i64>>,
+    disk: Rc<Series>,
+    disk_max: Rc<Series>,
+}
+
+impl StoragePanelComp {
+    async fn reload_status(
+        remote: &str,
+        node: &str,
+        id: &str,
+    ) -> Result<PveStorageStatus, proxmox_client::Error> {
+        let status = crate::pdm_client()
+            .pve_storage_status(remote, node, id)
+            .await?;
+        Ok(status)
+    }
+
+    async fn reload_rrd(
+        remote: &str,
+        node: &str,
+        id: &str,
+        rrd_time_frame: RRDTimeframe,
+    ) -> Result<Vec<PveStorageDataPoint>, proxmox_client::Error> {
+        let rrd = crate::pdm_client()
+            .pve_storage_rrddata(
+                remote,
+                node,
+                id,
+                rrd_time_frame.mode,
+                rrd_time_frame.timeframe,
+            )
+            .await?;
+        Ok(rrd)
+    }
+}
+
+impl yew::Component for StoragePanelComp {
+    type Message = Msg;
+
+    type Properties = StoragePanel;
+
+    fn create(ctx: &yew::Context<Self>) -> Self {
+        ctx.link()
+            .send_message_batch(vec![Msg::ReloadStatus, Msg::ReloadRrd]);
+        Self {
+            status: None,
+            _status_timeout: None,
+            _rrd_timeout: None,
+            _async_pool: AsyncPool::new(),
+            last_rrd_error: None,
+            last_status_error: None,
+
+            rrd_time_frame: RRDTimeframe::load(),
+
+            time: Rc::new(Vec::new()),
+            disk: Rc::new(Series::new("", Vec::new())),
+            disk_max: Rc::new(Series::new("", Vec::new())),
+        }
+    }
+
+    fn update(&mut self, ctx: &Context<Self>, msg: Self::Message) -> bool {
+        let link = ctx.link().clone();
+        let props = ctx.props();
+        let remote = props.remote.clone();
+        let node = props.node.clone();
+        let id = props.info.storage.clone();
+        match msg {
+            Msg::ReloadStatus => {
+                self._async_pool.send_future(link, async move {
+                    Msg::StatusResult(Self::reload_status(&remote, &node, &id).await)
+                });
+                false
+            }
+            Msg::ReloadRrd => {
+                let timeframe = self.rrd_time_frame;
+                self._async_pool.send_future(link, async move {
+                    Msg::RrdResult(Self::reload_rrd(&remote, &node, &id, timeframe).await)
+                });
+                false
+            }
+            Msg::StatusResult(res) => {
+                match res {
+                    Ok(status) => {
+                        self.last_status_error = None;
+                        self.status = Some(status);
+                    }
+                    Err(err) => {
+                        self.last_status_error = Some(err);
+                    }
+                }
+
+                self._status_timeout = Some(Timeout::new(props.status_interval, move || {
+                    link.send_message(Msg::ReloadStatus)
+                }));
+                true
+            }
+            Msg::RrdResult(res) => {
+                match res {
+                    Ok(rrd) => {
+                        self.last_rrd_error = None;
+
+                        let mut disk = Vec::new();
+                        let mut disk_max = Vec::new();
+                        let mut time = Vec::new();
+                        for data in rrd {
+                            disk.push(data.disk_used.unwrap_or(f64::NAN));
+                            disk_max.push(data.disk_total.unwrap_or(f64::NAN));
+                            time.push(data.time as i64);
+                        }
+
+                        self.disk = Rc::new(Series::new(tr!("Usage"), disk));
+                        self.disk_max = Rc::new(Series::new(tr!("Total"), disk_max));
+                        self.time = Rc::new(time);
+                    }
+                    Err(err) => self.last_rrd_error = Some(err),
+                }
+                self._rrd_timeout = Some(Timeout::new(props.rrd_interval, move || {
+                    link.send_message(Msg::ReloadRrd)
+                }));
+                true
+            }
+            Msg::UpdateRrdTimeframe(rrd_time_frame) => {
+                self.rrd_time_frame = rrd_time_frame;
+                ctx.link().send_message(Msg::ReloadRrd);
+                false
+            }
+        }
+    }
+
+    fn changed(&mut self, ctx: &Context<Self>, old_props: &Self::Properties) -> bool {
+        let props = ctx.props();
+
+        if props.remote != old_props.remote || props.info != old_props.info {
+            self.status = None;
+            self.last_status_error = None;
+            self.last_rrd_error = None;
+
+            self.time = Rc::new(Vec::new());
+            self.disk = Rc::new(Series::new("", Vec::new()));
+            self.disk_max = Rc::new(Series::new("", Vec::new()));
+            self._async_pool = AsyncPool::new();
+            ctx.link()
+                .send_message_batch(vec![Msg::ReloadStatus, Msg::ReloadRrd]);
+            true
+        } else {
+            false
+        }
+    }
+
+    fn view(&self, ctx: &yew::Context<Self>) -> yew::Html {
+        let props = ctx.props();
+        let title: Html = Row::new()
+            .gap(2)
+            .class(AlignItems::Baseline)
+            .with_child(Fa::new("database"))
+            .with_child(tr! {"Storage '{0}'", props.info.storage})
+            .into();
+
+        let mut status_comp = Column::new().gap(2).padding(4);
+        let status = match &self.status {
+            Some(status) => status,
+            None => &PveStorageStatus {
+                active: None,
+                avail: Some(props.info.maxdisk as i64 - props.info.disk as i64),
+                content: vec![],
+                enabled: None,
+                shared: None,
+                total: Some(props.info.maxdisk as i64),
+                ty: String::new(),
+                used: Some(props.info.disk as i64),
+            },
+        };
+
+        status_comp = status_comp
+            .with_child(make_row(
+                tr!("Enabled"),
+                Fa::new(if status.enabled.unwrap_or_default() {
+                    "toggle-on"
+                } else {
+                    "toggle-off"
+                }),
+                String::new(),
+            ))
+            .with_child(make_row(
+                tr!("Active"),
+                Fa::from(if status.active.unwrap_or_default() {
+                    Status::Success
+                } else {
+                    Status::Error
+                }),
+                String::new(),
+            ))
+            .with_child(make_row(
+                tr!("Content"),
+                Fa::new("list"),
+                status
+                    .content
+                    .iter()
+                    .map(|c| render_content_type(&c))
+                    .collect::<Vec<_>>()
+                    .join(", "),
+            ))
+            .with_child(make_row(
+                tr!("Type"),
+                Fa::new("database"),
+                render_storage_type(&status.ty),
+            ));
+
+        status_comp.add_child(Container::new().padding(1)); // spacer
+
+        let disk = status.used.unwrap_or_default();
+        let maxdisk = status.total.unwrap_or_default();
+        let disk_usage = disk as f64 / maxdisk as f64;
+        status_comp.add_child(crate::renderer::status_row(
+            tr!("Usage"),
+            Fa::new("database"),
+            tr!(
+                "{0}% ({1} of {2})",
+                format!("{:.2}", disk_usage * 100.0),
+                HumanByte::from(disk as u64),
+                HumanByte::from(maxdisk as u64),
+            ),
+            Some(disk_usage as f32),
+            false,
+        ));
+
+        let loading = self.status.is_none() && self.last_status_error.is_none();
+
+        Panel::new()
+            .class(FlexFit)
+            .title(title)
+            .class(ColorScheme::Neutral)
+            .with_child(
+                // FIXME: add some 'visible' or 'active' property to the progress
+                Progress::new()
+                    .value((!loading).then_some(0.0))
+                    .style("opacity", (!loading).then_some("0")),
+            )
+            .with_child(status_comp)
+            .with_child(separator().padding_x(4))
+            .with_child(
+                Row::new()
+                    .padding_x(4)
+                    .padding_y(1)
+                    .class(JustifyContent::FlexEnd)
+                    .with_child(
+                        RRDTimeframeSelector::new()
+                            .on_change(ctx.link().callback(Msg::UpdateRrdTimeframe)),
+                    ),
+            )
+            .with_child(
+                Container::new().class(FlexFit).with_child(
+                    Column::new().padding(4).gap(4).with_child(
+                        RRDGraph::new(self.time.clone())
+                            .title(tr!("Usage"))
+                            .render_value(|v: &f64| {
+                                if v.is_finite() {
+                                    proxmox_human_byte::HumanByte::from(*v as u64).to_string()
+                                } else {
+                                    v.to_string()
+                                }
+                            })
+                            .serie0(Some(self.disk.clone()))
+                            .serie1(Some(self.disk_max.clone())),
+                    ),
+                ),
+            )
+            .into()
+    }
+}
+
+fn make_row(title: String, icon: Fa, text: String) -> Column {
+    crate::renderer::status_row(title, icon, text, None, true)
+}
diff --git a/ui/src/pve/tree.rs b/ui/src/pve/tree.rs
index c4d1322..168e322 100644
--- a/ui/src/pve/tree.rs
+++ b/ui/src/pve/tree.rs
@@ -22,7 +22,7 @@ use pwt::widget::{
 use pwt::{prelude::*, widget::Button};
 
 use pdm_api_types::{
-    resource::{PveLxcResource, PveNodeResource, PveQemuResource, PveResource},
+    resource::{PveLxcResource, PveNodeResource, PveQemuResource, PveResource, PveStorageResource},
     RemoteUpid,
 };
 
@@ -39,6 +39,7 @@ pub enum PveTreeNode {
     Node(PveNodeResource),
     Lxc(PveLxcResource),
     Qemu(PveQemuResource),
+    Storage(PveStorageResource),
 }
 
 impl ExtractPrimaryKey for PveTreeNode {
@@ -48,6 +49,7 @@ impl ExtractPrimaryKey for PveTreeNode {
             PveTreeNode::Node(node) => node.id.as_str(),
             PveTreeNode::Lxc(lxc) => lxc.id.as_str(),
             PveTreeNode::Qemu(qemu) => qemu.id.as_str(),
+            PveTreeNode::Storage(storage) => storage.id.as_str(),
         })
     }
 }
@@ -59,6 +61,9 @@ impl PveTreeNode {
             PveTreeNode::Node(node) => format!("node+{}", node.node),
             PveTreeNode::Lxc(lxc) => format!("guest+{}", lxc.vmid),
             PveTreeNode::Qemu(qemu) => format!("guest+{}", qemu.vmid),
+            PveTreeNode::Storage(storage) => {
+                format!("storage+{}+{}", storage.node, storage.storage)
+            }
         }
     }
 }
@@ -181,7 +186,19 @@ impl PveTreeComp {
                     }
                     node.append(PveTreeNode::Lxc(lxc_info.clone()));
                 }
-                _ => {} //PveResource::Storage(pve_storage_resource) => todo!(),
+                PveResource::Storage(storage) => {
+                    let node_id = format!("remote/{}/node/{}", remote, storage.node);
+                    let key = Key::from(node_id.as_str());
+                    let mut node = match root.find_node_by_key_mut(&key) {
+                        Some(node) => node,
+                        None => root.append(create_empty_node(node_id)),
+                    };
+
+                    if !self.loaded {
+                        node.set_expanded(true);
+                    }
+                    node.append(PveTreeNode::Storage(storage.clone()));
+                }
             }
         }
         if !self.loaded {
@@ -212,6 +229,13 @@ impl PveTreeComp {
             (PveTreeNode::Qemu(a), PveTreeNode::Qemu(b)) => {
                 cmp_guests(a.template, b.template, a.vmid, b.vmid)
             }
+            (PveTreeNode::Lxc(_) | PveTreeNode::Qemu(_), PveTreeNode::Storage(_)) => {
+                std::cmp::Ordering::Less
+            }
+            (PveTreeNode::Storage(_), PveTreeNode::Lxc(_) | PveTreeNode::Qemu(_)) => {
+                std::cmp::Ordering::Greater
+            }
+            (PveTreeNode::Storage(a), PveTreeNode::Storage(b)) => a.id.cmp(&b.id),
         });
         let first_id = root
             .children()
@@ -549,6 +573,9 @@ fn columns(
                 PveTreeNode::Lxc(r) => {
                     (utils::render_lxc_status_icon(r), render_lxc_name(r, true))
                 }
+                PveTreeNode::Storage(r) => {
+                    (utils::render_storage_status_icon(r), r.storage.clone())
+                }
             };
 
             Row::new()
@@ -604,6 +631,12 @@ fn columns(
                     None,
                     Some(r.node.clone()),
                 ),
+                PveTreeNode::Storage(r) => (
+                    r.id.as_str(),
+                    format!("storage/{}/{}", r.node, r.storage),
+                    None,
+                    Some(r.node.clone()),
+                ),
             };
 
             Row::new()
diff --git a/ui/src/pve/utils.rs b/ui/src/pve/utils.rs
index d0c8ccc..7663734 100644
--- a/ui/src/pve/utils.rs
+++ b/ui/src/pve/utils.rs
@@ -4,13 +4,14 @@ use pdm_api_types::resource::{
 };
 use pdm_client::types::{
     LxcConfig, LxcConfigMp, LxcConfigRootfs, LxcConfigUnused, PveQmIde, QemuConfig, QemuConfigSata,
-    QemuConfigScsi, QemuConfigUnused, QemuConfigVirtio,
+    QemuConfigScsi, QemuConfigUnused, QemuConfigVirtio, StorageContent,
 };
 use proxmox_schema::property_string::PropertyString;
 use proxmox_yew_comp::{GuestState, NodeState, StorageState};
 use pwt::{
     css::Opacity,
     props::{ContainerBuilder, WidgetBuilder, WidgetStyleBuilder},
+    tr,
     widget::{Container, Fa, Row},
 };
 
@@ -238,3 +239,41 @@ where
         f(&key, res.map_err(Error::from));
     }
 }
+
+/// Renders the backend types of storages from PVE to a human understandable type
+pub(crate) fn render_storage_type(ty: &str) -> String {
+    if ty == "dir" {
+        return tr!("Directory");
+    }
+    String::from(match ty {
+        "lvm" => "LVM",
+        "lvmthin" => "LVM-Thin",
+        "btrfs" => "BTRFS",
+        "nfs" => "NFS",
+        "cifs" => "SMB/CIFS",
+        "iscsi" => "iSCSI",
+        "cephfs" => "CephFS",
+        "pvecephfs" => "CephFS (PVE)",
+        "rbd" => "RBD",
+        "pveceph" => "RBD (PVE)",
+        "zfs" => "ZFS over iSCSI",
+        "zfspool" => "ZFS",
+        "pbs" => "Proxmox Backup Server",
+        "esxi" => "ESXi",
+        _ => ty,
+    })
+}
+
+/// Renders the backend content type of PVE into a human understandable type
+pub(crate) fn render_content_type(ty: &StorageContent) -> String {
+    match ty {
+        StorageContent::Backup => tr!("Backup"),
+        StorageContent::Images => tr!("Disk Image"),
+        StorageContent::Import => tr!("Import"),
+        StorageContent::Iso => tr!("ISO image"),
+        StorageContent::Rootdir => tr!("Container"),
+        StorageContent::Snippets => tr!("Snippets"),
+        StorageContent::Vztmpl => tr!("Container template"),
+        StorageContent::None => tr!("None"),
+    }
+}
-- 
2.47.2
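
A side note for reviewers: the usage row in StoragePanelComp::view() boils down
to the small calculation sketched below. This is a standalone illustration
only, not part of the patch; usage_row_text is a made-up helper name, and a
plain format!() stands in for the HumanByte/tr!() formatting used in the real
code.

// Standalone sketch of the usage-row math from StoragePanelComp::view().
// `usage_row_text` is a hypothetical helper; the panel inlines this logic and
// formats the byte values with proxmox_human_byte::HumanByte inside tr!().
fn usage_row_text(used: i64, total: i64) -> (f64, String) {
    // same division as in view(); yields NaN/inf when total is zero or missing
    let fraction = used as f64 / total as f64;
    let text = format!("{:.2}% ({} of {} bytes)", fraction * 100.0, used, total);
    (fraction, text) // the fraction also feeds the progress meter (as f32)
}

fn main() {
    let (fraction, text) = usage_row_text(512 * 1024 * 1024, 4 * 1024 * 1024 * 1024);
    assert!((fraction - 0.125).abs() < f64::EPSILON);
    println!("{text}"); // prints: 12.50% (536870912 of 4294967296 bytes)
}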