* [pdm-devel] [PATCH datacenter-manager] server: fake remote: make the data a bit more dynamic and realistic
@ 2025-05-15  7:12 Dominik Csapak
From: Dominik Csapak @ 2025-05-15  7:12 UTC (permalink / raw)
  To: pdm-devel

* fix a "wrong" value for maxdisk (it was 100 MiB while disk usage was 42 GiB)
* factor out the (max)mem/disk values into constants (so we can change
  them all at once if we want to)
* make the cpu value dynamic with a very basic sine curve
* make the rrd data a bit more dynamic and realistic as well:
  all values were '10.0', which did not look good
  (e.g. cpu usage was always 1000%), so change these values to
  something more dynamic and/or realistic
* use different worker types, determined by the starttime

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
note that this patch is based on Lukas' last remote task series [0], which
includes the patches for the fake remotes; two short illustrative sketches of
the new value and task generation follow after the link below.

0: https://lore.proxmox.com/pdm-devel/20250512114144.118545-1-l.wagner@proxmox.com/
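
For reviewers who want a feel for the generated values without applying the
series, here is a minimal, self-contained sketch (the pseudo_random_sin body
matches the one added in the diff; the sampling loop and the epoch constant
are only for illustration):

// samples the patch's value generator the way the fake metrics endpoint
// does (one point every 10 seconds) and checks it stays within [0, 1]
fn pseudo_random_sin(x: f64) -> f64 {
    ((x / 600.0).sin() * ((5.0 * x) / 600.0).sin() + 1.0) / 2.0
}

fn main() {
    let start = 1_445_378_400_f64; // same epoch the fake uptime is based on
    for step in 0..6 {
        let t = start + (step as f64) * 10.0; // metric points are 10 s apart
        let v = pseudo_random_sin(t);
        assert!((0.0..=1.0).contains(&v)); // bounded, so usable as a cpu fraction
        println!("t={t} cpu={v:.3}");
    }
}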
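
And a similar sketch of how the fake task list now cycles through worker
types (WORKER_TYPES and the UPID format string match the diff; the node name
and the range of start times are placeholders):

const WORKER_TYPES: &[&str] = &["stopall", "startall", "vzdump", "aptupdate", "vncshell"];

// picks the worker type from the starttime, so consecutive fake tasks get
// different types instead of always being "stopall"
fn fake_upid(node: &str, starttime: i64) -> String {
    let task_type = WORKER_TYPES[starttime as usize % WORKER_TYPES.len()];
    format!("UPID:{node}:0000C530:001C9BEC:{starttime:08X}:{task_type}::root@pam:")
}

fn main() {
    for starttime in 1_700_000_000_i64..1_700_000_005 {
        println!("{}", fake_upid("node-0", starttime));
    }
}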

 server/src/test_support/fake_remote.rs | 137 ++++++++++++++-----------
 1 file changed, 79 insertions(+), 58 deletions(-)

diff --git a/server/src/test_support/fake_remote.rs b/server/src/test_support/fake_remote.rs
index 27ba7c5..a571fb7 100644
--- a/server/src/test_support/fake_remote.rs
+++ b/server/src/test_support/fake_remote.rs
@@ -29,6 +29,12 @@ pub struct FakeRemoteConfig {
     pub api_delay: u32,
 }
 
+// generate pseudo-random-looking values, stretched so that we can see
+// the period in a graph that is averaged over a minute
+fn pseudo_random_sin(x: f64) -> f64 {
+    ((x / 600.0).sin() * ((5.0 * x) / 600.0).sin() + 1.0) / 2.0
+}
+
 impl RemoteConfig for FakeRemoteConfig {
     fn config(&self) -> Result<(SectionConfigData<Remote>, ConfigDigest), Error> {
         let mut section_config = SectionConfigData::default();
@@ -118,6 +124,13 @@ struct FakePveClient {
     api_delay_ms: u32,
 }
 
+const MAX_MEM: i64 = 8 * 1024 * 1024 * 1024;
+const MEM: u64 = 3 * 1024 * 1024 * 1024;
+const DISK: u64 = 42 * 1024 * 1024 * 1024;
+const MAX_DISK: u64 = 80 * 1024 * 1024 * 1024;
+
+const WORKER_TYPES: &[&str] = &["stopall", "startall", "vzdump", "aptupdate", "vncshell"];
+
 #[async_trait::async_trait]
 impl pve_api_types::client::PveClient for FakePveClient {
     async fn cluster_resources(
@@ -128,22 +141,25 @@ impl pve_api_types::client::PveClient for FakePveClient {
 
         let mut vmid = 100;
 
+        let now = proxmox_time::epoch_i64();
+        let cpu = pseudo_random_sin(now as f64);
+
         for _ in 0..self.nr_of_vms {
             vmid += 1;
             result.push(ClusterResource {
                 cgroup_mode: None,
                 content: None,
-                cpu: Some(0.1),
+                cpu: Some(cpu),
                 diskread: Some(1034),
                 diskwrite: Some(1034),
-                disk: Some(42 * 1024 * 1024 * 1024),
+                disk: Some(DISK),
                 hastate: None,
                 id: format!("qemu/{vmid}"),
                 level: Some("".into()),
                 maxcpu: Some(4.),
-                maxdisk: Some(100 * 1024 * 1024),
-                maxmem: Some(8 * 1024 * 1024 * 1024),
-                mem: Some(3 * 1024 * 1024 * 1024),
+                maxdisk: Some(MAX_DISK),
+                maxmem: Some(MAX_MEM),
+                mem: Some(MEM),
                 name: Some(format!("vm-{vmid}")),
                 netin: Some(1034),
                 netout: Some(1034),
@@ -166,17 +182,17 @@ impl pve_api_types::client::PveClient for FakePveClient {
             result.push(ClusterResource {
                 cgroup_mode: None,
                 content: None,
-                cpu: Some(0.1),
-                disk: Some(42 * 1024 * 1024 * 1024),
+                cpu: Some(cpu),
+                disk: Some(DISK),
                 diskread: Some(1034),
                 diskwrite: Some(1034),
                 hastate: None,
                 id: format!("lxc/{vmid}"),
                 level: Some("".into()),
                 maxcpu: Some(4.),
-                maxdisk: Some(100 * 1024 * 1024),
-                maxmem: Some(8 * 1024 * 1024 * 1024),
-                mem: Some(3 * 1024 * 1024 * 1024),
+                maxdisk: Some(MAX_DISK),
+                maxmem: Some(MAX_MEM),
+                mem: Some(MEM),
                 name: Some(format!("ct-{vmid}")),
                 netin: Some(1034),
                 netout: Some(1034),
@@ -198,17 +214,17 @@ impl pve_api_types::client::PveClient for FakePveClient {
             result.push(ClusterResource {
                 cgroup_mode: None,
                 content: None,
-                cpu: Some(0.1),
-                disk: Some(42 * 1024 * 1024 * 1024),
+                cpu: Some(cpu),
+                disk: Some(DISK),
                 diskread: None,
                 diskwrite: None,
                 hastate: None,
                 id: format!("node/node-{i}"),
                 level: Some("".into()),
                 maxcpu: Some(16.),
-                maxdisk: Some(100 * 1024 * 1024),
-                maxmem: Some(8 * 1024 * 1024 * 1024),
-                mem: Some(3 * 1024 * 1024 * 1024),
+                maxdisk: Some(MAX_DISK),
+                maxmem: Some(MAX_MEM),
+                mem: Some(MEM),
                 name: None,
                 netin: None,
                 netout: None,
@@ -231,14 +247,14 @@ impl pve_api_types::client::PveClient for FakePveClient {
                 cgroup_mode: None,
                 content: Some(vec![StorageContent::Images, StorageContent::Rootdir]),
                 cpu: None,
-                disk: Some(42 * 1024 * 1024 * 1024),
+                disk: Some(DISK),
                 diskread: None,
                 diskwrite: None,
                 hastate: None,
                 id: format!("storage/node-0/storage-{i}"),
                 level: None,
                 maxcpu: None,
-                maxdisk: Some(100 * 1024 * 1024),
+                maxdisk: Some(MAX_DISK),
                 maxmem: None,
                 mem: None,
                 name: None,
@@ -280,68 +296,72 @@ impl pve_api_types::client::PveClient for FakePveClient {
         use pve_api_types::ClusterMetricsDataType::*;
 
         while time < now {
-            let point = |id: &str, metric: &str, timestamp, ty| ClusterMetricsData {
+            let point = |id: &str, metric: &str, timestamp, ty, value| ClusterMetricsData {
                 id: id.into(),
                 metric: metric.into(),
                 timestamp,
                 ty,
-                // TODO: Generate random data?
-                value: 10.0,
+                value,
             };
 
+            let random = pseudo_random_sin(time as f64);
+            let uptime = (time - 1445378400) as f64; // up since 2015-10-21
+            let net = uptime * 1_000.0; // counter growing at 1000 bytes/s (Derive)
+            let mem = random * 2_000_000_000.0; // between 0 and 2 GB
+
             for i in 0..self.nr_of_nodes {
                 let id = format!("node/node-{i}");
-                data.push(point(&id, "uptime", time, Gauge));
-                data.push(point(&id, "net_in", time, Derive));
-                data.push(point(&id, "net_out", time, Derive));
-                data.push(point(&id, "cpu_avg1", time, Gauge));
-                data.push(point(&id, "cpu_avg5", time, Gauge));
-                data.push(point(&id, "cpu_avg15", time, Gauge));
-                data.push(point(&id, "cpu_max", time, Gauge));
-                data.push(point(&id, "cpu_current", time, Gauge));
-                data.push(point(&id, "cpu_iowait", time, Gauge));
-                data.push(point(&id, "mem_used", time, Gauge));
-                data.push(point(&id, "mem_total", time, Gauge));
-                data.push(point(&id, "swap_total", time, Gauge));
-                data.push(point(&id, "swap_used", time, Gauge));
-                data.push(point(&id, "disk_total", time, Gauge));
-                data.push(point(&id, "disk_used", time, Gauge));
+                data.push(point(&id, "uptime", time, Gauge, uptime));
+                data.push(point(&id, "net_in", time, Derive, net));
+                data.push(point(&id, "net_out", time, Derive, net));
+                data.push(point(&id, "cpu_avg1", time, Gauge, random));
+                data.push(point(&id, "cpu_avg5", time, Gauge, random));
+                data.push(point(&id, "cpu_avg15", time, Gauge, random));
+                data.push(point(&id, "cpu_max", time, Gauge, 16.0));
+                data.push(point(&id, "cpu_current", time, Gauge, random));
+                data.push(point(&id, "cpu_iowait", time, Gauge, random));
+                data.push(point(&id, "mem_used", time, Gauge, mem));
+                data.push(point(&id, "mem_total", time, Gauge, MAX_MEM as f64));
+                data.push(point(&id, "swap_total", time, Gauge, 1000.0));
+                data.push(point(&id, "swap_used", time, Gauge, 500.0));
+                data.push(point(&id, "disk_total", time, Gauge, MAX_DISK as f64));
+                data.push(point(&id, "disk_used", time, Gauge, DISK as f64));
             }
 
             for i in 0..self.nr_of_vms {
                 let vmid = 100 + i;
                 let id = format!("qemu/{vmid}");
-                data.push(point(&id, "uptime", time, Gauge));
-                data.push(point(&id, "net_in", time, Derive));
-                data.push(point(&id, "net_out", time, Derive));
-                data.push(point(&id, "disk_read", time, Derive));
-                data.push(point(&id, "disk_write", time, Derive));
-                data.push(point(&id, "cpu_max", time, Gauge));
-                data.push(point(&id, "cpu_current", time, Gauge));
-                data.push(point(&id, "mem_used", time, Gauge));
-                data.push(point(&id, "mem_total", time, Gauge));
-                data.push(point(&id, "disk_total", time, Gauge));
+                data.push(point(&id, "uptime", time, Gauge, uptime));
+                data.push(point(&id, "net_in", time, Derive, net));
+                data.push(point(&id, "net_out", time, Derive, net));
+                data.push(point(&id, "disk_read", time, Derive, net));
+                data.push(point(&id, "disk_write", time, Derive, net));
+                data.push(point(&id, "cpu_max", time, Gauge, 4.0));
+                data.push(point(&id, "cpu_current", time, Gauge, random));
+                data.push(point(&id, "mem_used", time, Gauge, mem));
+                data.push(point(&id, "mem_total", time, Gauge, MAX_MEM as f64));
+                data.push(point(&id, "disk_total", time, Gauge, MAX_DISK as f64));
             }
 
             for i in 0..self.nr_of_cts {
                 let vmid = 100 + self.nr_of_vms + i;
                 let id = format!("lxc/{vmid}");
-                data.push(point(&id, "uptime", time, Gauge));
-                data.push(point(&id, "net_in", time, Derive));
-                data.push(point(&id, "net_out", time, Derive));
-                data.push(point(&id, "disk_read", time, Derive));
-                data.push(point(&id, "disk_write", time, Derive));
-                data.push(point(&id, "cpu_max", time, Gauge));
-                data.push(point(&id, "cpu_current", time, Gauge));
-                data.push(point(&id, "mem_used", time, Gauge));
-                data.push(point(&id, "mem_total", time, Gauge));
-                data.push(point(&id, "disk_total", time, Gauge));
+                data.push(point(&id, "uptime", time, Gauge, uptime));
+                data.push(point(&id, "net_in", time, Derive, net));
+                data.push(point(&id, "net_out", time, Derive, net));
+                data.push(point(&id, "disk_read", time, Derive, net));
+                data.push(point(&id, "disk_write", time, Derive, net));
+                data.push(point(&id, "cpu_max", time, Gauge, 4.0));
+                data.push(point(&id, "cpu_current", time, Gauge, random));
+                data.push(point(&id, "mem_used", time, Gauge, mem));
+                data.push(point(&id, "mem_total", time, Gauge, MAX_MEM as f64));
+                data.push(point(&id, "disk_total", time, Gauge, MAX_DISK as f64));
             }
 
             for i in 0..self.nr_of_storages {
                 let id = format!("storage/node-0/storage-{i}");
-                data.push(point(&id, "disk_total", time, Gauge));
-                data.push(point(&id, "disk_used", time, Gauge));
+                data.push(point(&id, "disk_total", time, Gauge, MAX_DISK as f64));
+                data.push(point(&id, "disk_used", time, Gauge, DISK as f64));
             }
 
             // Advance time by 10 seconds
@@ -375,10 +395,11 @@ impl pve_api_types::client::PveClient for FakePveClient {
     ) -> Result<Vec<ListTasksResponse>, proxmox_client::Error> {
         tokio::time::sleep(Duration::from_millis(self.api_delay_ms as u64)).await;
         let make_task = |starttime| {
+            let task_type = WORKER_TYPES[starttime as usize % WORKER_TYPES.len()];
             let endtime = Some(starttime + 4);
 
             let upid_str =
-                format!("UPID:{node}:0000C530:001C9BEC:{starttime:08X}:stopall::root@pam:",);
+                format!("UPID:{node}:0000C530:001C9BEC:{starttime:08X}:{task_type}::root@pam:",);
             let upid: PveUpid = upid_str.parse().unwrap();
 
             ListTasksResponse {
-- 
2.39.5



