From: Daniel Kral <d.kral@proxmox.com>
To: pve-devel@lists.proxmox.com
Subject: [pve-devel] [PATCH perl-rs v2 1/2] pve-rs: resource_scheduling: allow granular usage changes
Date: Mon, 20 Oct 2025 18:45:29 +0200
Message-ID: <20251020164540.517231-4-d.kral@proxmox.com>
In-Reply-To: <20251020164540.517231-1-d.kral@proxmox.com>
Implement a simple bidirectional map to track which service usages have
been added to which nodes, so that these can be removed individually
later.
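
To illustrate the resulting interface, here is a rough usage sketch
from the Perl side (not part of the patch; node name, service ID and
usage values are made up):

    my $static = PVE::RS::ResourceScheduling::Static->new();
    $static->add_node("node1", 16, 64_000_000_000);

    # usage is now keyed by a service ID and can be removed again
    $static->add_service_usage_to_node(
        "node1", "vm:100", { maxcpu => 2, maxmem => 4_000_000_000 });
    $static->remove_service_usage("vm:100");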
The `StaticNodeUsage` values are newly initialized on every invocation
of `score_nodes_to_start_service(...)` instead of being updated on
every call to `add_service_usage_to_node(...)`, to reduce the
likelihood of numerical instability caused by repeated floating-point
operations on the `cpu` field.
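
As a small standalone illustration of that concern (not part of the
patch): adding and later subtracting the same fractional CPU values on
a running total does not necessarily return exactly to the starting
value, while summing freshly from the tracked per-service values gives
the same result for the same set of services:

    my $cpu = 0.0;
    $cpu += 0.1 for 1 .. 10;    # incremental additions
    $cpu -= 0.1 for 1 .. 10;    # incremental removals
    printf("%.20f\n", $cpu);    # typically a tiny non-zero residue

    my $fresh = 0.0;
    $fresh += $_ for (0.1) x 10;    # fresh sum over tracked values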
The `StaticServiceUsage` values are stored in the `HashMap` inside
`StaticNodeInfo` to reduce unnecessary indirection when summing them in
`score_nodes_to_start_service(...)`.
Signed-off-by: Daniel Kral <d.kral@proxmox.com>
Reviewed-by: Fiona Ebner <f.ebner@proxmox.com>
---
Needs a build dependency bump for
librust-proxmox-resource-scheduling-dev and a versioned Breaks for
pve-ha-manager.
changes since v1:
- added numerical stability bit to patch message
- changed service_nodes type from HashMap<String, Vec<String>> to
HashMap<String, HashSet<String>> and changed relevant methods
accordingly
- added consistency check in add_service_usage_to_node(...) for the
other way around too (whether service_nodes contains the node already)
- improved error messages
- used vm:{seqnum} instead of single letters
.../bindings/resource_scheduling_static.rs | 108 +++++++++++++++---
pve-rs/test/resource_scheduling.pl | 82 +++++++++++--
2 files changed, 167 insertions(+), 23 deletions(-)
diff --git a/pve-rs/src/bindings/resource_scheduling_static.rs b/pve-rs/src/bindings/resource_scheduling_static.rs
index 7cf2f35..5b91d36 100644
--- a/pve-rs/src/bindings/resource_scheduling_static.rs
+++ b/pve-rs/src/bindings/resource_scheduling_static.rs
@@ -6,7 +6,7 @@ pub mod pve_rs_resource_scheduling_static {
     //!
     //! See [`proxmox_resource_scheduling`].
 
-    use std::collections::HashMap;
+    use std::collections::{HashMap, HashSet};
     use std::sync::Mutex;
 
     use anyhow::{Error, bail};
@@ -16,8 +16,16 @@ pub mod pve_rs_resource_scheduling_static {
 
     perlmod::declare_magic!(Box<Scheduler> : &Scheduler as "PVE::RS::ResourceScheduling::Static");
 
+    struct StaticNodeInfo {
+        name: String,
+        maxcpu: usize,
+        maxmem: usize,
+        services: HashMap<String, StaticServiceUsage>,
+    }
+
     struct Usage {
-        nodes: HashMap<String, StaticNodeUsage>,
+        nodes: HashMap<String, StaticNodeInfo>,
+        service_nodes: HashMap<String, HashSet<String>>,
     }
 
     /// A scheduler instance contains the resource usage by node.
@@ -30,6 +38,7 @@ pub mod pve_rs_resource_scheduling_static {
     pub fn new(#[raw] class: Value) -> Result<Value, Error> {
         let inner = Usage {
             nodes: HashMap::new(),
+            service_nodes: HashMap::new(),
         };
 
         Ok(perlmod::instantiate_magic!(
@@ -39,7 +48,7 @@ pub mod pve_rs_resource_scheduling_static {
 
     /// Method: Add a node with its basic CPU and memory info.
     ///
-    /// This inserts a [`StaticNodeUsage`] entry for the node into the scheduler instance.
+    /// This inserts a [`StaticNodeInfo`] entry for the node into the scheduler instance.
     #[export]
     pub fn add_node(
         #[try_from_ref] this: &Scheduler,
@@ -53,12 +62,11 @@ pub mod pve_rs_resource_scheduling_static {
             bail!("node {} already added", nodename);
         }
 
-        let node = StaticNodeUsage {
+        let node = StaticNodeInfo {
             name: nodename.clone(),
-            cpu: 0.0,
             maxcpu,
-            mem: 0,
             maxmem,
+            services: HashMap::new(),
         };
 
         usage.nodes.insert(nodename, node);
@@ -67,10 +75,25 @@ pub mod pve_rs_resource_scheduling_static {
 
     /// Method: Remove a node from the scheduler.
     #[export]
-    pub fn remove_node(#[try_from_ref] this: &Scheduler, nodename: &str) {
+    pub fn remove_node(#[try_from_ref] this: &Scheduler, nodename: &str) -> Result<(), Error> {
         let mut usage = this.inner.lock().unwrap();
 
-        usage.nodes.remove(nodename);
+        if let Some(node) = usage.nodes.remove(nodename) {
+            for (sid, _) in node.services.iter() {
+                match usage.service_nodes.get_mut(sid) {
+                    Some(service_nodes) => {
+                        service_nodes.remove(nodename);
+                    }
+                    None => bail!(
+                        "service '{}' not present in service_nodes hashmap while removing node '{}'",
+                        sid,
+                        nodename
+                    ),
+                }
+            }
+        }
+
+        Ok(())
     }
 
     /// Method: Get a list of all the nodes in the scheduler.
@@ -93,22 +116,63 @@ pub mod pve_rs_resource_scheduling_static {
         usage.nodes.contains_key(nodename)
     }
 
-    /// Method: Add usage of `service` to the node's usage.
+    /// Method: Add service `sid` and its `service_usage` to the node.
     #[export]
     pub fn add_service_usage_to_node(
         #[try_from_ref] this: &Scheduler,
         nodename: &str,
-        service: StaticServiceUsage,
+        sid: &str,
+        service_usage: StaticServiceUsage,
     ) -> Result<(), Error> {
         let mut usage = this.inner.lock().unwrap();
 
         match usage.nodes.get_mut(nodename) {
             Some(node) => {
-                node.add_service_usage(&service);
-                Ok(())
+                if node.services.contains_key(sid) {
+                    bail!("service '{}' already added to node '{}'", sid, nodename);
+                }
+
+                node.services.insert(sid.to_string(), service_usage);
             }
             None => bail!("node '{}' not present in usage hashmap", nodename),
         }
+
+        if let Some(service_nodes) = usage.service_nodes.get_mut(sid) {
+            if service_nodes.contains(nodename) {
+                bail!("node '{}' already added to service '{}'", nodename, sid);
+            }
+
+            service_nodes.insert(nodename.to_string());
+        } else {
+            let mut service_nodes = HashSet::new();
+            service_nodes.insert(nodename.to_string());
+            usage.service_nodes.insert(sid.to_string(), service_nodes);
+        }
+
+        Ok(())
+    }
+
+    /// Method: Remove service `sid` and its usage from all assigned nodes.
+    #[export]
+    fn remove_service_usage(#[try_from_ref] this: &Scheduler, sid: &str) -> Result<(), Error> {
+        let mut usage = this.inner.lock().unwrap();
+
+        if let Some(nodes) = usage.service_nodes.remove(sid) {
+            for nodename in &nodes {
+                match usage.nodes.get_mut(nodename) {
+                    Some(node) => {
+                        node.services.remove(sid);
+                    }
+                    None => bail!(
+                        "node '{}' not present in usage hashmap while removing service '{}'",
+                        nodename,
+                        sid
+                    ),
+                }
+            }
+        }
+
+        Ok(())
     }
 
     /// Scores all previously added nodes for starting a `service` on.
@@ -126,7 +190,25 @@ pub mod pve_rs_resource_scheduling_static {
         service: StaticServiceUsage,
     ) -> Result<Vec<(String, f64)>, Error> {
         let usage = this.inner.lock().unwrap();
-        let nodes = usage.nodes.values().collect::<Vec<&StaticNodeUsage>>();
+        let nodes = usage
+            .nodes
+            .values()
+            .map(|node| {
+                let mut node_usage = StaticNodeUsage {
+                    name: node.name.to_string(),
+                    cpu: 0.0,
+                    maxcpu: node.maxcpu,
+                    mem: 0,
+                    maxmem: node.maxmem,
+                };
+
+                for service in node.services.values() {
+                    node_usage.add_service_usage(service);
+                }
+
+                node_usage
+            })
+            .collect::<Vec<StaticNodeUsage>>();
 
         proxmox_resource_scheduling::pve_static::score_nodes_to_start_service(&nodes, &service)
     }
diff --git a/pve-rs/test/resource_scheduling.pl b/pve-rs/test/resource_scheduling.pl
index e3b7d2e..42556bd 100755
--- a/pve-rs/test/resource_scheduling.pl
+++ b/pve-rs/test/resource_scheduling.pl
@@ -7,6 +7,20 @@ use Test::More;
 
 use PVE::RS::ResourceScheduling::Static;
 
+my sub score_nodes {
+    my ($static, $service) = @_;
+
+    my $score_list = $static->score_nodes_to_start_service($service);
+
+    # imitate HA manager
+    my $scores = { map { $_->[0] => -$_->[1] } $score_list->@* };
+    my @nodes = sort {
+        $scores->{$a} <=> $scores->{$b} || $a cmp $b
+    } keys $scores->%*;
+
+    return @nodes;
+}
+
 sub test_basic {
     my $static = PVE::RS::ResourceScheduling::Static->new();
     is(scalar($static->list_nodes()->@*), 0, 'node list empty');
@@ -50,7 +64,54 @@ sub test_balance {
             is($nodes[1], "A", 'second should be A');
         }
 
-        $static->add_service_usage_to_node($nodes[0], $service);
+        $static->add_service_usage_to_node($nodes[0], "vm:" . (100 + $i), $service);
     }
 }
+
+sub test_balance_removal {
+    my $static = PVE::RS::ResourceScheduling::Static->new();
+    $static->add_node("A", 10, 100_000_000_000);
+    $static->add_node("B", 20, 200_000_000_000);
+    $static->add_node("C", 30, 300_000_000_000);
+
+    my $service = {
+        maxcpu => 4,
+        maxmem => 20_000_000_000,
+    };
+
+    $static->add_service_usage_to_node("A", "a", $service);
+    $static->add_service_usage_to_node("A", "b", $service);
+    $static->add_service_usage_to_node("B", "c", $service);
+    $static->add_service_usage_to_node("B", "d", $service);
+    $static->add_service_usage_to_node("C", "c", $service);
+
+    {
+        my @nodes = score_nodes($static, $service);
+
+        is($nodes[0], "C");
+        is($nodes[1], "B");
+        is($nodes[2], "A");
+    }
+
+    $static->remove_service_usage("d");
+    $static->remove_service_usage("c");
+    $static->add_service_usage_to_node("C", "c", $service);
+
+    {
+        my @nodes = score_nodes($static, $service);
+
+        is($nodes[0], "B");
+        is($nodes[1], "C");
+        is($nodes[2], "A");
+    }
+
+    $static->remove_node("B");
+
+    {
+        my @nodes = score_nodes($static, $service);
+
+        is($nodes[0], "C");
+        is($nodes[1], "A");
+    }
+}
 
@@ -66,11 +127,11 @@ sub test_overcommitted {
         maxmem => 536_870_912,
     };
 
-    $static->add_service_usage_to_node("A", $service);
-    $static->add_service_usage_to_node("A", $service);
-    $static->add_service_usage_to_node("A", $service);
-    $static->add_service_usage_to_node("B", $service);
-    $static->add_service_usage_to_node("A", $service);
+    $static->add_service_usage_to_node("A", "a", $service);
+    $static->add_service_usage_to_node("A", "b", $service);
+    $static->add_service_usage_to_node("A", "c", $service);
+    $static->add_service_usage_to_node("B", "d", $service);
+    $static->add_service_usage_to_node("A", "e", $service);
 
     my $score_list = $static->score_nodes_to_start_service($service);
 
@@ -96,9 +157,9 @@ sub test_balance_small_memory_difference {
     $static->add_node("C", 4, 8_000_000_000);
 
     if ($with_start_load) {
-        $static->add_service_usage_to_node("A", { maxcpu => 4, maxmem => 1_000_000_000 });
-        $static->add_service_usage_to_node("B", { maxcpu => 2, maxmem => 1_000_000_000 });
-        $static->add_service_usage_to_node("C", { maxcpu => 2, maxmem => 1_000_000_000 });
+        $static->add_service_usage_to_node("A", "vm:100", { maxcpu => 4, maxmem => 1_000_000_000 });
+        $static->add_service_usage_to_node("B", "vm:101", { maxcpu => 2, maxmem => 1_000_000_000 });
+        $static->add_service_usage_to_node("C", "vm:102", { maxcpu => 2, maxmem => 1_000_000_000 });
     }
 
     my $service = {
@@ -131,12 +192,13 @@ sub test_balance_small_memory_difference {
             die "internal error, got $i % 4 == " . ($i % 4) . "\n";
         }
 
-        $static->add_service_usage_to_node($nodes[0], $service);
+        $static->add_service_usage_to_node($nodes[0], "vm:" . (103 + $i), $service);
     }
 }
 
 test_basic();
 test_balance();
+test_balance_removal();
 test_overcommitted();
 test_balance_small_memory_difference(1);
 test_balance_small_memory_difference(0);
--
2.47.3