From: Alexandre Derumier <aderumier@odiso.com>
To: pve-devel@lists.proxmox.com
Subject: [pve-devel] [PATCH v2 qemu-server 2/9] add memory parser
Date: Wed,  4 Jan 2023 07:42:56 +0100	[thread overview]
Message-ID: <20230104064303.2898194-3-aderumier@odiso.com> (raw)
In-Reply-To: <20230104064303.2898194-1-aderumier@odiso.com>

Signed-off-by: Alexandre Derumier <aderumier@odiso.com>
---
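For context: 'memory' moves from a plain integer to a property string whose
default key is 'current', so an existing plain value like 'memory: 4096'
should keep parsing unchanged. A minimal illustration with made-up values:

    memory: 4096
    memory: current=4096

Both lines should resolve to a current memory of 4096 MB; when the option is
absent, the schema default of 512 MB applies.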
 PVE/API2/Qemu.pm          | 12 ++++++-
 PVE/QemuConfig.pm         |  4 +--
 PVE/QemuMigrate.pm        |  6 ++--
 PVE/QemuServer.pm         | 24 ++++++-------
 PVE/QemuServer/Helpers.pm |  3 +-
 PVE/QemuServer/Memory.pm  | 74 ++++++++++++++++++++++++++++++++-------
 6 files changed, 91 insertions(+), 32 deletions(-)
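
A short usage sketch of the new helper (illustrative only, assuming the
module is loaded via 'use PVE::QemuServer::Memory qw(get_current_memory)'
as in the hunks below):

    # an old-style plain value maps to the default key 'current'
    my $mem = get_current_memory('2048');          # 2048

    # explicit property-string form
    my $mem2 = get_current_memory('current=8192'); # 8192

    # an undefined config value falls back to the schema default
    my $mem3 = get_current_memory(undef);          # 512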

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index c87602d..4ffa973 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -32,6 +32,7 @@ use PVE::QemuServer::Drive;
 use PVE::QemuServer::ImportDisk;
 use PVE::QemuServer::Monitor qw(mon_cmd);
 use PVE::QemuServer::Machine;
+use PVE::QemuServer::Memory qw(get_current_memory);
 use PVE::QemuMigrate;
 use PVE::RPCEnvironment;
 use PVE::AccessControl;
@@ -1608,7 +1609,16 @@ my $update_vm_api  = sub {
 	}
 
 	if ($param->{memory} || defined($param->{balloon})) {
-	    my $maxmem = $param->{memory} || $conf->{pending}->{memory} || $conf->{memory} || $defaults->{memory};
+
+	    my $maxmem = undef;
+	    if ($param->{memory}) {
+	        $maxmem = get_current_memory($param->{memory});
+	    } elsif ($conf->{pending}->{memory}) {
+	        $maxmem = get_current_memory($conf->{pending}->{memory});
+	    } else {
+	        $maxmem = get_current_memory($conf->{memory});
+	    }
+
 	    my $balloon = defined($param->{balloon}) ? $param->{balloon} : $conf->{pending}->{balloon} || $conf->{balloon};
 
 	    die "balloon value too large (must be smaller than assigned memory)\n"
diff --git a/PVE/QemuConfig.pm b/PVE/QemuConfig.pm
index 051382c..999e658 100644
--- a/PVE/QemuConfig.pm
+++ b/PVE/QemuConfig.pm
@@ -12,6 +12,7 @@ use PVE::QemuServer::Helpers;
 use PVE::QemuServer::Monitor qw(mon_cmd);
 use PVE::QemuServer;
 use PVE::QemuServer::Machine;
+use PVE::QemuServer::Memory qw(get_current_memory);
 use PVE::Storage;
 use PVE::Tools;
 use PVE::Format qw(render_bytes render_duration);
@@ -208,8 +209,7 @@ sub __snapshot_save_vmstate {
 	$target = PVE::QemuServer::find_vmstate_storage($conf, $storecfg);
     }
 
-    my $defaults = PVE::QemuServer::load_defaults();
-    my $mem_size = $conf->{memory} // $defaults->{memory};
+    my $mem_size = get_current_memory($conf->{memory});
 	my $driver_state_size = 500; # assume 500MB is enough to save all driver state;
     # our savevm-start does live-save of the memory until the space left in the
     # volume is just enough for the remaining memory content + internal state
diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index 5e466d9..2eccf67 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -26,6 +26,7 @@ use PVE::QemuServer::Drive;
 use PVE::QemuServer::Helpers qw(min_version);
 use PVE::QemuServer::Machine;
 use PVE::QemuServer::Monitor qw(mon_cmd);
+use PVE::QemuServer::Memory qw(get_current_memory);
 use PVE::QemuServer;
 
 use PVE::AbstractMigrate;
@@ -1024,7 +1025,8 @@ sub phase2_start_remote_cluster {
     my $remote_vmid = $self->{opts}->{remote}->{vmid};
 
     # like regular start but with some overhead accounted for
-    my $timeout = PVE::QemuServer::Helpers::config_aware_timeout($self->{vmconf}) + 10;
+    my $memory = get_current_memory($self->{vmconf}->{memory});
+    my $timeout = PVE::QemuServer::Helpers::config_aware_timeout($self->{vmconf}, $memory) + 10;
 
     my $res = PVE::Tunnel::write_tunnel($self->{tunnel}, $timeout, "start", $params);
 
@@ -1179,7 +1181,7 @@ sub phase2 {
     $qemu_migrate_params->{'downtime-limit'} = int($migrate_downtime);
 
     # set cachesize to 10% of the total memory
-    my $memory =  $conf->{memory} || $defaults->{memory};
+    my $memory = get_current_memory($conf->{memory});
     my $cachesize = int($memory * 1048576 / 10);
     $cachesize = round_powerof2($cachesize);
 
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 39fc6b0..5847a78 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -52,7 +52,7 @@ use PVE::QemuServer::CGroup;
 use PVE::QemuServer::CPUConfig qw(print_cpu_device get_cpu_options);
 use PVE::QemuServer::Drive qw(is_valid_drivename drive_is_cloudinit drive_is_cdrom drive_is_read_only parse_drive print_drive);
 use PVE::QemuServer::Machine;
-use PVE::QemuServer::Memory;
+use PVE::QemuServer::Memory qw(get_current_memory);
 use PVE::QemuServer::Monitor qw(mon_cmd);
 use PVE::QemuServer::PCI qw(print_pci_addr print_pcie_addr print_pcie_root_port parse_hostpci);
 use PVE::QemuServer::USB qw(parse_usb_device);
@@ -340,11 +340,8 @@ my $confdesc = {
     },
     memory => {
 	optional => 1,
-	type => 'integer',
-	description => "Amount of RAM for the VM in MB. This is the maximum available memory when"
-	    ." you use the balloon device.",
-	minimum => 16,
-	default => 512,
+	type => 'string',
+	format => $PVE::QemuServer::Memory::memory_fmt
     },
     balloon => {
 	optional => 1,
@@ -2928,8 +2925,7 @@ sub vmstatus {
 	$d->{cpus} = $conf->{vcpus} if $conf->{vcpus};
 
 	$d->{name} = $conf->{name} || "VM $vmid";
-	$d->{maxmem} = $conf->{memory} ? $conf->{memory}*(1024*1024)
-	    : $defaults->{memory}*(1024*1024);
+	$d->{maxmem} = get_current_memory($conf->{memory})*(1024*1024);
 
 	if ($conf->{balloon}) {
 	    $d->{balloon_min} = $conf->{balloon}*(1024*1024);
@@ -5025,7 +5021,7 @@ sub vmconfig_hotplug_pending {
 		# enable balloon device is not hotpluggable
 		die "skip\n" if defined($conf->{balloon}) && $conf->{balloon} == 0;
 		# here we reset the ballooning value to memory
-		my $balloon = $conf->{memory} || $defaults->{memory};
+		my $balloon = get_current_memory($conf->{memory});
 		mon_cmd($vmid, "balloon", value => $balloon*1024*1024);
 	    } elsif ($fast_plug_option->{$opt}) {
 		# do nothing
@@ -5038,7 +5034,7 @@ sub vmconfig_hotplug_pending {
 		vmconfig_delete_or_detach_drive($vmid, $storecfg, $conf, $opt, $force);
 	    } elsif ($opt =~ m/^memory$/) {
 		die "skip\n" if !$hotplug_features->{memory};
-		PVE::QemuServer::Memory::qemu_memory_hotplug($vmid, $conf, $defaults, $opt);
+		PVE::QemuServer::Memory::qemu_memory_hotplug($vmid, $conf, $defaults);
 	    } elsif ($opt eq 'cpuunits') {
 		$cgroup->change_cpu_shares(undef);
 	    } elsif ($opt eq 'cpulimit') {
@@ -5093,7 +5089,8 @@ sub vmconfig_hotplug_pending {
 
 		# allow manual ballooning if shares is set to zero
 		if ((defined($conf->{shares}) && ($conf->{shares} == 0))) {
-		    my $balloon = $conf->{pending}->{balloon} || $conf->{memory} || $defaults->{memory};
+		    my $memory = get_current_memory($conf->{memory});
+		    my $balloon = $conf->{pending}->{balloon} || $memory;
 		    mon_cmd($vmid, "balloon", value => $balloon*1024*1024);
 		}
 	    } elsif ($opt =~ m/^net(\d+)$/) {
@@ -5113,7 +5110,7 @@ sub vmconfig_hotplug_pending {
 				     $vmid, $opt, $value, $arch, $machine_type);
 	    } elsif ($opt =~ m/^memory$/) { #dimms
 		die "skip\n" if !$hotplug_features->{memory};
-		$value = PVE::QemuServer::Memory::qemu_memory_hotplug($vmid, $conf, $defaults, $opt, $value);
+		PVE::QemuServer::Memory::qemu_memory_hotplug($vmid, $conf, $defaults, $conf->{pending}->{$opt});
 	    } elsif ($opt eq 'cpuunits') {
 		my $new_cpuunits = PVE::CGroup::clamp_cpu_shares($conf->{pending}->{$opt}); #clamp
 		$cgroup->change_cpu_shares($new_cpuunits);
@@ -5790,7 +5787,8 @@ sub vm_start_nolock {
 	push @$cmd, '-S';
     }
 
-    my $start_timeout = $params->{timeout} // config_aware_timeout($conf, $resume);
+    my $memory = get_current_memory($conf->{memory});
+    my $start_timeout = $params->{timeout} // config_aware_timeout($conf, $memory, $resume);
 
     my $pci_devices = {}; # host pci devices
     for (my $i = 0; $i < $PVE::QemuServer::PCI::MAX_HOSTPCI_DEVICES; $i++)  {
diff --git a/PVE/QemuServer/Helpers.pm b/PVE/QemuServer/Helpers.pm
index e91f906..9115d50 100644
--- a/PVE/QemuServer/Helpers.pm
+++ b/PVE/QemuServer/Helpers.pm
@@ -143,8 +143,7 @@ sub version_cmp {
 }
 
 sub config_aware_timeout {
-    my ($config, $is_suspended) = @_;
-    my $memory = $config->{memory};
+    my ($config, $memory, $is_suspended) = @_;
     my $timeout = 30;
 
     # Based on user reported startup time for vm with 512GiB @ 4-5 minutes
diff --git a/PVE/QemuServer/Memory.pm b/PVE/QemuServer/Memory.pm
index 6c1cd94..59e51c8 100644
--- a/PVE/QemuServer/Memory.pm
+++ b/PVE/QemuServer/Memory.pm
@@ -8,10 +8,45 @@ use PVE::Exception qw(raise raise_param_exc);
 
 use PVE::QemuServer;
 use PVE::QemuServer::Monitor qw(mon_cmd);
+use base qw(Exporter);
+
+our @EXPORT_OK = qw(
+get_current_memory
+);
 
 my $MAX_NUMA = 8;
 my $STATICMEM = 1024;
 
+our $memory_fmt = {
+    current => {
+	description => "Current amount of online RAM for the VM in MB. This is the maximum available memory when"
+	    ." you use the balloon device.",
+	type => 'integer',
+	default_key => 1,
+	optional => 1,
+	minimum => 16,
+	default => 512,
+    }
+};
+
+sub print_memory {
+    my $memory = shift;
+
+    return PVE::JSONSchema::print_property_string($memory, $memory_fmt);
+}
+
+sub parse_memory {
+    my ($value) = @_;
+
+    my $current_default = $memory_fmt->{current}->{default};
+    my $res = { current => $current_default };
+    return $res if !defined($value);
+
+    $res = eval { PVE::JSONSchema::parse_property_string($memory_fmt, $value) };
+    die $@ if $@;
+    return $res;
+}
+
 my $_host_bits;
 my sub get_host_phys_address_bits {
     return $_host_bits if defined($_host_bits);
@@ -63,6 +98,13 @@ my sub get_max_mem {
     return $bits_to_max_mem > 4*1024*1024 ? 4*1024*1024 : $bits_to_max_mem;
 }
 
+sub get_current_memory {
+    my ($value) = @_;
+
+    my $memory = parse_memory($value);
+    return $memory->{current};
+}
+
 sub get_numa_node_list {
     my ($conf) = @_;
     my @numa_map;
@@ -155,17 +197,21 @@ sub foreach_reverse_dimm {
 }
 
 sub qemu_memory_hotplug {
-    my ($vmid, $conf, $defaults, $opt, $value) = @_;
+    my ($vmid, $conf, $defaults, $value) = @_;
+
+    return if !PVE::QemuServer::check_running($vmid);
 
-    return $value if !PVE::QemuServer::check_running($vmid);
+    my $oldmem = parse_memory($conf->{memory});
+    my $newmem = parse_memory($value);
+
+    my $memory = $oldmem->{current};
+    $value = $newmem->{current};
+
+    return if $value == $memory;
 
     my $sockets = 1;
     $sockets = $conf->{sockets} if $conf->{sockets};
 
-    my $memory = $conf->{memory} || $defaults->{memory};
-    $value = $defaults->{memory} if !$value;
-    return $value if $value == $memory;
-
     my $static_memory = $STATICMEM;
     $static_memory = $static_memory * $sockets if ($conf->{hugepages} && $conf->{hugepages} == 1024);
 
@@ -180,7 +226,7 @@ sub qemu_memory_hotplug {
 	foreach_dimm($conf, $vmid, $value, $sockets, sub {
 	    my ($conf, $vmid, $name, $dimm_size, $numanode, $current_size, $memory) = @_;
 
-		return if $current_size <= $conf->{memory};
+		return if $current_size <= get_current_memory($conf->{memory});
 
 		if ($conf->{hugepages}) {
 		    $numa_hostmap = get_numa_guest_to_host_map($conf) if !$numa_hostmap;
@@ -219,7 +265,8 @@ sub qemu_memory_hotplug {
 		    die $err;
 		}
 		#update conf after each successful module hotplug
-		$conf->{memory} = $current_size;
+		$newmem->{current} = $current_size;
+		$conf->{memory} = print_memory($newmem);
 		PVE::QemuConfig->write_config($vmid, $conf);
 	});
 
@@ -228,7 +275,8 @@ sub qemu_memory_hotplug {
 	foreach_reverse_dimm($conf, $vmid, $value, $sockets, sub {
 	    my ($conf, $vmid, $name, $dimm_size, $numanode, $current_size, $memory) = @_;
 
-		return if $current_size >= $conf->{memory};
+		return if $current_size >= get_current_memory($conf->{memory});
+
 		print "try to unplug memory dimm $name\n";
 
 		my $retry = 0;
@@ -242,7 +290,8 @@ sub qemu_memory_hotplug {
 		}
 
 		#update conf after each successful module unplug
-		$conf->{memory} = $current_size;
+		$newmem->{current} = $current_size;
+		$conf->{memory} = print_memory($newmem);
 
 		eval { PVE::QemuServer::qemu_objectdel($vmid, "mem-$name"); };
 		PVE::QemuConfig->write_config($vmid, $conf);
@@ -270,7 +319,8 @@ sub qemu_dimm_list {
 sub config {
     my ($conf, $vmid, $sockets, $cores, $defaults, $hotplug_features, $cmd) = @_;
 
-    my $memory = $conf->{memory} || $defaults->{memory};
+    my $memory = get_current_memory($conf->{memory});
+
     my $static_memory = 0;
 
     if ($hotplug_features->{memory}) {
@@ -493,7 +543,7 @@ sub hugepages_topology {
     return if !$conf->{numa};
 
     my $defaults = PVE::QemuServer::load_defaults();
-    my $memory = $conf->{memory} || $defaults->{memory};
+    my $memory = get_current_memory($conf->{memory});
     my $static_memory = 0;
     my $sockets = 1;
     $sockets = $conf->{smp} if $conf->{smp}; # old style - no longer used
-- 
2.30.2





Thread overview: 30+ messages
2023-01-04  6:42 [pve-devel] [PATCH v2 qemu-server 0/9] rework memory hotplug + virtiomem Alexandre Derumier
2023-01-04  6:42 ` [pve-devel] [PATCH v2 qemu-server 1/9] test: add memory tests Alexandre Derumier
2023-01-24 13:04   ` Fiona Ebner
2023-01-04  6:42 ` Alexandre Derumier [this message]
2023-01-24 13:04   ` [pve-devel] [PATCH v2 qemu-server 2/9] add memory parser Fiona Ebner
2023-01-04  6:42 ` [pve-devel] [PATCH v2 qemu-server 3/9] memory: add get_static_mem Alexandre Derumier
2023-01-24 13:04   ` Fiona Ebner
2023-01-04  6:42 ` [pve-devel] [PATCH v2 qemu-server 4/9] config: memory: add 'max' option Alexandre Derumier
2023-01-24 13:05   ` Fiona Ebner
2023-01-27 15:03     ` DERUMIER, Alexandre
2023-01-30  8:03       ` Fiona Ebner
2023-01-30  8:45         ` DERUMIER, Alexandre
2023-01-04  6:42 ` [pve-devel] [PATCH v2 qemu-server 5/9] memory: get_max_mem: use config memory max Alexandre Derumier
2023-01-24 13:05   ` Fiona Ebner
2023-01-27 15:15     ` DERUMIER, Alexandre
2023-01-30  8:04       ` Fiona Ebner
2023-01-04  6:43 ` [pve-devel] [PATCH v2 qemu-server 6/9] memory: use 64 slots && static dimm size when max is defined Alexandre Derumier
2023-01-24 13:06   ` Fiona Ebner
2023-01-27 15:52     ` DERUMIER, Alexandre
2023-01-04  6:43 ` [pve-devel] [PATCH v2 qemu-server 7/9] test: add memory-max tests Alexandre Derumier
2023-01-24 13:06   ` Fiona Ebner
2023-01-04  6:43 ` [pve-devel] [PATCH v2 qemu-server 8/9] memory: add virtio-mem support Alexandre Derumier
2023-01-24 13:06   ` Fiona Ebner
2023-01-25  9:00     ` DERUMIER, Alexandre
2023-01-25  9:54       ` Fiona Ebner
2023-01-25 10:28         ` DERUMIER, Alexandre
2023-01-25 10:52           ` Fiona Ebner
2023-01-04  6:43 ` [pve-devel] [PATCH v2 qemu-server 9/9] tests: add virtio-mem tests Alexandre Derumier
2023-01-24 13:08   ` Fiona Ebner
2023-01-24 13:08 ` [pve-devel] [PATCH v2 qemu-server 0/9] rework memory hotplug + virtiomem Fiona Ebner
