From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from firstgate.proxmox.com (firstgate.proxmox.com [212.224.123.68]) (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits) key-exchange X25519 server-signature RSA-PSS (2048 bits)) (No client certificate requested) by lists.proxmox.com (Postfix) with ESMTPS id 0B4B5932E5 for ; Wed, 4 Jan 2023 07:43:14 +0100 (CET) Received: from firstgate.proxmox.com (localhost [127.0.0.1]) by firstgate.proxmox.com (Proxmox) with ESMTP id E95071B6E0 for ; Wed, 4 Jan 2023 07:43:13 +0100 (CET) Received: from bastionodiso.odiso.net (bastionodiso.odiso.net [IPv6:2a0a:1580:2000::2d]) (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits) key-exchange X25519 server-signature RSA-PSS (2048 bits) server-digest SHA256) (No client certificate requested) by firstgate.proxmox.com (Proxmox) with ESMTPS for ; Wed, 4 Jan 2023 07:43:11 +0100 (CET) Received: from kvmformation3.odiso.net (formationkvm3.odiso.net [10.3.94.12]) by bastionodiso.odiso.net (Postfix) with ESMTP id B15C67D7E; Wed, 4 Jan 2023 07:43:04 +0100 (CET) Received: by kvmformation3.odiso.net (Postfix, from userid 0) id A69872248F3; Wed, 4 Jan 2023 07:43:04 +0100 (CET) From: Alexandre Derumier To: pve-devel@lists.proxmox.com Date: Wed, 4 Jan 2023 07:42:56 +0100 Message-Id: <20230104064303.2898194-3-aderumier@odiso.com> X-Mailer: git-send-email 2.30.2 In-Reply-To: <20230104064303.2898194-1-aderumier@odiso.com> References: <20230104064303.2898194-1-aderumier@odiso.com> MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-SPAM-LEVEL: Spam detection results: 0 AWL 0.030 Adjusted score from AWL reputation of From: address BAYES_00 -1.9 Bayes spam probability is 0 to 1% HEADER_FROM_DIFFERENT_DOMAINS 0.249 From and EnvelopeFrom 2nd level mail domains are different KAM_DMARC_STATUS 0.01 Test Rule for DKIM or SPF Failure with Strict Alignment KAM_LAZY_DOMAIN_SECURITY 1 Sending domain does not have any anti-forgery methods NO_DNS_FOR_FROM 0.001 Envelope sender has 
no MX or A DNS records SPF_HELO_NONE 0.001 SPF: HELO does not publish an SPF Record SPF_NONE 0.001 SPF: sender does not publish an SPF Record Subject: [pve-devel] [PATCH v2 qemu-server 2/9] add memory parser X-BeenThere: pve-devel@lists.proxmox.com X-Mailman-Version: 2.1.29 Precedence: list List-Id: Proxmox VE development discussion List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Wed, 04 Jan 2023 06:43:14 -0000 Signed-off-by: Alexandre Derumier --- PVE/API2/Qemu.pm | 12 ++++++- PVE/QemuConfig.pm | 4 +-- PVE/QemuMigrate.pm | 6 ++-- PVE/QemuServer.pm | 24 ++++++------- PVE/QemuServer/Helpers.pm | 3 +- PVE/QemuServer/Memory.pm | 74 ++++++++++++++++++++++++++++++++------- 6 files changed, 91 insertions(+), 32 deletions(-) diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm index c87602d..4ffa973 100644 --- a/PVE/API2/Qemu.pm +++ b/PVE/API2/Qemu.pm @@ -32,6 +32,7 @@ use PVE::QemuServer::Drive; use PVE::QemuServer::ImportDisk; use PVE::QemuServer::Monitor qw(mon_cmd); use PVE::QemuServer::Machine; +use PVE::QemuServer::Memory qw(get_current_memory); use PVE::QemuMigrate; use PVE::RPCEnvironment; use PVE::AccessControl; @@ -1608,7 +1609,16 @@ my $update_vm_api = sub { } if ($param->{memory} || defined($param->{balloon})) { - my $maxmem = $param->{memory} || $conf->{pending}->{memory} || $conf->{memory} || $defaults->{memory}; + + my $maxmem = undef; + if ($param->{memory}) { + $maxmem = get_current_memory($param->{memory}); + } elsif ($conf->{pending}->{memory}) { + $maxmem = get_current_memory($conf->{pending}->{memory}); + } else { + $maxmem = get_current_memory($conf->{memory}); + } + my $balloon = defined($param->{balloon}) ? 
$param->{balloon} : $conf->{pending}->{balloon} || $conf->{balloon}; die "balloon value too large (must be smaller than assigned memory)\n" diff --git a/PVE/QemuConfig.pm b/PVE/QemuConfig.pm index 051382c..999e658 100644 --- a/PVE/QemuConfig.pm +++ b/PVE/QemuConfig.pm @@ -12,6 +12,7 @@ use PVE::QemuServer::Helpers; use PVE::QemuServer::Monitor qw(mon_cmd); use PVE::QemuServer; use PVE::QemuServer::Machine; +use PVE::QemuServer::Memory qw(get_current_memory); use PVE::Storage; use PVE::Tools; use PVE::Format qw(render_bytes render_duration); @@ -208,8 +209,7 @@ sub __snapshot_save_vmstate { $target = PVE::QemuServer::find_vmstate_storage($conf, $storecfg); } - my $defaults = PVE::QemuServer::load_defaults(); - my $mem_size = $conf->{memory} // $defaults->{memory}; + my $mem_size = get_current_memory($conf->{memory}); my $driver_state_size = 500; # assume 500MB is enough to safe all driver state; # our savevm-start does live-save of the memory until the space left in the # volume is just enough for the remaining memory content + internal state diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm index 5e466d9..2eccf67 100644 --- a/PVE/QemuMigrate.pm +++ b/PVE/QemuMigrate.pm @@ -26,6 +26,7 @@ use PVE::QemuServer::Drive; use PVE::QemuServer::Helpers qw(min_version); use PVE::QemuServer::Machine; use PVE::QemuServer::Monitor qw(mon_cmd); +use PVE::QemuServer::Memory qw(get_current_memory); use PVE::QemuServer; use PVE::AbstractMigrate; @@ -1024,7 +1025,8 @@ sub phase2_start_remote_cluster { my $remote_vmid = $self->{opts}->{remote}->{vmid}; # like regular start but with some overhead accounted for - my $timeout = PVE::QemuServer::Helpers::config_aware_timeout($self->{vmconf}) + 10; + my $memory = get_current_memory($self->{vmconf}->{memory}); + my $timeout = PVE::QemuServer::Helpers::config_aware_timeout($self->{vmconf}, $memory) + 10; my $res = PVE::Tunnel::write_tunnel($self->{tunnel}, $timeout, "start", $params); @@ -1179,7 +1181,7 @@ sub phase2 { 
$qemu_migrate_params->{'downtime-limit'} = int($migrate_downtime); # set cachesize to 10% of the total memory - my $memory = $conf->{memory} || $defaults->{memory}; + my $memory = get_current_memory($conf->{memory}); my $cachesize = int($memory * 1048576 / 10); $cachesize = round_powerof2($cachesize); diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm index 39fc6b0..5847a78 100644 --- a/PVE/QemuServer.pm +++ b/PVE/QemuServer.pm @@ -52,7 +52,7 @@ use PVE::QemuServer::CGroup; use PVE::QemuServer::CPUConfig qw(print_cpu_device get_cpu_options); use PVE::QemuServer::Drive qw(is_valid_drivename drive_is_cloudinit drive_is_cdrom drive_is_read_only parse_drive print_drive); use PVE::QemuServer::Machine; -use PVE::QemuServer::Memory; +use PVE::QemuServer::Memory qw(get_current_memory); use PVE::QemuServer::Monitor qw(mon_cmd); use PVE::QemuServer::PCI qw(print_pci_addr print_pcie_addr print_pcie_root_port parse_hostpci); use PVE::QemuServer::USB qw(parse_usb_device); @@ -340,11 +340,8 @@ my $confdesc = { }, memory => { optional => 1, - type => 'integer', - description => "Amount of RAM for the VM in MB. This is the maximum available memory when" - ." you use the balloon device.", - minimum => 16, - default => 512, + type => 'string', + format => $PVE::QemuServer::Memory::memory_fmt }, balloon => { optional => 1, @@ -2928,8 +2925,7 @@ sub vmstatus { $d->{cpus} = $conf->{vcpus} if $conf->{vcpus}; $d->{name} = $conf->{name} || "VM $vmid"; - $d->{maxmem} = $conf->{memory} ? 
$conf->{memory}*(1024*1024) - : $defaults->{memory}*(1024*1024); + $d->{maxmem} = get_current_memory($conf->{memory})*(1024*1024); if ($conf->{balloon}) { $d->{balloon_min} = $conf->{balloon}*(1024*1024); @@ -5025,7 +5021,7 @@ sub vmconfig_hotplug_pending { # enable balloon device is not hotpluggable die "skip\n" if defined($conf->{balloon}) && $conf->{balloon} == 0; # here we reset the ballooning value to memory - my $balloon = $conf->{memory} || $defaults->{memory}; + my $balloon = get_current_memory($conf->{memory}); mon_cmd($vmid, "balloon", value => $balloon*1024*1024); } elsif ($fast_plug_option->{$opt}) { # do nothing @@ -5038,7 +5034,7 @@ sub vmconfig_hotplug_pending { vmconfig_delete_or_detach_drive($vmid, $storecfg, $conf, $opt, $force); } elsif ($opt =~ m/^memory$/) { die "skip\n" if !$hotplug_features->{memory}; - PVE::QemuServer::Memory::qemu_memory_hotplug($vmid, $conf, $defaults, $opt); + PVE::QemuServer::Memory::qemu_memory_hotplug($vmid, $conf, $defaults); } elsif ($opt eq 'cpuunits') { $cgroup->change_cpu_shares(undef); } elsif ($opt eq 'cpulimit') { @@ -5093,7 +5089,8 @@ sub vmconfig_hotplug_pending { # allow manual ballooning if shares is set to zero if ((defined($conf->{shares}) && ($conf->{shares} == 0))) { - my $balloon = $conf->{pending}->{balloon} || $conf->{memory} || $defaults->{memory}; + my $memory = get_current_memory($conf->{memory}); + my $balloon = $conf->{pending}->{balloon} || $memory; mon_cmd($vmid, "balloon", value => $balloon*1024*1024); } } elsif ($opt =~ m/^net(\d+)$/) { @@ -5113,7 +5110,7 @@ sub vmconfig_hotplug_pending { $vmid, $opt, $value, $arch, $machine_type); } elsif ($opt =~ m/^memory$/) { #dimms die "skip\n" if !$hotplug_features->{memory}; - $value = PVE::QemuServer::Memory::qemu_memory_hotplug($vmid, $conf, $defaults, $opt, $value); + PVE::QemuServer::Memory::qemu_memory_hotplug($vmid, $conf, $defaults, $conf->{pending}->{$opt}); } elsif ($opt eq 'cpuunits') { my $new_cpuunits = 
PVE::CGroup::clamp_cpu_shares($conf->{pending}->{$opt}); #clamp $cgroup->change_cpu_shares($new_cpuunits); @@ -5790,7 +5787,8 @@ sub vm_start_nolock { push @$cmd, '-S'; } - my $start_timeout = $params->{timeout} // config_aware_timeout($conf, $resume); + my $memory = get_current_memory($conf->{memory}); + my $start_timeout = $params->{timeout} // config_aware_timeout($conf, $memory, $resume); my $pci_devices = {}; # host pci devices for (my $i = 0; $i < $PVE::QemuServer::PCI::MAX_HOSTPCI_DEVICES; $i++) { diff --git a/PVE/QemuServer/Helpers.pm b/PVE/QemuServer/Helpers.pm index e91f906..9115d50 100644 --- a/PVE/QemuServer/Helpers.pm +++ b/PVE/QemuServer/Helpers.pm @@ -143,8 +143,7 @@ sub version_cmp { } sub config_aware_timeout { - my ($config, $is_suspended) = @_; - my $memory = $config->{memory}; + my ($config, $memory, $is_suspended) = @_; my $timeout = 30; # Based on user reported startup time for vm with 512GiB @ 4-5 minutes diff --git a/PVE/QemuServer/Memory.pm b/PVE/QemuServer/Memory.pm index 6c1cd94..59e51c8 100644 --- a/PVE/QemuServer/Memory.pm +++ b/PVE/QemuServer/Memory.pm @@ -8,10 +8,45 @@ use PVE::Exception qw(raise raise_param_exc); use PVE::QemuServer; use PVE::QemuServer::Monitor qw(mon_cmd); +use base qw(Exporter); + +our @EXPORT_OK = qw( +get_current_memory +); my $MAX_NUMA = 8; my $STATICMEM = 1024; +our $memory_fmt = { + current => { + description => "Current amount of online RAM for the VM in MB. This is the maximum available memory when" + ." 
you use the balloon device.", + type => 'integer', + default_key => 1, + optional => 1, + minimum => 16, + default => 512, + } +}; + +sub print_memory { + my $memory = shift; + + return PVE::JSONSchema::print_property_string($memory, $memory_fmt); +} + +sub parse_memory { + my ($value) = @_; + + my $current_default = $memory_fmt->{current}->{default}; + my $res = { current => $current_default }; + return $res if !defined($value); + + $res = eval { PVE::JSONSchema::parse_property_string($memory_fmt, $value) }; + die $@ if $@; + return $res; +} + my $_host_bits; my sub get_host_phys_address_bits { return $_host_bits if defined($_host_bits); @@ -63,6 +98,13 @@ my sub get_max_mem { return $bits_to_max_mem > 4*1024*1024 ? 4*1024*1024 : $bits_to_max_mem; } +sub get_current_memory { + my ($value) = @_; + + my $memory = parse_memory($value); + return $memory->{current}; +} + sub get_numa_node_list { my ($conf) = @_; my @numa_map; @@ -155,17 +197,21 @@ sub foreach_reverse_dimm { } sub qemu_memory_hotplug { - my ($vmid, $conf, $defaults, $opt, $value) = @_; + my ($vmid, $conf, $defaults, $value) = @_; + + return if !PVE::QemuServer::check_running($vmid); - return $value if !PVE::QemuServer::check_running($vmid); + my $oldmem = parse_memory($conf->{memory}); + my $newmem = parse_memory($value); + + my $memory = $oldmem->{current}; + $value = $newmem->{current}; + + return if $value == $memory; my $sockets = 1; $sockets = $conf->{sockets} if $conf->{sockets}; - my $memory = $conf->{memory} || $defaults->{memory}; - $value = $defaults->{memory} if !$value; - return $value if $value == $memory; - my $static_memory = $STATICMEM; $static_memory = $static_memory * $sockets if ($conf->{hugepages} && $conf->{hugepages} == 1024); @@ -180,7 +226,7 @@ sub qemu_memory_hotplug { foreach_dimm($conf, $vmid, $value, $sockets, sub { my ($conf, $vmid, $name, $dimm_size, $numanode, $current_size, $memory) = @_; - return if $current_size <= $conf->{memory}; + return if $current_size <= 
get_current_memory($conf->{memory}); if ($conf->{hugepages}) { $numa_hostmap = get_numa_guest_to_host_map($conf) if !$numa_hostmap; @@ -219,7 +265,8 @@ sub qemu_memory_hotplug { die $err; } #update conf after each succesful module hotplug - $conf->{memory} = $current_size; + $newmem->{current} = $current_size; + $conf->{memory} = print_memory($newmem); PVE::QemuConfig->write_config($vmid, $conf); }); @@ -228,7 +275,8 @@ sub qemu_memory_hotplug { foreach_reverse_dimm($conf, $vmid, $value, $sockets, sub { my ($conf, $vmid, $name, $dimm_size, $numanode, $current_size, $memory) = @_; - return if $current_size >= $conf->{memory}; + return if $current_size >= get_current_memory($conf->{memory}); + print "try to unplug memory dimm $name\n"; my $retry = 0; @@ -242,7 +290,8 @@ sub qemu_memory_hotplug { } #update conf after each succesful module unplug - $conf->{memory} = $current_size; + $newmem->{current} = $current_size; + $conf->{memory} = print_memory($newmem); eval { PVE::QemuServer::qemu_objectdel($vmid, "mem-$name"); }; PVE::QemuConfig->write_config($vmid, $conf); @@ -270,7 +319,8 @@ sub qemu_dimm_list { sub config { my ($conf, $vmid, $sockets, $cores, $defaults, $hotplug_features, $cmd) = @_; - my $memory = $conf->{memory} || $defaults->{memory}; + my $memory = get_current_memory($conf->{memory}); + my $static_memory = 0; if ($hotplug_features->{memory}) { @@ -493,7 +543,7 @@ sub hugepages_topology { return if !$conf->{numa}; my $defaults = PVE::QemuServer::load_defaults(); - my $memory = $conf->{memory} || $defaults->{memory}; + my $memory = get_current_memory($conf->{memory}); my $static_memory = 0; my $sockets = 1; $sockets = $conf->{smp} if $conf->{smp}; # old style - no longer iused -- 2.30.2