public inbox for pve-devel@lists.proxmox.com
 help / color / mirror / Atom feed
* [pve-devel] [PATCH v2 pve-manager 0/2] add bulk hibernation action
@ 2021-02-09 10:31 Hannes Laimer
  2021-02-09 10:31 ` [pve-devel] [PATCH v2 pve-manager 1/2] api2: add suspendall endpoint Hannes Laimer
  2021-02-09 10:31 ` [pve-devel] [PATCH v2 pve-manager 2/2] ui: add bulk hibernate action Hannes Laimer
  0 siblings, 2 replies; 4+ messages in thread
From: Hannes Laimer @ 2021-02-09 10:31 UTC (permalink / raw)
  To: pve-devel

Adds suspendall endpoint which is responsible for hibernation and pause
of multiple VMs, this was done to mirror the behaviour of the suspend 
endpoint for single VMs (which also handles hibernation and pause). The
UI buttons for bulk hibernation were added.

Hannes Laimer (2):
  api2: add suspendall endpoint
  ui: add bulk hibernate action

 PVE/API2/Nodes.pm                 | 119 ++++++++++++++++++++++++++++++
 www/manager6/Utils.js             |   2 +
 www/manager6/form/VMSelector.js   |  34 ++++++---
 www/manager6/node/CmdMenu.js      |  15 ++++
 www/manager6/node/Config.js       |  13 ++++
 www/manager6/window/BulkAction.js |   7 ++
 6 files changed, 180 insertions(+), 10 deletions(-)

-- 
2.20.1





^ permalink raw reply	[flat|nested] 4+ messages in thread

* [pve-devel] [PATCH v2 pve-manager 1/2] api2: add suspendall endpoint
  2021-02-09 10:31 [pve-devel] [PATCH v2 pve-manager 0/2] add bulk hibernation action Hannes Laimer
@ 2021-02-09 10:31 ` Hannes Laimer
  2021-02-19 16:41   ` Thomas Lamprecht
  2021-02-09 10:31 ` [pve-devel] [PATCH v2 pve-manager 2/2] ui: add bulk hibernate action Hannes Laimer
  1 sibling, 1 reply; 4+ messages in thread
From: Hannes Laimer @ 2021-02-09 10:31 UTC (permalink / raw)
  To: pve-devel

Handles pause and hibernation; the reason for not splitting it was to mirror
the behaviour of the already existing suspend endpoint for single VMs.

Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
---

Endpoint code is mostly taken from already existing endpoints, namely
stopall and startall.

 PVE/API2/Nodes.pm | 119 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 119 insertions(+)

diff --git a/PVE/API2/Nodes.pm b/PVE/API2/Nodes.pm
index 8172231e..3e6e9fa2 100644
--- a/PVE/API2/Nodes.pm
+++ b/PVE/API2/Nodes.pm
@@ -1943,6 +1943,125 @@ __PACKAGE__->register_method ({
 	return $rpcenv->fork_worker('stopall', undef, $authuser, $code);
     }});
 
+my $create_suspend_worker = sub {
+    my ($nodename, $type, $vmid, $down_timeout, $todisk) = @_;
+
+    my $upid;
+    if ($type eq 'qemu') {
+	return if !PVE::QemuServer::check_running($vmid, 1);
+	my $timeout =  defined($down_timeout) ? int($down_timeout) : 60*3;
+	print STDERR "Suspending VM $vmid (timeout = $timeout seconds)\n";
+	$upid = PVE::API2::Qemu->vm_suspend({node => $nodename, vmid => $vmid, todisk => $todisk});
+    } else {
+	die "suspension is only supported on VMs, not on '$type'\n";
+    }
+
+    return $upid;
+};
+
+__PACKAGE__->register_method ({
+    name => 'suspendall',
+    path => 'suspendall',
+    method => 'POST',
+    protected => 1,
+    permissions => {
+	check => ['perm', '/', [ 'VM.PowerMgmt' ]],
+    },
+    proxyto => 'node',
+    description => "Suspend all VMs.",
+    parameters => {
+    	additionalProperties => 0,
+	properties => {
+	    node => get_standard_option('pve-node'),
+	    vms => {
+		description => "Only consider Guests with these IDs.",
+		type => 'string',  format => 'pve-vmid-list',
+		optional => 1,
+	    },
+	    todisk => {
+		type => 'boolean',
+		default => 0,
+		optional => 1,
+		description => 'If set, suspends the VM to disk. Will be resumed on next VM start.',
+	    },
+	},
+    },
+    returns => {
+	type => 'string',
+    },
+    code => sub {
+	my ($param) = @_;
+
+	my $rpcenv = PVE::RPCEnvironment::get();
+	my $authuser = $rpcenv->get_user();
+
+	my $nodename = $param->{node};
+	$nodename = PVE::INotify::nodename() if $nodename eq 'localhost';
+
+	my $code = sub {
+
+	    $rpcenv->{type} = 'priv'; # to start tasks in background
+
+	    my $stopList = &$get_start_stop_list($nodename, undef, $param->{vms});
+
+	    my $cpuinfo = PVE::ProcFSTools::read_cpuinfo();
+	    my $datacenterconfig = cfs_read_file('datacenter.cfg');
+	    # if not set by user spawn max cpu count number of workers
+	    my $maxWorkers =  $datacenterconfig->{max_workers} || $cpuinfo->{cpus};
+
+	    foreach my $order (sort {$b <=> $a} keys %$stopList) {
+		my $vmlist = $stopList->{$order};
+		my $workers = {};
+
+		my $finish_worker = sub {
+		    my $pid = shift;
+		    my $d = $workers->{$pid};
+		    return if !$d;
+		    delete $workers->{$pid};
+
+		    syslog('info', "end task $d->{upid}");
+		};
+
+		foreach my $vmid (sort {$b <=> $a} keys %$vmlist) {
+		    my $d = $vmlist->{$vmid};
+		    my $upid;
+		    eval { $upid = &$create_suspend_worker($nodename, $d->{type}, $vmid, $d->{down}, $param->{todisk}); };
+		    warn $@ if $@;
+		    next if !$upid;
+
+		    my $res = PVE::Tools::upid_decode($upid, 1);
+		    next if !$res;
+
+		    my $pid = $res->{pid};
+
+		    $workers->{$pid} = { type => $d->{type}, upid => $upid, vmid => $vmid };
+		    while (scalar(keys %$workers) >= $maxWorkers) {
+			foreach my $p (keys %$workers) {
+			    if (!PVE::ProcFSTools::check_process_running($p)) {
+				&$finish_worker($p);
+			    }
+			}
+			sleep(1);
+		    }
+		}
+		while (scalar(keys %$workers)) {
+		    foreach my $p (keys %$workers) {
+			if (!PVE::ProcFSTools::check_process_running($p)) {
+			    &$finish_worker($p);
+			}
+		    }
+		    sleep(1);
+		}
+	    }
+
+	    syslog('info', "all VMs suspended");
+
+	    return;
+	};
+
+	return $rpcenv->fork_worker('suspendall', undef, $authuser, $code);
+    }});
+
 my $create_migrate_worker = sub {
     my ($nodename, $type, $vmid, $target, $with_local_disks) = @_;
 
-- 
2.20.1





^ permalink raw reply	[flat|nested] 4+ messages in thread

* [pve-devel] [PATCH v2 pve-manager 2/2] ui: add bulk hibernate action
  2021-02-09 10:31 [pve-devel] [PATCH v2 pve-manager 0/2] add bulk hibernation action Hannes Laimer
  2021-02-09 10:31 ` [pve-devel] [PATCH v2 pve-manager 1/2] api2: add suspendall endpoint Hannes Laimer
@ 2021-02-09 10:31 ` Hannes Laimer
  1 sibling, 0 replies; 4+ messages in thread
From: Hannes Laimer @ 2021-02-09 10:31 UTC (permalink / raw)
  To: pve-devel

Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
---

The bulk action name usually matches the endpoint it will call, here,
however, the corresponding endpoint is responsible for hibernate and pause,
therefore in order to distinguish both actions the name does not match
the endpoint here. This also makes it possible to add another bulk action
for pausing VMs later.

 www/manager6/Utils.js             |  2 ++
 www/manager6/form/VMSelector.js   | 34 ++++++++++++++++++++++---------
 www/manager6/node/CmdMenu.js      | 15 ++++++++++++++
 www/manager6/node/Config.js       | 13 ++++++++++++
 www/manager6/window/BulkAction.js |  7 +++++++
 5 files changed, 61 insertions(+), 10 deletions(-)

diff --git a/www/manager6/Utils.js b/www/manager6/Utils.js
index ab4988b0..d3a44fa9 100644
--- a/www/manager6/Utils.js
+++ b/www/manager6/Utils.js
@@ -1807,6 +1807,8 @@ Ext.define('PVE.Utils', {
 	    spiceshell: ['', gettext('Shell') + ' (Spice)'],
 	    startall: ['', gettext('Start all VMs and Containers')],
 	    stopall: ['', gettext('Stop all VMs and Containers')],
+	    hibernateall: ['', gettext('Hibernate all VMs')],
+	    suspendall: ['', gettext('Suspend all VMs')],
 	    unknownimgdel: ['', gettext('Destroy image from unknown guest')],
 	    vncproxy: ['VM/CT', gettext('Console')],
 	    vncshell: ['', gettext('Shell')],
diff --git a/www/manager6/form/VMSelector.js b/www/manager6/form/VMSelector.js
index 6a51a73d..b5cc0781 100644
--- a/www/manager6/form/VMSelector.js
+++ b/www/manager6/form/VMSelector.js
@@ -180,6 +180,7 @@ Ext.define('PVE.form.VMSelector', {
 	// only show the relevant guests by default
 	if (me.action) {
 	    var statusfilter = '';
+	    var typefilter = '';
 	    switch (me.action) {
 		case 'startall':
 		    statusfilter = 'stopped';
@@ -187,17 +188,30 @@ Ext.define('PVE.form.VMSelector', {
 		case 'stopall':
 		    statusfilter = 'running';
 		    break;
+		case 'suspendall':
+		    statusfilter = 'running';
+		    typefilter = 'qemu';
+		    break;
 	    }
-	    if (statusfilter !== '') {
-		me.store.filters.add({
-		    property: 'template',
-		    value: 0,
-		}, {
-		    id: 'x-gridfilter-status',
-		    operator: 'in',
-		    property: 'status',
-		    value: [statusfilter],
-		});
+	    if (statusfilter !== '' || typefilter !== '') {
+
+		if (statusfilter !== '') {
+		    me.store.filters.add({
+			id: 'x-gridfilter-status',
+			operator: 'in',
+			property: 'status',
+			value: [statusfilter],
+		    });
+		}
+
+		if (typefilter !== '') {
+		    me.store.filters.add({
+			id: 'x-gridfilter-type',
+			operator: 'in',
+			property: 'type',
+			value: [typefilter],
+		    });
+		}
 	    }
 	}
 
diff --git a/www/manager6/node/CmdMenu.js b/www/manager6/node/CmdMenu.js
index b650bfa0..b91c2efe 100644
--- a/www/manager6/node/CmdMenu.js
+++ b/www/manager6/node/CmdMenu.js
@@ -60,6 +60,21 @@ Ext.define('PVE.node.CmdMenu', {
 		win.show();
 	    },
 	},
+	{
+	    text: gettext('Bulk Hibernate'),
+	    itemId: 'bulksuspend',
+	    iconCls: 'fa fa-fw fa-download',
+	    handler: function() {
+		var me = this.up('menu');
+		var win = Ext.create('PVE.window.BulkAction', {
+		    nodename: me.nodename,
+		    title: gettext('Bulk Hibernate'),
+		    btnText: gettext('Hibernate'),
+		    action: 'hibernateall',
+		});
+		win.show();
+	    },
+	},
 	{
 	    text: gettext('Bulk Migrate'),
 	    itemId: 'bulkmigrate',
diff --git a/www/manager6/node/Config.js b/www/manager6/node/Config.js
index ef3ac32c..db3f0c2e 100644
--- a/www/manager6/node/Config.js
+++ b/www/manager6/node/Config.js
@@ -63,6 +63,19 @@ Ext.define('PVE.node.Config', {
 			    win.show();
 			},
 		    },
+		    {
+			text: gettext('Bulk Hibernate'),
+			iconCls: 'fa fa-fw fa-download',
+			handler: function() {
+			    var win = Ext.create('PVE.window.BulkAction', {
+				nodename: nodename,
+				title: gettext('Bulk Hibernate'),
+				btnText: gettext('Hibernate'),
+				action: 'hibernateall',
+			    });
+			    win.show();
+			},
+		    },
 		    {
 			text: gettext('Bulk Migrate'),
 			iconCls: 'fa fa-fw fa-send-o',
diff --git a/www/manager6/window/BulkAction.js b/www/manager6/window/BulkAction.js
index 135f570f..488b82ff 100644
--- a/www/manager6/window/BulkAction.js
+++ b/www/manager6/window/BulkAction.js
@@ -117,6 +117,13 @@ Ext.define('PVE.window.BulkAction', {
 		name: 'force',
 		value: 1,
 	    });
+	} else if (me.action === 'hibernateall') {
+	    me.action = 'suspendall';
+	    items.push({
+		xtype: 'hiddenfield',
+		name: 'todisk',
+		value: 1,
+	    });
 	}
 
 	items.push({
-- 
2.20.1





^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [pve-devel] [PATCH v2 pve-manager 1/2] api2: add suspendall endpoint
  2021-02-09 10:31 ` [pve-devel] [PATCH v2 pve-manager 1/2] api2: add suspendall endpoint Hannes Laimer
@ 2021-02-19 16:41   ` Thomas Lamprecht
  0 siblings, 0 replies; 4+ messages in thread
From: Thomas Lamprecht @ 2021-02-19 16:41 UTC (permalink / raw)
  To: Proxmox VE development discussion, Hannes Laimer

On 09.02.21 11:31, Hannes Laimer wrote:
> Handels pause and hibernation, the reason for not splitting it was to mirror
> the behaviour of the already existing suspend endpoint for single VMs.
> 
> Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
> ---
> 
> Endpoint code is mostly taken from already existing ednpoints, namely
> stopall and startall.
> 
>  PVE/API2/Nodes.pm | 119 ++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 119 insertions(+)
> 
> diff --git a/PVE/API2/Nodes.pm b/PVE/API2/Nodes.pm
> index 8172231e..3e6e9fa2 100644
> --- a/PVE/API2/Nodes.pm
> +++ b/PVE/API2/Nodes.pm
> @@ -1943,6 +1943,125 @@ __PACKAGE__->register_method ({
>  	return $rpcenv->fork_worker('stopall', undef, $authuser, $code);
>      }});
>  
> +my $create_suspend_worker = sub {
> +    my ($nodename, $type, $vmid, $down_timeout, $todisk) = @_;
> +
> +    my $upid;
> +    if ($type eq 'qemu') {
> +	return if !PVE::QemuServer::check_running($vmid, 1);
> +	my $timeout =  defined($down_timeout) ? int($down_timeout) : 60*3;
> +	print STDERR "Suspending VM $vmid (timeout = $timeout seconds)\n";
> +	$upid = PVE::API2::Qemu->vm_suspend({node => $nodename, vmid => $vmid, todisk => $todisk});
> +    } else {
> +	die "suspension is only supported on VMs, not on '$type'\n";
> +    }
> +
> +    return $upid;
> +};
> +
> +__PACKAGE__->register_method ({
> +    name => 'suspendall',
> +    path => 'suspendall',
> +    method => 'POST',
> +    protected => 1,
> +    permissions => {
> +	check => ['perm', '/', [ 'VM.PowerMgmt' ]],

permissions are still unchanged?

From:
https://pve.proxmox.com/pve-docs/api-viewer/index.html#/nodes/{node}/qemu/{vmid}/status/suspend

"You need 'VM.PowerMgmt' on /vms/{vmid}, and if you have set 'todisk', you need also
'VM.Config.Disk' on /vms/{vmid} and 'Datastore.AllocateSpace' on the storage for the
vmstate."

But you call PVE::API2::Qemu->vm_suspend directly, so all schema based checks, i.e., those not
done there in code directly, get circumvented.

Did you check that this is OK?


> +    },
> +    proxyto => 'node',
> +    description => "Suspend all VMs.",
> +    parameters => {
> +    	additionalProperties => 0,
> +	properties => {
> +	    node => get_standard_option('pve-node'),
> +	    vms => {
> +		description => "Only consider Guests with these IDs.",
> +		type => 'string',  format => 'pve-vmid-list',
> +		optional => 1,
> +	    },
> +	    todisk => {
> +		type => 'boolean',
> +		default => 0,

I'd even enable this by default, as it's the prime use case.
Alternatively, it could be defaulted to true via the GUI.

> +		optional => 1,
> +		description => 'If set, suspends the VM to disk. Will be resumed on next VM start.',
> +	    },

A state storage may also be useful; some may even want a mapping per VM? Could be overkill
though, as people can already configure the state storage in the VM options.

> +	},
> +    },
> +    returns => {
> +	type => 'string',
> +    },
> +    code => sub {
> +	my ($param) = @_;
> +
> +	my $rpcenv = PVE::RPCEnvironment::get();
> +	my $authuser = $rpcenv->get_user();
> +
> +	my $nodename = $param->{node};
> +	$nodename = PVE::INotify::nodename() if $nodename eq 'localhost';
> +
> +	my $code = sub {
> +
> +	    $rpcenv->{type} = 'priv'; # to start tasks in background
> +
> +	    my $stopList = &$get_start_stop_list($nodename, undef, $param->{vms});
> +
> +	    my $cpuinfo = PVE::ProcFSTools::read_cpuinfo();
> +	    my $datacenterconfig = cfs_read_file('datacenter.cfg');
> +	    # if not set by user spawn max cpu count number of workers
> +	    my $maxWorkers =  $datacenterconfig->{max_workers} || $cpuinfo->{cpus};
> +
> +	    foreach my $order (sort {$b <=> $a} keys %$stopList) {
> +		my $vmlist = $stopList->{$order};
> +		my $workers = {};
> +
> +		my $finish_worker = sub {
> +		    my $pid = shift;
> +		    my $d = $workers->{$pid};
> +		    return if !$d;
> +		    delete $workers->{$pid};
> +
> +		    syslog('info', "end task $d->{upid}");
> +		};

still not factored out

> +
> +		foreach my $vmid (sort {$b <=> $a} keys %$vmlist) {
> +		    my $d = $vmlist->{$vmid};
> +		    my $upid;
> +		    eval { $upid = &$create_suspend_worker($nodename, $d->{type}, $vmid, $d->{down}, $param->{todisk}); };
> +		    warn $@ if $@;
> +		    next if !$upid;
> +
> +		    my $res = PVE::Tools::upid_decode($upid, 1);
> +		    next if !$res;
> +
> +		    my $pid = $res->{pid};
> +
> +		    $workers->{$pid} = { type => $d->{type}, upid => $upid, vmid => $vmid };
> +		    while (scalar(keys %$workers) >= $maxWorkers) {
> +			foreach my $p (keys %$workers) {
> +			    if (!PVE::ProcFSTools::check_process_running($p)) {
> +				&$finish_worker($p);
> +			    }
> +			}
> +			sleep(1);

still not factored out

> +		    }
> +		}
> +		while (scalar(keys %$workers)) {
> +		    foreach my $p (keys %$workers) {
> +			if (!PVE::ProcFSTools::check_process_running($p)) {
> +			    &$finish_worker($p);
> +			}
> +		    }
> +		    sleep(1);
> +		}

still not factored out...

did you even read my last review?!

> +	    }
> +
> +	    syslog('info', "all VMs suspended");
> +
> +	    return;
> +	};
> +
> +	return $rpcenv->fork_worker('suspendall', undef, $authuser, $code);
> +    }});
> +
>  my $create_migrate_worker = sub {
>      my ($nodename, $type, $vmid, $target, $with_local_disks) = @_;
>  
> 





^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2021-02-19 16:42 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-02-09 10:31 [pve-devel] [PATCH v2 pve-manager 0/2] add bulk hibernation action Hannes Laimer
2021-02-09 10:31 ` [pve-devel] [PATCH v2 pve-manager 1/2] api2: add suspendall endpoint Hannes Laimer
2021-02-19 16:41   ` Thomas Lamprecht
2021-02-09 10:31 ` [pve-devel] [PATCH v2 pve-manager 2/2] ui: add bulk hibernate action Hannes Laimer

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox
Service provided by Proxmox Server Solutions GmbH | Privacy | Legal