* [pve-devel] [PATCH manager v4 2/3] api: implement node-independent bulk actions
From: Dominik Csapak @ 2025-11-14 14:59 UTC
To: pve-devel
To achieve this, start a worker task and use our generic API client
to start the per-guest tasks on the relevant nodes. The client always
points to 'localhost', so pveproxy takes care of proxying the requests
to the right node.
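
For illustration, the client setup boils down to this (a simplified
sketch of the create_client() helper in the diff below; credentials are
taken from the worker's RPC environment):

    use PVE::APIClient::LWP;
    use PVE::Cluster;
    use PVE::INotify;
    use PVE::RPCEnvironment;

    my $rpcenv = PVE::RPCEnvironment::get();
    my $node = PVE::INotify::nodename();
    my $fingerprint = PVE::Cluster::get_node_fingerprint($node);

    # talk only to the local pveproxy, which forwards each request to the guest's node
    my $client = PVE::APIClient::LWP->new(
        protocol => 'https',
        host => 'localhost',
        port => 8006,
        username => $rpcenv->get_user(),
        ticket => $rpcenv->get_credentials()->{ticket}, # or an API token, see create_client()
        cached_fingerprints => { $fingerprint => 1 },
    );
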
We reuse some logic from the startall/stopall/etc. calls, like getting
the ordered guest info list. For that to work, we have to convert some
of the private subs into proper package subs. We also fix loading
configs from other nodes.
In the worker, for each guest, we check whether the target is in a state
where the action applies (e.g. stopped when we want to start it). If that
is the case, we start the task and put the UPID in a queue to check. This
continues until the number of parallel tasks reaches 'max_workers', at
which point we wait until at least one task has finished before starting
the next one.
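
The scheduling loop roughly looks like this (simplified sketch; the full
handle_task_foreach_guest() below additionally records failures and drains
all remaining tasks per startup-order bucket):

    # $vmlist: VMID => guest info; $start_task returns a UPID, or 1 if nothing to do
    my $workers = {};
    for my $vmid (sort { $a <=> $b } keys %$vmlist) {
        # wait until a worker slot is free
        while (scalar(keys %$workers) >= $max_workers) {
            for my $upid (keys %$workers) {
                my $node = $workers->{$upid}->{guest}->{node};
                my $task = $client->get("/nodes/$node/tasks/$upid/status");
                delete $workers->{$upid} if $task->{status} ne 'running';
            }
            sleep(1);
        }
        my $upid = $start_task->($client, $vmid, $vmlist->{$vmid});
        next if !defined($upid) || "$upid" eq "1"; # 1 means: nothing to do, no task started
        $workers->{$upid} = { vmid => $vmid, guest => $vmlist->{$vmid} };
    }
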
Failures (e.g. task errors or failure to fetch the task status) are logged
as they happen; the affected VMIDs are collected and printed once more at
the end for convenience.
Special handling is required for checking the suspend permissions: we have
to load the VM config to find the target state storage, and can then check
the privileges for that storage.
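
In the handler below, the upfront check covers an explicitly passed state
storage; resolving the per-VM state storage from the config is otherwise
left to the node-local suspend call. Condensed (an excerpt-style sketch,
using the check_guest_permissions() helper defined in Guest.pm below):

    check_guest_permissions($rpcenv, $authuser, $vmlist, ['VM.PowerMgmt']);
    if ($param->{'to-disk'}) {
        check_guest_permissions($rpcenv, $authuser, $vmlist, ['VM.Config.Disk']);
        if (my $statestorage = $param->{statestorage}) {
            # explicit override: check the storage privilege upfront
            $rpcenv->check($authuser, "/storage/$statestorage", ['Datastore.AllocateSpace']);
        }
    }
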
Possible further improvements:
* filters (I'd prefer starting out with front end filters)
* failure mode resolution (I'd wait until someone requests that)
* token handling (probably not necessary since we do check the
permissions upfront for the correct token.)
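
For completeness, invoking one of the new endpoints could look like this
(hypothetical client script; credentials and fingerprint are placeholders,
and how the 'vms' array gets encoded on the wire depends on the client):

    #!/usr/bin/perl
    use strict;
    use warnings;
    use PVE::APIClient::LWP;

    my $client = PVE::APIClient::LWP->new(
        host => 'localhost',
        username => 'root@pam',
        password => 'secret',                          # placeholder
        cached_fingerprints => { '<node fingerprint>' => 1 }, # placeholder
    );

    # bulk-start guests 100 and 101 with at most two parallel tasks
    my $upid = $client->post('/cluster/bulk-action/guest/start', {
        vms => [100, 101],
        maxworkers => 2,
    });
    print "spawned worker task $upid\n";
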
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
PVE/API2/Cluster.pm | 7 +
PVE/API2/Cluster/BulkAction.pm | 45 ++
PVE/API2/Cluster/BulkAction/Guest.pm | 774 +++++++++++++++++++++++++++
PVE/API2/Cluster/BulkAction/Makefile | 17 +
PVE/API2/Cluster/Makefile | 4 +-
PVE/API2/Nodes.pm | 24 +-
6 files changed, 859 insertions(+), 12 deletions(-)
create mode 100644 PVE/API2/Cluster/BulkAction.pm
create mode 100644 PVE/API2/Cluster/BulkAction/Guest.pm
create mode 100644 PVE/API2/Cluster/BulkAction/Makefile
diff --git a/PVE/API2/Cluster.pm b/PVE/API2/Cluster.pm
index 83227c8c..d6003a7d 100644
--- a/PVE/API2/Cluster.pm
+++ b/PVE/API2/Cluster.pm
@@ -25,6 +25,7 @@ use PVE::API2::ACMEAccount;
use PVE::API2::ACMEPlugin;
use PVE::API2::Backup;
use PVE::API2::Cluster::BackupInfo;
+use PVE::API2::Cluster::BulkAction;
use PVE::API2::Cluster::Ceph;
use PVE::API2::Cluster::Mapping;
use PVE::API2::Cluster::Jobs;
@@ -103,6 +104,11 @@ __PACKAGE__->register_method({
path => 'mapping',
});
+__PACKAGE__->register_method({
+ subclass => "PVE::API2::Cluster::BulkAction",
+ path => 'bulk-action',
+});
+
if ($have_sdn) {
__PACKAGE__->register_method({
subclass => "PVE::API2::Network::SDN",
@@ -148,6 +154,7 @@ __PACKAGE__->register_method({
{ name => 'acme' },
{ name => 'backup' },
{ name => 'backup-info' },
+ { name => 'bulk-action' },
{ name => 'ceph' },
{ name => 'config' },
{ name => 'firewall' },
diff --git a/PVE/API2/Cluster/BulkAction.pm b/PVE/API2/Cluster/BulkAction.pm
new file mode 100644
index 00000000..df650514
--- /dev/null
+++ b/PVE/API2/Cluster/BulkAction.pm
@@ -0,0 +1,45 @@
+package PVE::API2::Cluster::BulkAction;
+
+use strict;
+use warnings;
+
+use PVE::API2::Cluster::BulkAction::Guest;
+
+use base qw(PVE::RESTHandler);
+
+__PACKAGE__->register_method({
+ subclass => "PVE::API2::Cluster::BulkAction::Guest",
+ path => 'guest',
+});
+
+__PACKAGE__->register_method({
+ name => 'index',
+ path => '',
+ method => 'GET',
+ description => "List resource types.",
+ permissions => {
+ user => 'all',
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {},
+ },
+ returns => {
+ type => 'array',
+ items => {
+ type => "object",
+ },
+ links => [{ rel => 'child', href => "{name}" }],
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $result = [
+ { name => 'guest' },
+ ];
+
+ return $result;
+ },
+});
+
+1;
diff --git a/PVE/API2/Cluster/BulkAction/Guest.pm b/PVE/API2/Cluster/BulkAction/Guest.pm
new file mode 100644
index 00000000..844913b1
--- /dev/null
+++ b/PVE/API2/Cluster/BulkAction/Guest.pm
@@ -0,0 +1,774 @@
+package PVE::API2::Cluster::BulkAction::Guest;
+
+use strict;
+use warnings;
+
+use PVE::APIClient::LWP;
+use PVE::AccessControl;
+use PVE::Cluster;
+use PVE::Exception qw(raise raise_perm_exc raise_param_exc);
+use PVE::INotify;
+use PVE::JSONSchema qw(get_standard_option);
+use PVE::RESTEnvironment qw(log_warn);
+use PVE::RESTHandler;
+use PVE::RPCEnvironment;
+use PVE::Storage;
+use PVE::Tools qw();
+
+use PVE::API2::Nodes;
+
+use base qw(PVE::RESTHandler);
+
+__PACKAGE__->register_method({
+ name => 'index',
+ path => '',
+ method => 'GET',
+ description => "Bulk action index.",
+ permissions => { user => 'all' },
+ parameters => {
+ additionalProperties => 0,
+ properties => {},
+ },
+ returns => {
+ type => 'array',
+ items => {
+ type => "object",
+ properties => {},
+ },
+ links => [{ rel => 'child', href => "{name}" }],
+ },
+ code => sub {
+ my ($param) = @_;
+
+ return [
+ { name => 'start' },
+ { name => 'shutdown' },
+ { name => 'migrate' },
+ { name => 'suspend' },
+ ];
+ },
+});
+
+sub create_client {
+ my ($request_timeout) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $authuser = $rpcenv->get_user();
+ my $credentials = $rpcenv->get_credentials();
+
+ my $api_token = $credentials->{api_token};
+ if (defined($api_token)) {
+ # this is the format the client expects, but we don't store it like that in the rpcenv
+ $api_token = "PVEAPIToken=${api_token}";
+ }
+ my $ticket = $credentials->{ticket};
+ my $csrf_token = $credentials->{token};
+
+ my $node = PVE::INotify::nodename();
+ my $fingerprint = PVE::Cluster::get_node_fingerprint($node);
+
+ my $conn_args = {
+ protocol => 'https',
+ host => 'localhost', # always call the api locally, let pveproxy handle the proxying
+ port => 8006,
+ username => $authuser,
+ ticket => $ticket,
+ apitoken => $api_token,
+ timeout => $request_timeout // 25, # default slightly shorter than the proxy->daemon timeout
+ cached_fingerprints => {
+ $fingerprint => 1,
+ },
+ };
+
+ my $api_client = PVE::APIClient::LWP->new($conn_args->%*);
+ if (defined($csrf_token)) {
+ $api_client->update_csrftoken($csrf_token);
+ }
+
+ return $api_client;
+}
+
+sub make_get_request {
+ my ($client, $path, $retry_count) = @_;
+
+ $retry_count //= 0;
+
+ my $res = eval { $client->get($path) };
+ my $err = $@;
+ if ($err && $retry_count > 0) {
+ my $retries = 0;
+ while ($err && $retries < $retry_count) {
+ $res = eval { $client->get($path) };
+ $err = $@;
+ $retries++;
+ sleep 1;
+ }
+ }
+ die $err if $err;
+ return $res;
+}
+
+# starts and awaits a task for each guest given via $startlist.
+#
+# takes a vm list in the form of
+# {
+# 0 => {
+# 100 => { .. guest info ..},
+# 101 => { .. guest info ..},
+# },
+# 1 => {
+# 102 => { .. guest info ..},
+# 103 => { .. guest info ..},
+# },
+# }
+#
+# max_workers: how many parallel tasks should be started.
+# start_task: a sub that returns either a UPID or 1 (undef means failure)
+# check_task: if start_task returned a UPID, wait for that task to finish and
+# call check_task with the resulting task status
+sub handle_task_foreach_guest {
+ my ($startlist, $max_workers, $start_task, $check_task) = @_;
+
+ my $api_client = create_client();
+
+ my $failed = [];
+ for my $order (sort { $a <=> $b } keys $startlist->%*) {
+ my $vmlist = $startlist->{$order};
+ my $workers = {};
+
+ for my $vmid (sort { $a <=> $b } keys $vmlist->%*) {
+
+ # wait until at least one slot is free
+ while (scalar(keys($workers->%*)) >= $max_workers) {
+ for my $upid (keys($workers->%*)) {
+ my $worker = $workers->{$upid};
+ my $node = $worker->{guest}->{node};
+
+ my $task =
+ eval { make_get_request($api_client, "/nodes/$node/tasks/$upid/status", 3) };
+ if (my $err = $@) {
+ push $failed->@*, $worker->{vmid};
+
+ $check_task->($api_client, $worker->{vmid}, $worker->{guest}, 1, undef);
+
+ delete $workers->{$upid};
+ } elsif ($task->{status} ne 'running') {
+ my $is_error = PVE::Tools::upid_status_is_error($task->{exitstatus});
+ push $failed->@*, $worker->{vmid} if $is_error;
+
+ $check_task->(
+ $api_client, $worker->{vmid}, $worker->{guest}, $is_error, $task,
+ );
+
+ delete $workers->{$upid};
+ }
+ }
+ sleep(1); # How much?
+ }
+
+ my $guest = $vmlist->{$vmid};
+ my $upid = eval { $start_task->($api_client, $vmid, $guest) };
+ log_warn("$@") if $@;
+
+ # success but no task necessary
+ next if defined($upid) && "$upid" eq "1";
+
+ if (!defined($upid)) {
+ push $failed->@*, $vmid;
+ next;
+ }
+
+ $workers->{$upid} = {
+ vmid => $vmid,
+ guest => $guest,
+ };
+ }
+
+ # wait until current order is finished
+ for my $upid (keys($workers->%*)) {
+ my $worker = $workers->{$upid};
+ my $node = $worker->{guest}->{node};
+
+ my $task = eval { wait_for_task_finished($api_client, $node, $upid) };
+ my $err = $@;
+ my $is_error = ($err || PVE::Tools::upid_status_is_error($task->{exitstatus})) ? 1 : 0;
+ push $failed->@*, $worker->{vmid} if $is_error;
+
+ $check_task->($api_client, $worker->{vmid}, $worker->{guest}, $is_error, $task);
+
+ delete $workers->{$upid};
+ }
+ }
+
+ return $failed;
+}
+
+sub get_type_text {
+ my ($type) = @_;
+
+ if ($type eq 'lxc') {
+ return 'CT';
+ } elsif ($type eq 'qemu') {
+ return 'VM';
+ } else {
+ die "unknown guest type $type\n";
+ }
+}
+
+sub wait_for_task_finished {
+ my ($client, $node, $upid) = @_;
+
+ while (1) {
+ my $task = make_get_request($client, "/nodes/$node/tasks/$upid/status", 3);
+ return $task if $task->{status} ne 'running';
+ sleep(1); # How much time?
+ }
+}
+
+sub check_guest_permissions {
+ my ($rpcenv, $authuser, $vmlist, $priv_list) = @_;
+
+ if (scalar($vmlist->@*) > 0) {
+ $rpcenv->check($authuser, "/vms/$_", $priv_list) for $vmlist->@*;
+ } elsif (!$rpcenv->check($authuser, "/", $priv_list, 1)) {
+ raise_perm_exc("/, " . join(', ', $priv_list->@*));
+ }
+}
+
+sub extract_vmlist {
+ my ($param) = @_;
+
+ if (my $vmlist = $param->{vms}) {
+ my $vmlist_string = join(',', $vmlist->@*);
+ return ($vmlist, $vmlist_string);
+ }
+ return ([], undef);
+}
+
+sub print_start_action {
+ my ($vmlist, $prefix, $suffix) = @_;
+
+ $suffix = defined($suffix) ? " $suffix" : "";
+
+ if (scalar($vmlist->@*)) {
+ print "$prefix guests$suffix: " . join(', ', $vmlist->@*) . "\n";
+ } else {
+ print "$prefix all guests$suffix\n";
+ }
+}
+
+__PACKAGE__->register_method({
+ name => 'start',
+ path => 'start',
+ method => 'POST',
+ description => "Bulk start or resume all guests on the cluster.",
+ permissions => {
+ description => "The 'VM.PowerMgmt' permission is required on '/' or on '/vms/<ID>' for "
+ . "each ID passed via the 'vms' parameter.",
+ user => 'all',
+ },
+ protected => 1,
+ expose_credentials => 1,
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ vms => {
+ description => "Only consider guests from this list of VMIDs.",
+ type => 'array',
+ items => get_standard_option('pve-vmid'),
+ optional => 1,
+ },
+ timeout => {
+ description =>
+ "Default start timeout in seconds. Only valid for VMs. (default depends on the guest configuration).",
+ type => 'integer',
+ optional => 1,
+ },
+ maxworkers => {
+ description => "How many parallel tasks at maximum should be started.",
+ optional => 1,
+ default => 1,
+ type => 'integer',
+ },
+ # TODO:
+ # Failure resolution mode (fail, warn, retry?)
+ # mode-limits (offline only, suspend only, ?)
+ # filter (tags, name, ?)
+ },
+ },
+ returns => {
+ type => 'string',
+ description => "UPID of the worker",
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $authuser = $rpcenv->get_user();
+
+ my ($vmlist, $vmlist_string) = extract_vmlist($param);
+
+ check_guest_permissions($rpcenv, $authuser, $vmlist, ['VM.PowerMgmt']);
+
+ my $code = sub {
+ my $startlist =
+ PVE::API2::Nodes::Nodeinfo::get_start_stop_list(undef, undef, $vmlist_string);
+
+ print_start_action($vmlist, "Starting");
+
+ my $start_task = sub {
+ my ($api_client, $vmid, $guest) = @_;
+ my $node = $guest->{node};
+
+ my $type = $guest->{type};
+ my $type_text = get_type_text($type);
+ my $operation = 'start';
+ my $status = eval {
+ make_get_request($api_client, "/nodes/$node/$type/$vmid/status/current");
+ };
+ if (defined($status) && $status->{status} eq 'running') {
+ if (defined($status->{qmpstatus}) && $status->{qmpstatus} ne 'paused') {
+ log_warn("Skipping $type_text $vmid, already running.\n");
+ return 1;
+ } else {
+ $operation = 'resume';
+ }
+ }
+
+ my $params = {};
+ if (defined($param->{timeout}) && $operation eq 'start' && $type eq 'qemu') {
+ $params->{timeout} = $param->{timeout};
+ }
+
+ my $url = "/nodes/$node/$type/$vmid/status/$operation";
+ print "Starting $type_text $vmid\n";
+ return $api_client->post($url, $params);
+ };
+
+ my $check_task = sub {
+ my ($api_client, $vmid, $guest, $is_error, $task) = @_;
+ my $node = $guest->{node};
+
+ my $default_delay = 0;
+
+ if (!$is_error) {
+ my $delay = defined($guest->{up}) ? int($guest->{up}) : $default_delay;
+ if ($delay > 0) {
+ print "Waiting for $delay seconds (startup delay)\n"
+ if $guest->{up};
+ for (my $i = 0; $i < $delay; $i++) {
+ sleep(1);
+ }
+ }
+ } else {
+ my $err =
+ defined($task) ? $task->{exitstatus} : "could not query task status";
+ my $type_text = get_type_text($guest->{type});
+ log_warn("Starting $type_text $vmid failed: $err\n");
+ }
+ };
+
+ my $max_workers = $param->{maxworkers} // 1;
+ my $failed =
+ handle_task_foreach_guest($startlist, $max_workers, $start_task, $check_task);
+
+ if (scalar($failed->@*)) {
+ die "Some guests failed to start: " . join(', ', $failed->@*) . "\n";
+ }
+ };
+
+ return $rpcenv->fork_worker('bulkstart', undef, $authuser, $code);
+ },
+});
+
+__PACKAGE__->register_method({
+ name => 'shutdown',
+ path => 'shutdown',
+ method => 'POST',
+ description => "Bulk shutdown all guests on the cluster.",
+ permissions => {
+ description => "The 'VM.PowerMgmt' permission is required on '/' or on '/vms/<ID>' for "
+ . "each ID passed via the 'vms' parameter.",
+ user => 'all',
+ },
+ protected => 1,
+ expose_credentials => 1,
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ vms => {
+ description => "Only consider guests from this list of VMIDs.",
+ type => 'array',
+ items => get_standard_option('pve-vmid'),
+ optional => 1,
+ },
+ timeout => {
+ description =>
+ "Default shutdown timeout in seconds if none is configured for the guest.",
+ type => 'integer',
+ default => 180,
+ optional => 1,
+ },
+ 'force-stop' => {
+ description => "Makes sure the Guest stops after the timeout.",
+ type => 'boolean',
+ default => 1,
+ optional => 1,
+ },
+ maxworkers => {
+ description => "How many parallel tasks at maximum should be started.",
+ optional => 1,
+ default => 1,
+ type => 'integer',
+ },
+ # TODO:
+ # Failure resolution mode (fail, warn, retry?)
+ # mode-limits (offline only, suspend only, ?)
+ # filter (tags, name, ?)
+ },
+ },
+ returns => {
+ type => 'string',
+ description => "UPID of the worker",
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $authuser = $rpcenv->get_user();
+
+ my ($vmlist, $vmlist_string) = extract_vmlist($param);
+
+ check_guest_permissions($rpcenv, $authuser, $vmlist, ['VM.PowerMgmt']);
+
+ my $code = sub {
+ my $startlist =
+ PVE::API2::Nodes::Nodeinfo::get_start_stop_list(undef, undef, $vmlist_string);
+
+ print_start_action($vmlist, "Shutting down");
+
+ # reverse order for shutdown
+ for my $order (keys $startlist->%*) {
+ my $list = delete $startlist->{$order};
+ $order = $order * -1;
+ $startlist->{$order} = $list;
+ }
+
+ my $start_task = sub {
+ my ($api_client, $vmid, $guest) = @_;
+ my $node = $guest->{node};
+
+ my $type = $guest->{type};
+ my $type_text = get_type_text($type);
+
+ my $status = eval {
+ make_get_request($api_client, "/nodes/$node/$type/$vmid/status/current");
+ };
+ if (defined($status) && $status->{status} ne 'running') {
+ log_warn("Skipping $type_text $vmid, not running.\n");
+ return 1;
+ }
+
+ if (
+ defined($status)
+ && defined($status->{qmpstatus})
+ && $status->{qmpstatus} eq 'paused'
+ && !$param->{'force-stop'}
+ ) {
+ log_warn("Skipping $type_text $vmid, resume paused VM before shutdown.\n");
+ return 1;
+ }
+
+ my $timeout = int($guest->{down} // $param->{timeout} // 180);
+ my $forceStop = $param->{'force-stop'} // 1;
+
+ my $params = {
+ forceStop => $forceStop,
+ timeout => $timeout,
+ };
+
+ my $url = "/nodes/$node/$type/$vmid/status/shutdown";
+ print "Shutting down $type_text $vmid (Timeout = $timeout seconds)\n";
+ return $api_client->post($url, $params);
+ };
+
+ my $check_task = sub {
+ my ($api_client, $vmid, $guest, $is_error, $task) = @_;
+ my $node = $guest->{node};
+ if ($is_error) {
+ my $err =
+ defined($task) ? $task->{exitstatus} : "could not query task status";
+ my $type_text = get_type_text($guest->{type});
+ log_warn("Stopping $type_text $vmid failed: $err\n");
+ }
+ };
+
+ my $max_workers = $param->{maxworkers} // 1;
+ my $failed =
+ handle_task_foreach_guest($startlist, $max_workers, $start_task, $check_task);
+
+ if (scalar($failed->@*)) {
+ die "Some guests failed to shutdown " . join(', ', $failed->@*) . "\n";
+ }
+ };
+
+ return $rpcenv->fork_worker('bulkshutdown', undef, $authuser, $code);
+ },
+});
+
+__PACKAGE__->register_method({
+ name => 'suspend',
+ path => 'suspend',
+ method => 'POST',
+ description => "Bulk suspend all guests on the cluster.",
+ permissions => {
+ description =>
+ "The 'VM.PowerMgmt' permission is required on '/' or on '/vms/<ID>' for each"
+ . " ID passed via the 'vms' parameter. Additionally, you need 'VM.Config.Disk' on the"
+ . " '/vms/{vmid}' path and 'Datastore.AllocateSpace' for the configured state-storage(s)",
+ user => 'all',
+ },
+ protected => 1,
+ expose_credentials => 1,
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ vms => {
+ description => "Only consider guests from this list of VMIDs.",
+ type => 'array',
+ items => get_standard_option('pve-vmid'),
+ optional => 1,
+ },
+ statestorage => get_standard_option(
+ 'pve-storage-id',
+ {
+ description => "The storage for the VM state.",
+ requires => 'to-disk',
+ optional => 1,
+ completion => \&PVE::Storage::complete_storage_enabled,
+ },
+ ),
+ 'to-disk' => {
+ description =>
+ "If set, suspends the guests to disk. Will be resumed on next start.",
+ type => 'boolean',
+ default => 0,
+ optional => 1,
+ },
+ maxworkers => {
+ description => "How many parallel tasks at maximum should be started.",
+ optional => 1,
+ default => 1,
+ type => 'integer',
+ },
+ # TODO:
+ # Failure resolution mode (fail, warn, retry?)
+ # mode-limits (offline only, suspend only, ?)
+ # filter (tags, name, ?)
+ },
+ },
+ returns => {
+ type => 'string',
+ description => "UPID of the worker",
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $authuser = $rpcenv->get_user();
+
+ my ($vmlist, $vmlist_string) = extract_vmlist($param);
+
+ check_guest_permissions($rpcenv, $authuser, $vmlist, ['VM.PowerMgmt']);
+
+ if ($param->{'to-disk'}) {
+ check_guest_permissions($rpcenv, $authuser, $vmlist, ['VM.Config.Disk']);
+ if (my $statestorage = $param->{statestorage}) {
+ $rpcenv->check($authuser, "/storage/$statestorage",
+ ['Datastore.AllocateSpace']);
+ } else {
+ # storage access check will be done by api call itself later
+ }
+ }
+
+ my $code = sub {
+ my $startlist =
+ PVE::API2::Nodes::Nodeinfo::get_start_stop_list(undef, undef, $vmlist_string);
+
+ print_start_action($vmlist, "Suspending");
+
+ # reverse order for suspend
+ for my $order (keys $startlist->%*) {
+ my $list = delete $startlist->{$order};
+ $order = $order * -1;
+ $startlist->{$order} = $list;
+ }
+
+ my $start_task = sub {
+ my ($api_client, $vmid, $guest) = @_;
+ my $node = $guest->{node};
+
+ if ($guest->{type} ne 'qemu') {
+ log_warn("skipping $vmid, only VMs can be suspended");
+ return 1;
+ }
+
+ my $status =
+ eval { make_get_request($api_client, "/nodes/$node/qemu/$vmid/status/current") };
+ if (defined($status) && $status->{status} ne 'running') {
+ log_warn("Skipping VM $vmid, not running.\n");
+ return 1;
+ }
+
+ my $params = {};
+ $params->{'todisk'} = $param->{'to-disk'} // 0;
+ $params->{statestorage} = $param->{statestorage}
+ if $param->{'to-disk'} && defined($param->{statestorage});
+
+ my $url = "/nodes/$node/qemu/$vmid/status/suspend";
+ print "Suspending VM $vmid\n";
+ return $api_client->post($url, $params);
+ };
+
+ my $check_task = sub {
+ my ($api_client, $vmid, $guest, $is_error, $task) = @_;
+ my $node = $guest->{node};
+ if ($is_error) {
+ my $err =
+ defined($task) ? $task->{exitstatus} : "could not query task status";
+ my $type_text = get_type_text($guest->{type});
+ log_warn("Stopping $type_text $vmid failed: $err\n");
+ }
+ };
+
+ my $max_workers = $param->{maxworkers} // 1;
+ my $failed =
+ handle_task_foreach_guest($startlist, $max_workers, $start_task, $check_task);
+
+ if (scalar($failed->@*)) {
+ die "Some guests failed to suspend " . join(', ', $failed->@*) . "\n";
+ }
+ };
+
+ return $rpcenv->fork_worker('bulksuspend', undef, $authuser, $code);
+ },
+});
+
+__PACKAGE__->register_method({
+ name => 'migrate',
+ path => 'migrate',
+ method => 'POST',
+ description => "Bulk migrate all guests on the cluster.",
+ permissions => {
+ description =>
+ "The 'VM.Migrate' permission is required on '/' or on '/vms/<ID>' for each "
+ . "ID passed via the 'vms' parameter.",
+ user => 'all',
+ },
+ protected => 1,
+ expose_credentials => 1,
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ vms => {
+ description => "Only consider guests from this list of VMIDs.",
+ type => 'array',
+ items => get_standard_option('pve-vmid'),
+ optional => 1,
+ },
+ target => get_standard_option('pve-node', { description => "Target node." }),
+ online => {
+ type => 'boolean',
+ description => "Enable live migration for VMs and restart migration for CTs.",
+ optional => 1,
+ },
+ "with-local-disks" => {
+ type => 'boolean',
+ description => "Enable live storage migration for local disk",
+ optional => 1,
+ },
+ maxworkers => {
+ description => "How many parallel tasks at maximum should be started.",
+ optional => 1,
+ default => 1,
+ type => 'integer',
+ },
+ # TODO:
+ # Failure resolution mode (fail, warn, retry?)
+ # mode-limits (offline only, suspend only, ?)
+ # filter (tags, name, ?)
+ },
+ },
+ returns => {
+ type => 'string',
+ description => "UPID of the worker",
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $authuser = $rpcenv->get_user();
+
+ my ($vmlist, $vmlist_string) = extract_vmlist($param);
+
+ check_guest_permissions($rpcenv, $authuser, $vmlist, ['VM.Migrate']);
+
+ my $code = sub {
+ my $list =
+ PVE::API2::Nodes::Nodeinfo::get_filtered_vmlist(undef, $vmlist_string, 1, 1);
+
+ print_start_action($vmlist, "Migrating", "to $param->{target}");
+
+ my $start_task = sub {
+ my ($api_client, $vmid, $guest) = @_;
+ my $node = $guest->{node};
+
+ my $type = $guest->{type};
+ my $type_text = get_type_text($type);
+
+ if ($node eq $param->{target}) {
+ log_warn("$type_text $vmid already on $node, skipping.\n");
+ return 1;
+ }
+
+ my $params = {
+ target => $param->{target},
+ };
+
+ if ($type eq 'lxc') {
+ $params->{restart} = $param->{online} if defined($param->{online});
+ } elsif ($type eq 'qemu') {
+ $params->{online} = $param->{online} if defined($param->{online});
+ $params->{'with-local-disks'} = $param->{'with-local-disks'}
+ if defined($param->{'with-local-disks'});
+ }
+
+ my $url = "/nodes/$node/$type/$vmid/migrate";
+ print "Migrating $type_text $vmid\n";
+ return $api_client->post($url, $params);
+ };
+
+ my $check_task = sub {
+ my ($api_client, $vmid, $guest, $is_error, $task) = @_;
+ if ($is_error) {
+ my $err =
+ defined($task) ? $task->{exitstatus} : "could not query task status";
+ my $type_text = get_type_text($guest->{type});
+ log_warn("Migrating $type_text $vmid failed: $err\n");
+ }
+ };
+
+ my $max_workers = $param->{maxworkers} // 1;
+ my $failed =
+ handle_task_foreach_guest({ '0' => $list }, $max_workers, $start_task, $check_task);
+
+ if (scalar($failed->@*)) {
+ die "Some guests failed to migrate " . join(', ', $failed->@*) . "\n";
+ }
+ };
+
+ return $rpcenv->fork_worker('bulkmigrate', undef, $authuser, $code);
+ },
+});
+
+1;
diff --git a/PVE/API2/Cluster/BulkAction/Makefile b/PVE/API2/Cluster/BulkAction/Makefile
new file mode 100644
index 00000000..822c1c15
--- /dev/null
+++ b/PVE/API2/Cluster/BulkAction/Makefile
@@ -0,0 +1,17 @@
+include ../../../../defines.mk
+
+# for node independent, cluster-wide applicable, API endpoints
+# ensure we do not conflict with files shipped by pve-cluster!!
+PERLSOURCE= \
+ Guest.pm
+
+all:
+
+.PHONY: clean
+clean:
+ rm -rf *~
+
+.PHONY: install
+install: ${PERLSOURCE}
+ install -d ${PERLLIBDIR}/PVE/API2/Cluster/BulkAction
+ install -m 0644 ${PERLSOURCE} ${PERLLIBDIR}/PVE/API2/Cluster/BulkAction
diff --git a/PVE/API2/Cluster/Makefile b/PVE/API2/Cluster/Makefile
index b109e5cb..6cffe4c9 100644
--- a/PVE/API2/Cluster/Makefile
+++ b/PVE/API2/Cluster/Makefile
@@ -1,11 +1,13 @@
include ../../../defines.mk
-SUBDIRS=Mapping
+SUBDIRS=Mapping \
+ BulkAction
# for node independent, cluster-wide applicable, API endpoints
# ensure we do not conflict with files shipped by pve-cluster!!
PERLSOURCE= \
BackupInfo.pm \
+ BulkAction.pm \
MetricServer.pm \
Mapping.pm \
Notifications.pm \
diff --git a/PVE/API2/Nodes.pm b/PVE/API2/Nodes.pm
index d0a78b95..66a006ba 100644
--- a/PVE/API2/Nodes.pm
+++ b/PVE/API2/Nodes.pm
@@ -1867,7 +1867,7 @@ __PACKAGE__->register_method({
# * vmid whitelist
# * guest is a template (default: skip)
# * guest is HA manged (default: skip)
-my $get_filtered_vmlist = sub {
+sub get_filtered_vmlist {
my ($nodename, $vmfilter, $templates, $ha_managed) = @_;
my $vmlist = PVE::Cluster::get_vmlist();
@@ -1894,28 +1894,29 @@ my $get_filtered_vmlist = sub {
die "unknown virtual guest type '$d->{type}'\n";
}
- my $conf = $class->load_config($vmid);
+ my $conf = $class->load_config($vmid, $d->{node});
return if !$templates && $class->is_template($conf);
return if !$ha_managed && PVE::HA::Config::vm_is_ha_managed($vmid);
$res->{$vmid}->{conf} = $conf;
$res->{$vmid}->{type} = $d->{type};
$res->{$vmid}->{class} = $class;
+ $res->{$vmid}->{node} = $d->{node};
};
warn $@ if $@;
}
return $res;
-};
+}
# return all VMs which should get started/stopped on power up/down
-my $get_start_stop_list = sub {
+sub get_start_stop_list {
my ($nodename, $autostart, $vmfilter) = @_;
# do not skip HA vms on force or if a specific VMID set is wanted
my $include_ha_managed = defined($vmfilter) ? 1 : 0;
- my $vmlist = $get_filtered_vmlist->($nodename, $vmfilter, undef, $include_ha_managed);
+ my $vmlist = get_filtered_vmlist($nodename, $vmfilter, undef, $include_ha_managed);
my $resList = {};
foreach my $vmid (keys %$vmlist) {
@@ -1928,15 +1929,16 @@ my $get_start_stop_list = sub {
$resList->{$order}->{$vmid} = $startup;
$resList->{$order}->{$vmid}->{type} = $vmlist->{$vmid}->{type};
+ $resList->{$order}->{$vmid}->{node} = $vmlist->{$vmid}->{node};
}
return $resList;
-};
+}
my $remove_locks_on_startup = sub {
my ($nodename) = @_;
- my $vmlist = &$get_filtered_vmlist($nodename, undef, undef, 1);
+ my $vmlist = get_filtered_vmlist($nodename, undef, undef, 1);
foreach my $vmid (keys %$vmlist) {
my $conf = $vmlist->{$vmid}->{conf};
@@ -2028,7 +2030,7 @@ __PACKAGE__->register_method({
warn $@ if $@;
my $autostart = $force ? undef : 1;
- my $startList = $get_start_stop_list->($nodename, $autostart, $param->{vms});
+ my $startList = get_start_stop_list($nodename, $autostart, $param->{vms});
# Note: use numeric sorting with <=>
for my $order (sort { $a <=> $b } keys %$startList) {
@@ -2174,7 +2176,7 @@ __PACKAGE__->register_method({
$rpcenv->{type} = 'priv'; # to start tasks in background
- my $stopList = $get_start_stop_list->($nodename, undef, $param->{vms});
+ my $stopList = get_start_stop_list($nodename, undef, $param->{vms});
my $cpuinfo = PVE::ProcFSTools::read_cpuinfo();
my $datacenterconfig = cfs_read_file('datacenter.cfg');
@@ -2303,7 +2305,7 @@ __PACKAGE__->register_method({
$rpcenv->{type} = 'priv'; # to start tasks in background
- my $toSuspendList = $get_start_stop_list->($nodename, undef, $param->{vms});
+ my $toSuspendList = get_start_stop_list($nodename, undef, $param->{vms});
my $cpuinfo = PVE::ProcFSTools::read_cpuinfo();
my $datacenterconfig = cfs_read_file('datacenter.cfg');
@@ -2508,7 +2510,7 @@ __PACKAGE__->register_method({
my $code = sub {
$rpcenv->{type} = 'priv'; # to start tasks in background
- my $vmlist = &$get_filtered_vmlist($nodename, $param->{vms}, 1, 1);
+ my $vmlist = get_filtered_vmlist($nodename, $param->{vms}, 1, 1);
if (!scalar(keys %$vmlist)) {
warn "no virtual guests matched, nothing to do..\n";
return;
--
2.47.3
* [pve-devel] [PATCH manager v4 3/3] ui: add bulk actions to the datacenter level
From: Dominik Csapak @ 2025-11-14 14:59 UTC
To: pve-devel
reuse the BulkAction window as much as possible. There are only a few
adaptations needed:
* the API endpoints are slightly different (e.g. start vs. startall)
* we have to handle a missing nodename
* some parameters are different:
- start does not have a 'force'
- migrate needs the 'online' parameter
- the 'vms' list is expected to be an array for the new api calls
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
www/manager6/dc/Config.js | 68 +++++++++++++++++++++++++++++++
www/manager6/window/BulkAction.js | 50 +++++++++++++++++++----
2 files changed, 109 insertions(+), 9 deletions(-)
diff --git a/www/manager6/dc/Config.js b/www/manager6/dc/Config.js
index 60fea4b6..b5e27a21 100644
--- a/www/manager6/dc/Config.js
+++ b/www/manager6/dc/Config.js
@@ -15,9 +15,77 @@ Ext.define('PVE.dc.Config', {
me.items = [];
+ let actionBtn = Ext.create('Ext.Button', {
+ text: gettext('Bulk Actions'),
+ iconCls: 'fa fa-fw fa-ellipsis-v',
+ disabled: !caps.vms['VM.PowerMgmt'] && !caps.vms['VM.Migrate'],
+ menu: new Ext.menu.Menu({
+ items: [
+ {
+ text: gettext('Bulk Start'),
+ iconCls: 'fa fa-fw fa-play',
+ disabled: !caps.vms['VM.PowerMgmt'],
+ handler: function () {
+ Ext.create('PVE.window.BulkAction', {
+ autoShow: true,
+ vmsAsArray: true,
+ title: gettext('Bulk Start'),
+ btnText: gettext('Start'),
+ action: 'start',
+ });
+ },
+ },
+ {
+ text: gettext('Bulk Shutdown'),
+ iconCls: 'fa fa-fw fa-stop',
+ disabled: !caps.vms['VM.PowerMgmt'],
+ handler: function () {
+ Ext.create('PVE.window.BulkAction', {
+ autoShow: true,
+ vmsAsArray: true,
+ title: gettext('Bulk Shutdown'),
+ btnText: gettext('Shutdown'),
+ action: 'shutdown',
+ });
+ },
+ },
+ {
+ text: gettext('Bulk Suspend'),
+ iconCls: 'fa fa-fw fa-download',
+ disabled: !caps.vms['VM.PowerMgmt'],
+ handler: function () {
+ Ext.create('PVE.window.BulkAction', {
+ autoShow: true,
+ vmsAsArray: true,
+ title: gettext('Bulk Suspend'),
+ btnText: gettext('Suspend'),
+ action: 'suspend',
+ });
+ },
+ },
+ {
+ text: gettext('Bulk Migrate'),
+ iconCls: 'fa fa-fw fa-send-o',
+ disabled: !caps.vms['VM.Migrate'],
+ hidden: PVE.Utils.isStandaloneNode(),
+ handler: function () {
+ Ext.create('PVE.window.BulkAction', {
+ autoShow: true,
+ vmsAsArray: true,
+ title: gettext('Bulk Migrate'),
+ btnText: gettext('Migrate'),
+ action: 'migrate',
+ });
+ },
+ },
+ ],
+ }),
+ });
+
Ext.apply(me, {
title: gettext('Datacenter'),
hstateid: 'dctab',
+ tbar: [actionBtn],
});
if (caps.dc['Sys.Audit']) {
diff --git a/www/manager6/window/BulkAction.js b/www/manager6/window/BulkAction.js
index 0223740f..b2d82bd3 100644
--- a/www/manager6/window/BulkAction.js
+++ b/www/manager6/window/BulkAction.js
@@ -13,12 +13,27 @@ Ext.define('PVE.window.BulkAction', {
// the action to set, currently there are: `startall`, `migrateall`, `stopall`, `suspendall`
action: undefined,
+ // if set to true, the 'vms' parameter will be sent as an array
+ // necessary for the cluster-wide api call
+ vmsAsArray: false,
+
submit: function (params) {
let me = this;
+ let url;
+ if (me.nodename) {
+ url = `/nodes/${me.nodename}/${me.action}`;
+ } else {
+ url = `/cluster/bulk-action/guest/${me.action}`;
+ }
+
+ if (me.vmsAsArray) {
+ params.vms = params.vms.split(/[,; ]/);
+ }
+
Proxmox.Utils.API2Request({
params: params,
- url: `/nodes/${me.nodename}/${me.action}`,
+ url,
waitMsgTarget: me,
method: 'POST',
failure: (response) => Ext.Msg.alert('Error', response.htmlStatus),
@@ -38,9 +53,6 @@ Ext.define('PVE.window.BulkAction', {
initComponent: function () {
let me = this;
- if (!me.nodename) {
- throw 'no node name specified';
- }
if (!me.action) {
throw 'no action specified';
}
@@ -52,7 +64,11 @@ Ext.define('PVE.window.BulkAction', {
}
let items = [];
- if (me.action === 'migrateall') {
+ if (me.action === 'migrateall' || me.action === 'migrate') {
+ let disallowedNodes = [];
+ if (me.nodename) {
+ disallowedNodes.push(me.nodename);
+ }
items.push(
{
xtype: 'fieldcontainer',
@@ -62,7 +78,7 @@ Ext.define('PVE.window.BulkAction', {
flex: 1,
xtype: 'pveNodeSelector',
name: 'target',
- disallowedNodes: [me.nodename],
+ disallowedNodes,
fieldLabel: gettext('Target node'),
labelWidth: 200,
allowBlank: false,
@@ -106,13 +122,20 @@ Ext.define('PVE.window.BulkAction', {
],
},
);
+ if (me.action === 'migrate') {
+ items.push({
+ xtype: 'hiddenfield',
+ name: 'online',
+ value: 1,
+ });
+ }
} else if (me.action === 'startall') {
items.push({
xtype: 'hiddenfield',
name: 'force',
value: 1,
});
- } else if (me.action === 'stopall') {
+ } else if (me.action === 'stopall' || me.action === 'shutdown') {
items.push({
xtype: 'fieldcontainer',
layout: 'hbox',
@@ -152,8 +175,17 @@ Ext.define('PVE.window.BulkAction', {
me.down('#lxcwarning').setVisible(showWarning);
};
- let defaultStatus =
- me.action === 'migrateall' ? '' : me.action === 'startall' ? 'stopped' : 'running';
+ let defaulStatusMap = {
+ migrateall: '',
+ migrate: '',
+ startall: 'stopped',
+ start: 'stopped',
+ stopall: 'running',
+ shutdown: 'running',
+ suspendall: 'running',
+ suspend: 'running',
+ };
+ let defaultStatus = defaulStatusMap[me.action] ?? '';
let defaultType = me.action === 'suspendall' ? 'qemu' : '';
let statusMap = [];
--
2.47.3