From: Fabian Ebner
To: pve-devel@lists.proxmox.com
Date: Mon, 25 Oct 2021 15:47:49 +0200
Message-Id: <20211025134755.169491-7-f.ebner@proxmox.com>
In-Reply-To: <20211025134755.169491-1-f.ebner@proxmox.com>
References: <20211025134755.169491-1-f.ebner@proxmox.com>
Subject: [pve-devel] [PATCH storage 6/6] api: disks: delete: add flag for cleaning up storage config

Update node restrictions to reflect that the storage is not available
anymore on the particular node. If the storage was only configured for
that node, remove it altogether.

Signed-off-by: Fabian Ebner
---
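Illustration of the intended effect (hypothetical /etc/pve/storage.cfg entry,
names made up, not part of the patch): given

    dir: mydir
        path /mnt/pve/mydir
        content images
        nodes nodeA,nodeB

deleting the directory storage on nodeB with cleanup-config set would reduce
the restriction to "nodes nodeA", while an entry restricted to nodeB alone
would be removed from storage.cfg entirely. Assuming the existing disk
management API paths, the flag could then be exercised via e.g.:

    pvesh delete /nodes/nodeB/disks/directory/mydir --cleanup-config 1 --cleanup-disks 1
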
+ "or removes them from the configuration (if configured for this node only).", + type => 'boolean', + optional => 1, + default => 0, + }, 'cleanup-disks' => { description => "Also wipe disk so it can be repurposed afterwards.", type => 'boolean', @@ -330,6 +337,7 @@ __PACKAGE__->register_method ({ my $user = $rpcenv->get_user(); my $name = $param->{name}; + my $node = $param->{node}; my $worker = sub { my $path = "/mnt/pve/$name"; @@ -357,10 +365,22 @@ __PACKAGE__->register_method ({ unlink $mountunitpath or $! == ENOENT or die "cannot remove $mountunitpath - $!\n"; + my $config_err; + if ($param->{'cleanup-config'}) { + my $match = sub { + my ($scfg) = @_; + return $scfg->{type} eq 'dir' && $scfg->{path} eq $path; + }; + eval { PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); }; + warn $config_err = $@ if $@; + } + if ($to_wipe) { PVE::Diskmanage::wipe_blockdev($to_wipe); PVE::Diskmanage::udevadm_trigger($to_wipe); } + + die "config cleanup failed - $config_err" if $config_err; }); }; diff --git a/PVE/API2/Disks/LVM.pm b/PVE/API2/Disks/LVM.pm index 1af3d43..6e4331a 100644 --- a/PVE/API2/Disks/LVM.pm +++ b/PVE/API2/Disks/LVM.pm @@ -198,6 +198,13 @@ __PACKAGE__->register_method ({ properties => { node => get_standard_option('pve-node'), name => get_standard_option('pve-storage-id'), + 'cleanup-config' => { + description => "Marks associated storage(s) as not available on this node anymore ". + "or removes them from the configuration (if configured for this node only).", + type => 'boolean', + optional => 1, + default => 0, + }, 'cleanup-disks' => { description => "Also wipe disks so they can be repurposed afterwards.", type => 'boolean', @@ -214,6 +221,7 @@ __PACKAGE__->register_method ({ my $user = $rpcenv->get_user(); my $name = $param->{name}; + my $node = $param->{node}; my $worker = sub { PVE::Diskmanage::locked_disk_action(sub { @@ -222,6 +230,16 @@ __PACKAGE__->register_method ({ PVE::Storage::LVMPlugin::lvm_destroy_volume_group($name); + my $config_err; + if ($param->{'cleanup-config'}) { + my $match = sub { + my ($scfg) = @_; + return $scfg->{type} eq 'lvm' && $scfg->{vgname} eq $name; + }; + eval { PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); }; + warn $config_err = $@ if $@; + } + if ($param->{'cleanup-disks'}) { my $wiped = []; eval { @@ -235,6 +253,8 @@ __PACKAGE__->register_method ({ PVE::Diskmanage::udevadm_trigger($wiped->@*); die "cleanup failed - $err" if $err; } + + die "config cleanup failed - $config_err" if $config_err; }); }; diff --git a/PVE/API2/Disks/LVMThin.pm b/PVE/API2/Disks/LVMThin.pm index ea36ce2..a82ab15 100644 --- a/PVE/API2/Disks/LVMThin.pm +++ b/PVE/API2/Disks/LVMThin.pm @@ -177,6 +177,13 @@ __PACKAGE__->register_method ({ node => get_standard_option('pve-node'), name => get_standard_option('pve-storage-id'), 'volume-group' => get_standard_option('pve-storage-id'), + 'cleanup-config' => { + description => "Marks associated storage(s) as not available on this node anymore ". 
+ "or removes them from the configuration (if configured for this node only).", + type => 'boolean', + optional => 1, + default => 0, + }, 'cleanup-disks' => { description => "Also wipe disks so they can be repurposed afterwards.", type => 'boolean', @@ -194,6 +201,7 @@ __PACKAGE__->register_method ({ my $vg = $param->{'volume-group'}; my $lv = $param->{name}; + my $node = $param->{node}; my $worker = sub { PVE::Diskmanage::locked_disk_action(sub { @@ -204,6 +212,17 @@ __PACKAGE__->register_method ({ run_command(['lvremove', '-y', "${vg}/${lv}"]); + my $config_err; + if ($param->{'cleanup-config'}) { + my $match = sub { + my ($scfg) = @_; + return if $scfg->{type} ne 'lvmthin'; + return $scfg->{vgname} eq $vg && $scfg->{thinpool} eq $lv; + }; + eval { PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); }; + warn $config_err = $@ if $@; + } + if ($param->{'cleanup-disks'}) { my $vgs = PVE::Storage::LVMPlugin::lvm_vgs(1); @@ -222,6 +241,8 @@ __PACKAGE__->register_method ({ PVE::Diskmanage::udevadm_trigger($wiped->@*); die "cleanup failed - $err" if $err; } + + die "config cleanup failed - $config_err" if $config_err; }); }; diff --git a/PVE/API2/Disks/ZFS.pm b/PVE/API2/Disks/ZFS.pm index 10b73a5..63bc435 100644 --- a/PVE/API2/Disks/ZFS.pm +++ b/PVE/API2/Disks/ZFS.pm @@ -460,6 +460,13 @@ __PACKAGE__->register_method ({ properties => { node => get_standard_option('pve-node'), name => get_standard_option('pve-storage-id'), + 'cleanup-config' => { + description => "Marks associated storage(s) as not available on this node anymore ". + "or removes them from the configuration (if configured for this node only).", + type => 'boolean', + optional => 1, + default => 0, + }, 'cleanup-disks' => { description => "Also wipe disks so they can be repurposed afterwards.", type => 'boolean', @@ -476,6 +483,7 @@ __PACKAGE__->register_method ({ my $user = $rpcenv->get_user(); my $name = $param->{name}; + my $node = $param->{node}; my $worker = sub { PVE::Diskmanage::locked_disk_action(sub { @@ -516,10 +524,22 @@ __PACKAGE__->register_method ({ run_command(['zpool', 'destroy', $name]); + my $config_err; + if ($param->{'cleanup-config'}) { + my $match = sub { + my ($scfg) = @_; + return $scfg->{type} eq 'zfspool' && $scfg->{pool} eq $name; + }; + eval { PVE::API2::Storage::Config->cleanup_storages_for_node($match, $node); }; + warn $config_err = $@ if $@; + } + eval { PVE::Diskmanage::wipe_blockdev($_) for $to_wipe->@*; }; my $err = $@; PVE::Diskmanage::udevadm_trigger($to_wipe->@*); die "cleanup failed - $err" if $err; + + die "config cleanup failed - $config_err" if $config_err; }); }; diff --git a/PVE/API2/Storage/Config.pm b/PVE/API2/Storage/Config.pm index bf38df3..6bd770e 100755 --- a/PVE/API2/Storage/Config.pm +++ b/PVE/API2/Storage/Config.pm @@ -38,6 +38,33 @@ my $api_storage_config = sub { return $scfg; }; +# For storages that $match->($scfg), update node restrictions to not include $node anymore and +# in case no node remains, remove the storage altogether. 
+sub cleanup_storages_for_node {
+    my ($self, $match, $node) = @_;
+
+    my $config = PVE::Storage::config();
+    my $cluster_nodes = PVE::Cluster::get_nodelist();
+
+    for my $storeid (keys $config->{ids}->%*) {
+	my $scfg = PVE::Storage::storage_config($config, $storeid);
+	next if !$match->($scfg);
+
+	my $nodes = $scfg->{nodes} || { map { $_ => 1 } $cluster_nodes->@* };
+	next if !$nodes->{$node}; # not configured on $node, so nothing to do
+	delete $nodes->{$node};
+
+	if (scalar(keys $nodes->%*) > 0) {
+	    $self->update({
+		nodes => join(',', sort keys $nodes->%*),
+		storage => $storeid,
+	    });
+	} else {
+	    $self->delete({storage => $storeid});
+	}
+    }
+}
+
 __PACKAGE__->register_method ({
     name => 'index',
     path => '',
-- 
2.30.2
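
As a side note on the helper's node handling: an unset 'nodes' property means
the storage is available on all cluster nodes, which is why
cleanup_storages_for_node falls back to the full node list before dropping
$node. A minimal, self-contained sketch of that decision (made-up node and
storage names, plain Perl without the PVE modules):

    #!/usr/bin/perl
    use strict;
    use warnings;

    # Assumed inputs: the cluster node list and a storage section config
    # without a 'nodes' restriction, i.e. available on every node.
    my $cluster_nodes = ['nodeA', 'nodeB', 'nodeC'];
    my $scfg = { type => 'dir', path => '/mnt/pve/mydir' };
    my $node = 'nodeB'; # the node the storage is being removed from

    # Same fallback as in the patch: no 'nodes' hash means "all cluster nodes".
    my $nodes = $scfg->{nodes} || { map { $_ => 1 } $cluster_nodes->@* };
    delete $nodes->{$node};

    if (scalar(keys $nodes->%*) > 0) {
        # Other nodes remain -> the entry would be updated to "nodes nodeA,nodeC".
        print "update restriction: nodes=", join(',', sort keys $nodes->%*), "\n";
    } else {
        # $node was the only configured node -> the entry would be removed.
        print "remove storage from config\n";
    }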