From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <f.ebner@proxmox.com>
Received: from firstgate.proxmox.com (firstgate.proxmox.com [212.224.123.68])
 (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)
 key-exchange X25519 server-signature RSA-PSS (2048 bits))
 (No client certificate requested)
 by lists.proxmox.com (Postfix) with ESMTPS id 9C46E77F09
 for <pve-devel@lists.proxmox.com>; Mon, 25 Oct 2021 15:48:10 +0200 (CEST)
Received: from firstgate.proxmox.com (localhost [127.0.0.1])
 by firstgate.proxmox.com (Proxmox) with ESMTP id 53B15237EC
 for <pve-devel@lists.proxmox.com>; Mon, 25 Oct 2021 15:48:09 +0200 (CEST)
Received: from proxmox-new.maurer-it.com (proxmox-new.maurer-it.com
 [94.136.29.106])
 (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)
 key-exchange X25519 server-signature RSA-PSS (2048 bits))
 (No client certificate requested)
 by firstgate.proxmox.com (Proxmox) with ESMTPS id 876F223645
 for <pve-devel@lists.proxmox.com>; Mon, 25 Oct 2021 15:48:00 +0200 (CEST)
Received: from proxmox-new.maurer-it.com (localhost.localdomain [127.0.0.1])
 by proxmox-new.maurer-it.com (Proxmox) with ESMTP id 61BAE45F7D
 for <pve-devel@lists.proxmox.com>; Mon, 25 Oct 2021 15:48:00 +0200 (CEST)
From: Fabian Ebner <f.ebner@proxmox.com>
To: pve-devel@lists.proxmox.com
Date: Mon, 25 Oct 2021 15:47:48 +0200
Message-Id: <20211025134755.169491-6-f.ebner@proxmox.com>
X-Mailer: git-send-email 2.30.2
In-Reply-To: <20211025134755.169491-1-f.ebner@proxmox.com>
References: <20211025134755.169491-1-f.ebner@proxmox.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
X-SPAM-LEVEL: Spam detection results:  0
 AWL 0.261 Adjusted score from AWL reputation of From: address
 BAYES_00                 -1.9 Bayes spam probability is 0 to 1%
 KAM_DMARC_STATUS 0.01 Test Rule for DKIM or SPF Failure with Strict Alignment
 SPF_HELO_NONE           0.001 SPF: HELO does not publish an SPF Record
 SPF_PASS               -0.001 SPF: sender matches SPF record
Subject: [pve-devel] [PATCH storage 5/6] api: disks: delete: add flag for
 wiping disks
X-BeenThere: pve-devel@lists.proxmox.com
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: Proxmox VE development discussion <pve-devel.lists.proxmox.com>
List-Unsubscribe: <https://lists.proxmox.com/cgi-bin/mailman/options/pve-devel>, 
 <mailto:pve-devel-request@lists.proxmox.com?subject=unsubscribe>
List-Archive: <http://lists.proxmox.com/pipermail/pve-devel/>
List-Post: <mailto:pve-devel@lists.proxmox.com>
List-Help: <mailto:pve-devel-request@lists.proxmox.com?subject=help>
List-Subscribe: <https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel>, 
 <mailto:pve-devel-request@lists.proxmox.com?subject=subscribe>
X-List-Received-Date: Mon, 25 Oct 2021 13:48:10 -0000

For ZFS and directory storages, also wipe the whole parent disk when the
partition layout is the usual one created by these tools, to avoid left-overs.

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
---
 PVE/API2/Disks/Directory.pm | 26 +++++++++++++++++++++++
 PVE/API2/Disks/LVM.pm       | 23 +++++++++++++++++++++
 PVE/API2/Disks/LVMThin.pm   | 25 ++++++++++++++++++++++
 PVE/API2/Disks/ZFS.pm       | 41 +++++++++++++++++++++++++++++++++++++
 4 files changed, 115 insertions(+)

diff --git a/PVE/API2/Disks/Directory.pm b/PVE/API2/Disks/Directory.pm
index e9b05be..c9dcb52 100644
--- a/PVE/API2/Disks/Directory.pm
+++ b/PVE/API2/Disks/Directory.pm
@@ -314,6 +314,12 @@ __PACKAGE__->register_method ({
 	properties => {
 	    node => get_standard_option('pve-node'),
 	    name => get_standard_option('pve-storage-id'),
+	    'cleanup-disks' => {
+		description => "Also wipe disk so it can be repurposed afterwards.",
+		type => 'boolean',
+		optional => 1,
+		default => 0,
+	    },
 	},
     },
     returns => { type => 'string' },
@@ -331,10 +337,30 @@ __PACKAGE__->register_method ({
 	    my $mountunitpath = "/etc/systemd/system/$mountunitname";
 
 	    PVE::Diskmanage::locked_disk_action(sub {
+		my $to_wipe;
+		if ($param->{'cleanup-disks'}) {
+		    my $unit = $read_ini->($mountunitpath);
+
+		    my $dev = PVE::Diskmanage::verify_blockdev_path($unit->{'Mount'}->{'What'});
+		    $to_wipe = $dev;
+
+		    # clean up whole device if this is the only partition
+		    $dev =~ s|^/dev/||;
+		    my $info = PVE::Diskmanage::get_disks($dev, 1, 1);
+		    die "unable to obtain information for disk '$dev'\n" if !$info->{$dev};
+		    $to_wipe = $info->{$dev}->{parent}
+			if $info->{$dev}->{parent} && scalar(keys $info->%*) == 2;
+		}
+
 		run_command(['systemctl', 'stop', $mountunitname]);
 		run_command(['systemctl', 'disable', $mountunitname]);
 
 		unlink $mountunitpath or $! == ENOENT or die "cannot remove $mountunitpath - $!\n";
+
+		if ($to_wipe) {
+		    PVE::Diskmanage::wipe_blockdev($to_wipe);
+		    PVE::Diskmanage::udevadm_trigger($to_wipe);
+		}
 	    });
 	};
 
diff --git a/PVE/API2/Disks/LVM.pm b/PVE/API2/Disks/LVM.pm
index 1b88af2..1af3d43 100644
--- a/PVE/API2/Disks/LVM.pm
+++ b/PVE/API2/Disks/LVM.pm
@@ -198,6 +198,12 @@ __PACKAGE__->register_method ({
 	properties => {
 	    node => get_standard_option('pve-node'),
 	    name => get_standard_option('pve-storage-id'),
+	    'cleanup-disks' => {
+		description => "Also wipe disks so they can be repurposed afterwards.",
+		type => 'boolean',
+		optional => 1,
+		default => 0,
+	    },
 	},
     },
     returns => { type => 'string' },
@@ -211,7 +217,24 @@ __PACKAGE__->register_method ({
 
 	my $worker = sub {
 	    PVE::Diskmanage::locked_disk_action(sub {
+		my $vgs = PVE::Storage::LVMPlugin::lvm_vgs(1);
+		die "no such volume group '$name'\n" if !$vgs->{$name};
+
 		PVE::Storage::LVMPlugin::lvm_destroy_volume_group($name);
+
+		if ($param->{'cleanup-disks'}) {
+		    my $wiped = [];
+		    eval {
+			for my $pv ($vgs->{$name}->{pvs}->@*) {
+			    my $dev = PVE::Diskmanage::verify_blockdev_path($pv->{name});
+			    PVE::Diskmanage::wipe_blockdev($dev);
+			    push $wiped->@*, $dev;
+			}
+		    };
+		    my $err = $@;
+		    PVE::Diskmanage::udevadm_trigger($wiped->@*);
+		    die "cleanup failed - $err" if $err;
+		}
 	    });
 	};
 
diff --git a/PVE/API2/Disks/LVMThin.pm b/PVE/API2/Disks/LVMThin.pm
index 23f262a..ea36ce2 100644
--- a/PVE/API2/Disks/LVMThin.pm
+++ b/PVE/API2/Disks/LVMThin.pm
@@ -177,6 +177,12 @@ __PACKAGE__->register_method ({
 	    node => get_standard_option('pve-node'),
 	    name => get_standard_option('pve-storage-id'),
 	    'volume-group' => get_standard_option('pve-storage-id'),
+	    'cleanup-disks' => {
+		description => "Also wipe disks so they can be repurposed afterwards.",
+		type => 'boolean',
+		optional => 1,
+		default => 0,
+	    },
 	},
     },
     returns => { type => 'string' },
@@ -197,6 +203,25 @@ __PACKAGE__->register_method ({
 		    if !grep { $_->{lv} eq $lv && $_->{vg} eq $vg } $thinpools->@*;
 
 		run_command(['lvremove', '-y', "${vg}/${lv}"]);
+
+		if ($param->{'cleanup-disks'}) {
+		    my $vgs = PVE::Storage::LVMPlugin::lvm_vgs(1);
+
+		    die "no such volume group '$vg'\n" if !$vgs->{$vg};
+		    die "volume group '$vg' still in use\n" if $vgs->{$vg}->{lvcount} > 0;
+
+		    my $wiped = [];
+		    eval {
+			for my $pv ($vgs->{$vg}->{pvs}->@*) {
+			    my $dev = PVE::Diskmanage::verify_blockdev_path($pv->{name});
+			    PVE::Diskmanage::wipe_blockdev($dev);
+			    push $wiped->@*, $dev;
+			}
+		    };
+		    my $err = $@;
+		    PVE::Diskmanage::udevadm_trigger($wiped->@*);
+		    die "cleanup failed - $err" if $err;
+		}
 	    });
 	};
 
diff --git a/PVE/API2/Disks/ZFS.pm b/PVE/API2/Disks/ZFS.pm
index e892712..10b73a5 100644
--- a/PVE/API2/Disks/ZFS.pm
+++ b/PVE/API2/Disks/ZFS.pm
@@ -460,6 +460,12 @@ __PACKAGE__->register_method ({
 	properties => {
 	    node => get_standard_option('pve-node'),
 	    name => get_standard_option('pve-storage-id'),
+	    'cleanup-disks' => {
+		description => "Also wipe disks so they can be repurposed afterwards.",
+		type => 'boolean',
+		optional => 1,
+		default => 0,
+	    },
 	},
     },
     returns => { type => 'string' },
@@ -473,12 +479,47 @@ __PACKAGE__->register_method ({
 
 	my $worker = sub {
 	    PVE::Diskmanage::locked_disk_action(sub {
+		my $to_wipe = [];
+		if ($param->{'cleanup-disks'}) {
+		    # Note: in combination with -v, using '-o name' does not restrict the output to the name.
+		    run_command(['zpool', 'list', '-vHPL', $name], outfunc => sub {
+			my ($line) = @_;
+
+			my ($name) = PVE::Tools::split_list($line);
+			return if $name !~ m|^/dev/.+|;
+
+			my $dev = PVE::Diskmanage::verify_blockdev_path($name);
+			my $wipe = $dev;
+
+			$dev =~ s|^/dev/||;
+			my $info = PVE::Diskmanage::get_disks($dev, 1, 1);
+			die "unable to obtain information for disk '$dev'\n" if !$info->{$dev};
+
+			# Wipe whole disk if usual ZFS layout with partition 9 as ZFS reserved.
+			my $parent = $info->{$dev}->{parent};
+			if ($parent && scalar(keys $info->%*) == 3) {
+			    $parent =~ s|^/dev/||;
+			    my $info9 = $info->{"${parent}9"};
+
+			    $wipe = $info->{$dev}->{parent} # need leading /dev/
+				if $info9 && $info9->{used} && $info9->{used} =~ m/^ZFS reserved/;
+			}
+
+			push $to_wipe->@*, $wipe;
+		    });
+		}
+
 		if (-e '/lib/systemd/system/zfs-import@.service') {
 		    my $importunit = 'zfs-import@' . PVE::Systemd::escape_unit($name) . '.service';
 		    run_command(['systemctl', 'disable', $importunit]);
 		}
 
 		run_command(['zpool', 'destroy', $name]);
+
+		eval { PVE::Diskmanage::wipe_blockdev($_) for $to_wipe->@*; };
+		my $err = $@;
+		PVE::Diskmanage::udevadm_trigger($to_wipe->@*);
+		die "cleanup failed - $err" if $err;
 	    });
 	};
 
-- 
2.30.2