From: Dominik Csapak <d.csapak@proxmox.com>
To: pve-devel@lists.proxmox.com
Subject: [pve-devel] [PATCH manager v2 11/11] pveceph: add 'fs destroy' command
Date: Mon, 25 Oct 2021 16:01:38 +0200 [thread overview]
Message-ID: <20211025140139.2015470-13-d.csapak@proxmox.com> (raw)
In-Reply-To: <20211025140139.2015470-1-d.csapak@proxmox.com>
with 'remove-storages' and 'remove-pools' as optional parameters
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
PVE/CLI/pveceph.pm | 120 ++++++++++++++++++++++++++++++++++++++++++
PVE/Ceph/Tools.pm | 15 ++++++
www/manager6/Utils.js | 1 +
3 files changed, 136 insertions(+)
diff --git a/PVE/CLI/pveceph.pm b/PVE/CLI/pveceph.pm
index b04d1346..995cfcd5 100755
--- a/PVE/CLI/pveceph.pm
+++ b/PVE/CLI/pveceph.pm
@@ -221,6 +221,125 @@ __PACKAGE__->register_method ({
return undef;
}});
+# Collect all pveceph-relevant 'cephfs' storage definitions for a given ceph
+# filesystem. Returns a hashref mapping storeid => storage config.
+# A cephfs storage entry without an explicit 'fs-name' is matched only when
+# $is_default is true, since such entries implicitly refer to the cluster's
+# default filesystem.
+my $get_storages = sub {
+ my ($fs, $is_default) = @_;
+
+ my $cfg = PVE::Storage::config();
+
+ my $storages = $cfg->{ids};
+ my $res = {};
+ foreach my $storeid (keys %$storages) {
+ my $curr = $storages->{$storeid};
+ next if $curr->{type} ne 'cephfs';
+ my $cur_fs = $curr->{'fs-name'};
+ # match either: no fs-name configured and $fs is the default fs,
+ # or an fs-name that equals the requested fs
+ $res->{$storeid} = $storages->{$storeid}
+ if (!defined($cur_fs) && $is_default) || (defined($cur_fs) && $fs eq $cur_fs);
+ }
+
+ return $res;
+};
+
+# CLI-only API method: destroy a ceph filesystem by name, optionally removing
+# the pveceph-managed storage entries ('remove-storages') and the backing
+# metadata/data pools ('remove-pools'). Returns a worker UPID string.
+__PACKAGE__->register_method ({
+ name => 'destroyfs',
+ path => 'destroyfs',
+ method => 'DELETE',
+ description => "Destroy a Ceph filesystem",
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ name => {
+ description => "The ceph filesystem name.",
+ type => 'string',
+ },
+ 'remove-storages' => {
+ description => "Remove all pveceph-managed storages configured for this fs.",
+ type => 'boolean',
+ optional => 1,
+ default => 0,
+ },
+ 'remove-pools' => {
+ description => "Remove data and metadata pools configured for this fs.",
+ type => 'boolean',
+ optional => 1,
+ default => 0,
+ },
+ },
+ },
+ returns => { type => 'string' },
+ code => sub {
+ my ($param) = @_;
+
+ PVE::Ceph::Tools::check_ceph_inited();
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $user = $rpcenv->get_user();
+
+ my $fs_name = $param->{name};
+
+ # resolve the fs entry (for its pool names) and fail early if it
+ # does not exist
+ my $fs;
+ my $fs_list = PVE::Ceph::Tools::ls_fs();
+ for my $entry (@$fs_list) {
+ next if $entry->{name} ne $fs_name;
+ $fs = $entry;
+ last;
+ }
+ die "no such cephfs '$fs_name'\n" if !$fs;
+
+ # the actual destruction runs in a forked worker task
+ my $worker = sub {
+ my $rados = PVE::RADOS->new();
+
+ if ($param->{'remove-storages'}) {
+ # determine the cluster's default fs via 'fs dump', so that
+ # storages without an explicit 'fs-name' can be matched too
+ my $defaultfs;
+ my $fs_dump = $rados->mon_command({ prefix => "fs dump" });
+ for my $fs ($fs_dump->{filesystems}->@*) {
+ next if $fs->{id} != $fs_dump->{default_fscid};
+ $defaultfs = $fs->{mdsmap}->{fs_name};
+ }
+ warn "no default fs found, maybe not all relevant storages are removed\n"
+ if !defined($defaultfs);
+
+ my $storages = $get_storages->($fs_name, $fs_name eq ($defaultfs // ''));
+ # refuse to continue while any matching storage is still
+ # enabled - check all of them before deleting any
+ for my $storeid (keys %$storages) {
+ my $store = $storages->{$storeid};
+ if (!$store->{disable}) {
+ die "storage '$storeid' is not disabled, make sure to disable ".
+ "and unmount the storage first\n";
+ }
+ }
+
+ # best-effort removal: collect failures and die once at the end
+ my $err;
+ for my $storeid (keys %$storages) {
+ # skip external clusters, not managed by pveceph
+ next if $storages->{$storeid}->{monhost};
+ eval { PVE::API2::Storage::Config->delete({storage => $storeid}) };
+ if ($@) {
+ warn "failed to remove storage '$storeid': $@\n";
+ $err = 1;
+ }
+ }
+ die "failed to remove (some) storages - check log and remove manually!\n"
+ if $err;
+ }
+
+ PVE::Ceph::Tools::destroy_fs($fs_name, $rados);
+
+ if ($param->{'remove-pools'}) {
+ # pool removal failures are only warned about, so a partial
+ # failure does not abort the remaining cleanup
+ warn "removing metadata pool '$fs->{metadata_pool}'\n";
+ eval { PVE::Ceph::Tools::destroy_pool($fs->{metadata_pool}, $rados) };
+ warn "$@\n" if $@;
+
+ foreach my $pool ($fs->{data_pools}->@*) {
+ warn "removing data pool '$pool'\n";
+ eval { PVE::Ceph::Tools::destroy_pool($pool, $rados) };
+ warn "$@\n" if $@;
+ }
+ }
+
+ };
+ return $rpcenv->fork_worker('cephdestroyfs', $fs_name, $user, $worker);
+ }});
+
our $cmddef = {
init => [ 'PVE::API2::Ceph', 'init', [], { node => $nodename } ],
pool => {
@@ -256,6 +375,7 @@ our $cmddef = {
destroypool => { alias => 'pool destroy' },
fs => {
create => [ 'PVE::API2::Ceph::FS', 'createfs', [], { node => $nodename }],
+ destroy => [ __PACKAGE__, 'destroyfs', ['name'], { node => $nodename }],
},
osd => {
create => [ 'PVE::API2::Ceph::OSD', 'createosd', ['dev'], { node => $nodename }, $upid_exit],
diff --git a/PVE/Ceph/Tools.pm b/PVE/Ceph/Tools.pm
index 2f818276..36d7788a 100644
--- a/PVE/Ceph/Tools.pm
+++ b/PVE/Ceph/Tools.pm
@@ -340,6 +340,21 @@ sub create_fs {
});
}
+# Remove the named ceph filesystem via the 'fs rm' mon command.
+# $rados is an optional pre-existing PVE::RADOS connection; a new one is
+# created when omitted. Dies (via mon_command) on failure.
+# NOTE(review): ceph presumably refuses 'fs rm' while MDS daemons are still
+# active for the fs - callers are expected to have stopped them; confirm.
+sub destroy_fs {
+ my ($fs, $rados) = @_;
+
+ if (!defined($rados)) {
+ $rados = PVE::RADOS->new();
+ }
+
+ $rados->mon_command({
+ prefix => "fs rm",
+ fs_name => $fs,
+ # 'fs rm' requires explicit confirmation
+ 'yes_i_really_mean_it' => JSON::true,
+ format => 'plain',
+ });
+}
+
sub setup_pve_symlinks {
# fail if we find a real file instead of a link
if (-f $ceph_cfgpath) {
diff --git a/www/manager6/Utils.js b/www/manager6/Utils.js
index 274d4db2..38615c30 100644
--- a/www/manager6/Utils.js
+++ b/www/manager6/Utils.js
@@ -1831,6 +1831,7 @@ Ext.define('PVE.Utils', {
cephdestroymon: ['Ceph Monitor', gettext('Destroy')],
cephdestroyosd: ['Ceph OSD', gettext('Destroy')],
cephdestroypool: ['Ceph Pool', gettext('Destroy')],
+ cephdestroyfs: ['CephFS', gettext('Destroy')],
cephfscreate: ['CephFS', gettext('Create')],
cephsetpool: ['Ceph Pool', gettext('Edit')],
cephsetflags: ['', gettext('Change global Ceph flags')],
--
2.30.2
next prev parent reply other threads:[~2021-10-25 14:01 UTC|newest]
Thread overview: 18+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-10-25 14:01 [pve-devel] [PATCH storage/manager/docs v2] fix #3616: support multiple ceph filesystems Dominik Csapak
2021-10-25 14:01 ` [pve-devel] [PATCH storage v2 1/1] cephfs: add support for " Dominik Csapak
2021-11-05 12:54 ` [pve-devel] applied: " Thomas Lamprecht
2021-10-25 14:01 ` [pve-devel] [PATCH manager v2 01/11] api: ceph-mds: get mds state when multple ceph filesystems exist Dominik Csapak
2021-10-25 14:01 ` [pve-devel] [PATCH manager v2 02/11] ui: ceph: catch missing version for service list Dominik Csapak
2021-10-25 14:01 ` [pve-devel] [PATCH manager v2 03/11] api: cephfs: refactor {ls, create}_fs Dominik Csapak
2021-10-25 14:01 ` [pve-devel] [PATCH manager v2 04/11] api: cephfs: more checks on fs create Dominik Csapak
2021-10-25 14:01 ` [pve-devel] [PATCH manager v2 05/11] api: cephfs: add fs_name to 'is mds active' check Dominik Csapak
2021-10-25 14:01 ` [pve-devel] [PATCH manager v2 06/11] ui: ceph/ServiceList: refactor controller out Dominik Csapak
2021-10-25 14:01 ` [pve-devel] [PATCH manager v2 07/11] ui: ceph/fs: show fs for active mds Dominik Csapak
2021-10-25 14:01 ` [pve-devel] [PATCH manager v2 08/11] api: cephfs: add 'fs-name' for cephfs storage Dominik Csapak
2021-10-25 14:01 ` [pve-devel] [PATCH manager v2 09/11] ui: storage/cephfs: make ceph fs selectable Dominik Csapak
2021-10-25 14:01 ` [pve-devel] [PATCH manager v2 10/11] ui: ceph/fs: allow creating multiple cephfs Dominik Csapak
2021-10-25 14:01 ` Dominik Csapak [this message]
2021-10-25 14:01 ` [pve-devel] [PATCH docs v2 1/1] pveceph: improve documentation for destroying cephfs Dominik Csapak
2021-10-27 10:15 ` Aaron Lauterer
2021-10-27 10:48 ` [pve-devel] [PATCH storage/manager/docs v2] fix #3616: support multiple ceph filesystems Aaron Lauterer
2021-11-11 17:04 ` [pve-devel] applied-series: " Thomas Lamprecht
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20211025140139.2015470-13-d.csapak@proxmox.com \
--to=d.csapak@proxmox.com \
--cc=pve-devel@lists.proxmox.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox