all lists on lists.proxmox.com
 help / color / mirror / Atom feed
From: Alwin Antreich <a.antreich@proxmox.com>
To: pve-devel@lists.proxmox.com
Subject: [pve-devel] [PATCH manager v3 01/10] api: ceph: subclass pools
Date: Tue, 12 Jan 2021 11:21:44 +0100	[thread overview]
Message-ID: <20210112102153.3215121-2-a.antreich@proxmox.com> (raw)
In-Reply-To: <20210112102153.3215121-1-a.antreich@proxmox.com>

Move the pool endpoints into their own subclass for better handling, since the pool API got more entries.

Signed-off-by: Alwin Antreich <a.antreich@proxmox.com>
---
 PVE/API2/Ceph/Makefile |   1 +
 PVE/API2/Ceph.pm       | 378 +--------------------------------------
 PVE/API2/Ceph/Pools.pm | 395 +++++++++++++++++++++++++++++++++++++++++
 PVE/CLI/pveceph.pm     |   8 +-
 4 files changed, 406 insertions(+), 376 deletions(-)
 create mode 100644 PVE/API2/Ceph/Pools.pm

diff --git a/PVE/API2/Ceph/Makefile b/PVE/API2/Ceph/Makefile
index 5b6493d5..45daafda 100644
--- a/PVE/API2/Ceph/Makefile
+++ b/PVE/API2/Ceph/Makefile
@@ -5,6 +5,7 @@ PERLSOURCE= 			\
 	MON.pm			\
 	OSD.pm			\
 	FS.pm			\
+	Pools.pm		\
 	MDS.pm
 
 all:
diff --git a/PVE/API2/Ceph.pm b/PVE/API2/Ceph.pm
index 0c647489..ad300b12 100644
--- a/PVE/API2/Ceph.pm
+++ b/PVE/API2/Ceph.pm
@@ -23,6 +23,7 @@ use PVE::API2::Ceph::FS;
 use PVE::API2::Ceph::MDS;
 use PVE::API2::Ceph::MGR;
 use PVE::API2::Ceph::MON;
+use PVE::API2::Ceph::Pools;
 use PVE::API2::Storage::Config;
 
 use base qw(PVE::RESTHandler);
@@ -54,6 +55,11 @@ __PACKAGE__->register_method ({
     path => 'fs',
 });
 
+__PACKAGE__->register_method ({
+    subclass => "PVE::API2::Ceph::Pools",
+    path => 'pools',
+});
+
 __PACKAGE__->register_method ({
     name => 'index',
     path => '',
@@ -239,35 +245,6 @@ __PACKAGE__->register_method ({
 	return $res;
     }});
 
-my $add_storage = sub {
-    my ($pool, $storeid) = @_;
-
-    my $storage_params = {
-	type => 'rbd',
-	pool => $pool,
-	storage => $storeid,
-	krbd => 0,
-	content => 'rootdir,images',
-    };
-
-    PVE::API2::Storage::Config->create($storage_params);
-};
-
-my $get_storages = sub {
-    my ($pool) = @_;
-
-    my $cfg = PVE::Storage::config();
-
-    my $storages = $cfg->{ids};
-    my $res = {};
-    foreach my $storeid (keys %$storages) {
-	my $curr = $storages->{$storeid};
-	$res->{$storeid} = $storages->{$storeid}
-	    if $curr->{type} eq 'rbd' && $pool eq $curr->{pool};
-    }
-
-    return $res;
-};
 
 __PACKAGE__->register_method ({
     name => 'init',
@@ -583,224 +560,6 @@ __PACKAGE__->register_method ({
 	return PVE::Ceph::Tools::ceph_cluster_status();
     }});
 
-__PACKAGE__->register_method ({
-    name => 'lspools',
-    path => 'pools',
-    method => 'GET',
-    description => "List all pools.",
-    proxyto => 'node',
-    protected => 1,
-    permissions => {
-	check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
-    },
-    parameters => {
-	additionalProperties => 0,
-	properties => {
-	    node => get_standard_option('pve-node'),
-	},
-    },
-    returns => {
-	type => 'array',
-	items => {
-	    type => "object",
-	    properties => {
-		pool => { type => 'integer', title => 'ID' },
-		pool_name => { type => 'string', title => 'Name' },
-		size => { type => 'integer', title => 'Size' },
-		min_size => { type => 'integer', title => 'Min Size' },
-		pg_num => { type => 'integer', title => 'PG Num' },
-		pg_autoscale_mode => { type => 'string', optional => 1, title => 'PG Autoscale Mode' },
-		crush_rule => { type => 'integer', title => 'Crush Rule' },
-		crush_rule_name => { type => 'string', title => 'Crush Rule Name' },
-		percent_used => { type => 'number', title => '%-Used' },
-		bytes_used => { type => 'integer', title => 'Used' },
-	    },
-	},
-	links => [ { rel => 'child', href => "{pool_name}" } ],
-    },
-    code => sub {
-	my ($param) = @_;
-
-	PVE::Ceph::Tools::check_ceph_inited();
-
-	my $rados = PVE::RADOS->new();
-
-	my $stats = {};
-	my $res = $rados->mon_command({ prefix => 'df' });
-
-	foreach my $d (@{$res->{pools}}) {
-	    next if !$d->{stats};
-	    next if !defined($d->{id});
-	    $stats->{$d->{id}} = $d->{stats};
-	}
-
-	$res = $rados->mon_command({ prefix => 'osd dump' });
-	my $rulestmp = $rados->mon_command({ prefix => 'osd crush rule dump'});
-
-	my $rules = {};
-	for my $rule (@$rulestmp) {
-	    $rules->{$rule->{rule_id}} = $rule->{rule_name};
-	}
-
-	my $data = [];
-	my $attr_list = [
-	    'pool',
-	    'pool_name',
-	    'size',
-	    'min_size',
-	    'pg_num',
-	    'crush_rule',
-	    'pg_autoscale_mode',
-	];
-
-	foreach my $e (@{$res->{pools}}) {
-	    my $d = {};
-	    foreach my $attr (@$attr_list) {
-		$d->{$attr} = $e->{$attr} if defined($e->{$attr});
-	    }
-
-	    if (defined($d->{crush_rule}) && defined($rules->{$d->{crush_rule}})) {
-		$d->{crush_rule_name} = $rules->{$d->{crush_rule}};
-	    }
-
-	    if (my $s = $stats->{$d->{pool}}) {
-		$d->{bytes_used} = $s->{bytes_used};
-		$d->{percent_used} = $s->{percent_used};
-	    }
-	    push @$data, $d;
-	}
-
-
-	return $data;
-    }});
-
-
-my $ceph_pool_common_options = sub {
-    my ($nodefault) = shift;
-    my $options = {
-	name => {
-	    description => "The name of the pool. It must be unique.",
-	    type => 'string',
-	},
-	size => {
-	    description => 'Number of replicas per object',
-	    type => 'integer',
-	    default => 3,
-	    optional => 1,
-	    minimum => 1,
-	    maximum => 7,
-	},
-	min_size => {
-	    description => 'Minimum number of replicas per object',
-	    type => 'integer',
-	    default => 2,
-	    optional => 1,
-	    minimum => 1,
-	    maximum => 7,
-	},
-	pg_num => {
-	    description => "Number of placement groups.",
-	    type => 'integer',
-	    default => 128,
-	    optional => 1,
-	    minimum => 8,
-	    maximum => 32768,
-	},
-	crush_rule => {
-	    description => "The rule to use for mapping object placement in the cluster.",
-	    type => 'string',
-	    optional => 1,
-	},
-	application => {
-	    description => "The application of the pool.",
-	    default => 'rbd',
-	    type => 'string',
-	    enum => ['rbd', 'cephfs', 'rgw'],
-	    optional => 1,
-	},
-	pg_autoscale_mode => {
-	    description => "The automatic PG scaling mode of the pool.",
-	    type => 'string',
-	    enum => ['on', 'off', 'warn'],
-	    default => 'warn',
-	    optional => 1,
-	},
-    };
-
-    if ($nodefault) {
-	delete $options->{$_}->{default} for keys %$options;
-    }
-    return $options;
-};
-
-
-__PACKAGE__->register_method ({
-    name => 'createpool',
-    path => 'pools',
-    method => 'POST',
-    description => "Create POOL",
-    proxyto => 'node',
-    protected => 1,
-    permissions => {
-	check => ['perm', '/', [ 'Sys.Modify' ]],
-    },
-    parameters => {
-	additionalProperties => 0,
-	properties => {
-	    node => get_standard_option('pve-node'),
-	    add_storages => {
-		description => "Configure VM and CT storage using the new pool.",
-		type => 'boolean',
-		optional => 1,
-	    },
-	    %{ $ceph_pool_common_options->() },
-	},
-    },
-    returns => { type => 'string' },
-    code => sub {
-	my ($param) = @_;
-
-	PVE::Cluster::check_cfs_quorum();
-	PVE::Ceph::Tools::check_ceph_configured();
-
-	my $pool = extract_param($param, 'name');
-	my $node = extract_param($param, 'node');
-	my $add_storages = extract_param($param, 'add_storages');
-
-	my $rpcenv = PVE::RPCEnvironment::get();
-	my $user = $rpcenv->get_user();
-
-	if ($add_storages) {
-	    $rpcenv->check($user, '/storage', ['Datastore.Allocate']);
-	    die "pool name contains characters which are illegal for storage naming\n"
-		if !PVE::JSONSchema::parse_storage_id($pool);
-	}
-
-	# pool defaults
-	$param->{pg_num} //= 128;
-	$param->{size} //= 3;
-	$param->{min_size} //= 2;
-	$param->{application} //= 'rbd';
-	$param->{pg_autoscale_mode} //= 'warn';
-
-	my $worker = sub {
-
-	    PVE::Ceph::Tools::create_pool($pool, $param);
-
-	    if ($add_storages) {
-		my $err;
-		eval { $add_storage->($pool, "${pool}"); };
-		if ($@) {
-		    warn "failed to add storage: $@";
-		    $err = 1;
-		}
-		die "adding storage for pool '$pool' failed, check log and add manually!\n"
-		    if $err;
-	    }
-	};
-
-	return $rpcenv->fork_worker('cephcreatepool', $pool,  $user, $worker);
-    }});
 
 my $possible_flags = PVE::Ceph::Tools::get_possible_osd_flags();
 my $possible_flags_list = [ sort keys %$possible_flags ];
@@ -910,131 +669,6 @@ __PACKAGE__->register_method ({
 	return undef;
     }});
 
-__PACKAGE__->register_method ({
-    name => 'destroypool',
-    path => 'pools/{name}',
-    method => 'DELETE',
-    description => "Destroy pool",
-    proxyto => 'node',
-    protected => 1,
-    permissions => {
-	check => ['perm', '/', [ 'Sys.Modify' ]],
-    },
-    parameters => {
-	additionalProperties => 0,
-	properties => {
-	    node => get_standard_option('pve-node'),
-	    name => {
-		description => "The name of the pool. It must be unique.",
-		type => 'string',
-	    },
-	    force => {
-		description => "If true, destroys pool even if in use",
-		type => 'boolean',
-		optional => 1,
-		default => 0,
-	    },
-	    remove_storages => {
-		description => "Remove all pveceph-managed storages configured for this pool",
-		type => 'boolean',
-		optional => 1,
-		default => 0,
-	    },
-	},
-    },
-    returns => { type => 'string' },
-    code => sub {
-	my ($param) = @_;
-
-	PVE::Ceph::Tools::check_ceph_inited();
-
-	my $rpcenv = PVE::RPCEnvironment::get();
-	my $user = $rpcenv->get_user();
-	$rpcenv->check($user, '/storage', ['Datastore.Allocate'])
-	    if $param->{remove_storages};
-
-	my $pool = $param->{name};
-
-	my $worker = sub {
-	    my $storages = $get_storages->($pool);
-
-	    # if not forced, destroy ceph pool only when no
-	    # vm disks are on it anymore
-	    if (!$param->{force}) {
-		my $storagecfg = PVE::Storage::config();
-		foreach my $storeid (keys %$storages) {
-		    my $storage = $storages->{$storeid};
-
-		    # check if any vm disks are on the pool
-		    print "checking storage '$storeid' for RBD images..\n";
-		    my $res = PVE::Storage::vdisk_list($storagecfg, $storeid);
-		    die "ceph pool '$pool' still in use by storage '$storeid'\n"
-			if @{$res->{$storeid}} != 0;
-		}
-	    }
-
-	    PVE::Ceph::Tools::destroy_pool($pool);
-
-	    if ($param->{remove_storages}) {
-		my $err;
-		foreach my $storeid (keys %$storages) {
-		    # skip external clusters, not managed by pveceph
-		    next if $storages->{$storeid}->{monhost};
-		    eval { PVE::API2::Storage::Config->delete({storage => $storeid}) };
-		    if ($@) {
-			warn "failed to remove storage '$storeid': $@\n";
-			$err = 1;
-		    }
-		}
-		die "failed to remove (some) storages - check log and remove manually!\n"
-		    if $err;
-	    }
-	};
-	return $rpcenv->fork_worker('cephdestroypool', $pool,  $user, $worker);
-    }});
-
-
-__PACKAGE__->register_method ({
-    name => 'setpool',
-    path => 'pools/{name}',
-    method => 'PUT',
-    description => "Change POOL settings",
-    proxyto => 'node',
-    protected => 1,
-    permissions => {
-	check => ['perm', '/', [ 'Sys.Modify' ]],
-    },
-    parameters => {
-	additionalProperties => 0,
-	properties => {
-	    node => get_standard_option('pve-node'),
-	    %{ $ceph_pool_common_options->('nodefault') },
-	},
-    },
-    returns => { type => 'string' },
-    code => sub {
-	my ($param) = @_;
-
-	PVE::Ceph::Tools::check_ceph_configured();
-
-	my $rpcenv = PVE::RPCEnvironment::get();
-	my $authuser = $rpcenv->get_user();
-
-	my $pool = $param->{name};
-	my $ceph_param = \%$param;
-	for my $item ('name', 'node') {
-	    # not ceph parameters
-	    delete $ceph_param->{$item};
-	}
-
-	my $worker = sub {
-	    PVE::Ceph::Tools::set_pool($pool, $ceph_param);
-	};
-
-	return $rpcenv->fork_worker('cephsetpool', $pool,  $authuser, $worker);
-    }});
-
-
 __PACKAGE__->register_method ({
     name => 'crush',
     path => 'crush',
diff --git a/PVE/API2/Ceph/Pools.pm b/PVE/API2/Ceph/Pools.pm
new file mode 100644
index 00000000..fac21301
--- /dev/null
+++ b/PVE/API2/Ceph/Pools.pm
@@ -0,0 +1,395 @@
+package PVE::API2::Ceph::Pools;
+
+use strict;
+use warnings;
+
+use PVE::Ceph::Tools;
+use PVE::Ceph::Services;
+use PVE::JSONSchema qw(get_standard_option);
+use PVE::RADOS;
+use PVE::RESTHandler;
+use PVE::RPCEnvironment;
+use PVE::Storage;
+use PVE::Tools qw(extract_param);
+
+use PVE::API2::Storage::Config;
+
+use base qw(PVE::RESTHandler);
+
+__PACKAGE__->register_method ({
+    name => 'lspools',
+    path => '',
+    method => 'GET',
+    description => "List all pools.",
+    proxyto => 'node',
+    protected => 1,
+    permissions => {
+	check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
+    },
+    parameters => {
+	additionalProperties => 0,
+	properties => {
+	    node => get_standard_option('pve-node'),
+	},
+    },
+    returns => {
+	type => 'array',
+	items => {
+	    type => "object",
+	    properties => {
+		pool => { type => 'integer', title => 'ID' },
+		pool_name => { type => 'string', title => 'Name' },
+		size => { type => 'integer', title => 'Size' },
+		min_size => { type => 'integer', title => 'Min Size' },
+		pg_num => { type => 'integer', title => 'PG Num' },
+		pg_autoscale_mode => { type => 'string', optional => 1, title => 'PG Autoscale Mode' },
+		crush_rule => { type => 'integer', title => 'Crush Rule' },
+		crush_rule_name => { type => 'string', title => 'Crush Rule Name' },
+		percent_used => { type => 'number', title => '%-Used' },
+		bytes_used => { type => 'integer', title => 'Used' },
+	    },
+	},
+	links => [ { rel => 'child', href => "{pool_name}" } ],
+    },
+    code => sub {
+	my ($param) = @_;
+
+	PVE::Ceph::Tools::check_ceph_inited();
+
+	my $rados = PVE::RADOS->new();
+
+	my $stats = {};
+	my $res = $rados->mon_command({ prefix => 'df' });
+
+	foreach my $d (@{$res->{pools}}) {
+	    next if !$d->{stats};
+	    next if !defined($d->{id});
+	    $stats->{$d->{id}} = $d->{stats};
+	}
+
+	$res = $rados->mon_command({ prefix => 'osd dump' });
+	my $rulestmp = $rados->mon_command({ prefix => 'osd crush rule dump'});
+
+	my $rules = {};
+	for my $rule (@$rulestmp) {
+	    $rules->{$rule->{rule_id}} = $rule->{rule_name};
+	}
+
+	my $data = [];
+	my $attr_list = [
+	    'pool',
+	    'pool_name',
+	    'size',
+	    'min_size',
+	    'pg_num',
+	    'crush_rule',
+	    'pg_autoscale_mode',
+	];
+
+	foreach my $e (@{$res->{pools}}) {
+	    my $d = {};
+	    foreach my $attr (@$attr_list) {
+		$d->{$attr} = $e->{$attr} if defined($e->{$attr});
+	    }
+
+	    if (defined($d->{crush_rule}) && defined($rules->{$d->{crush_rule}})) {
+		$d->{crush_rule_name} = $rules->{$d->{crush_rule}};
+	    }
+
+	    if (my $s = $stats->{$d->{pool}}) {
+		$d->{bytes_used} = $s->{bytes_used};
+		$d->{percent_used} = $s->{percent_used};
+	    }
+	    push @$data, $d;
+	}
+
+
+	return $data;
+    }});
+
+
+my $ceph_pool_common_options = sub {
+    my ($nodefault) = shift;
+    my $options = {
+	name => {
+	    description => "The name of the pool. It must be unique.",
+	    type => 'string',
+	},
+	size => {
+	    description => 'Number of replicas per object',
+	    type => 'integer',
+	    default => 3,
+	    optional => 1,
+	    minimum => 1,
+	    maximum => 7,
+	},
+	min_size => {
+	    description => 'Minimum number of replicas per object',
+	    type => 'integer',
+	    default => 2,
+	    optional => 1,
+	    minimum => 1,
+	    maximum => 7,
+	},
+	pg_num => {
+	    description => "Number of placement groups.",
+	    type => 'integer',
+	    default => 128,
+	    optional => 1,
+	    minimum => 8,
+	    maximum => 32768,
+	},
+	crush_rule => {
+	    description => "The rule to use for mapping object placement in the cluster.",
+	    type => 'string',
+	    optional => 1,
+	},
+	application => {
+	    description => "The application of the pool.",
+	    default => 'rbd',
+	    type => 'string',
+	    enum => ['rbd', 'cephfs', 'rgw'],
+	    optional => 1,
+	},
+	pg_autoscale_mode => {
+	    description => "The automatic PG scaling mode of the pool.",
+	    type => 'string',
+	    enum => ['on', 'off', 'warn'],
+	    default => 'warn',
+	    optional => 1,
+	},
+    };
+
+    if ($nodefault) {
+	delete $options->{$_}->{default} for keys %$options;
+    }
+    return $options;
+};
+
+
+my $add_storage = sub {
+    my ($pool, $storeid) = @_;
+
+    my $storage_params = {
+	type => 'rbd',
+	pool => $pool,
+	storage => $storeid,
+	krbd => 0,
+	content => 'rootdir,images',
+    };
+
+    PVE::API2::Storage::Config->create($storage_params);
+};
+
+my $get_storages = sub {
+    my ($pool) = @_;
+
+    my $cfg = PVE::Storage::config();
+
+    my $storages = $cfg->{ids};
+    my $res = {};
+    foreach my $storeid (keys %$storages) {
+	my $curr = $storages->{$storeid};
+	$res->{$storeid} = $storages->{$storeid}
+	    if $curr->{type} eq 'rbd' && $pool eq $curr->{pool};
+    }
+
+    return $res;
+};
+
+
+__PACKAGE__->register_method ({
+    name => 'createpool',
+    path => '',
+    method => 'POST',
+    description => "Create POOL",
+    proxyto => 'node',
+    protected => 1,
+    permissions => {
+	check => ['perm', '/', [ 'Sys.Modify' ]],
+    },
+    parameters => {
+	additionalProperties => 0,
+	properties => {
+	    node => get_standard_option('pve-node'),
+	    add_storages => {
+		description => "Configure VM and CT storage using the new pool.",
+		type => 'boolean',
+		optional => 1,
+	    },
+	    %{ $ceph_pool_common_options->() },
+	},
+    },
+    returns => { type => 'string' },
+    code => sub {
+	my ($param) = @_;
+
+	PVE::Cluster::check_cfs_quorum();
+	PVE::Ceph::Tools::check_ceph_configured();
+
+	my $pool = extract_param($param, 'name');
+	my $node = extract_param($param, 'node');
+	my $add_storages = extract_param($param, 'add_storages');
+
+	my $rpcenv = PVE::RPCEnvironment::get();
+	my $user = $rpcenv->get_user();
+
+	if ($add_storages) {
+	    $rpcenv->check($user, '/storage', ['Datastore.Allocate']);
+	    die "pool name contains characters which are illegal for storage naming\n"
+		if !PVE::JSONSchema::parse_storage_id($pool);
+	}
+
+	# pool defaults
+	$param->{pg_num} //= 128;
+	$param->{size} //= 3;
+	$param->{min_size} //= 2;
+	$param->{application} //= 'rbd';
+	$param->{pg_autoscale_mode} //= 'warn';
+
+	my $worker = sub {
+
+	    PVE::Ceph::Tools::create_pool($pool, $param);
+
+	    if ($add_storages) {
+		my $err;
+		eval { $add_storage->($pool, "${pool}"); };
+		if ($@) {
+		    warn "failed to add storage: $@";
+		    $err = 1;
+		}
+		die "adding storage for pool '$pool' failed, check log and add manually!\n"
+		    if $err;
+	    }
+	};
+
+	return $rpcenv->fork_worker('cephcreatepool', $pool,  $user, $worker);
+    }});
+
+
+__PACKAGE__->register_method ({
+    name => 'destroypool',
+    path => '{name}',
+    method => 'DELETE',
+    description => "Destroy pool",
+    proxyto => 'node',
+    protected => 1,
+    permissions => {
+	check => ['perm', '/', [ 'Sys.Modify' ]],
+    },
+    parameters => {
+	additionalProperties => 0,
+	properties => {
+	    node => get_standard_option('pve-node'),
+	    name => {
+		description => "The name of the pool. It must be unique.",
+		type => 'string',
+	    },
+	    force => {
+		description => "If true, destroys pool even if in use",
+		type => 'boolean',
+		optional => 1,
+		default => 0,
+	    },
+	    remove_storages => {
+		description => "Remove all pveceph-managed storages configured for this pool",
+		type => 'boolean',
+		optional => 1,
+		default => 0,
+	    },
+	},
+    },
+    returns => { type => 'string' },
+    code => sub {
+	my ($param) = @_;
+
+	PVE::Ceph::Tools::check_ceph_inited();
+
+	my $rpcenv = PVE::RPCEnvironment::get();
+	my $user = $rpcenv->get_user();
+	$rpcenv->check($user, '/storage', ['Datastore.Allocate'])
+	    if $param->{remove_storages};
+
+	my $pool = $param->{name};
+
+	my $worker = sub {
+	    my $storages = $get_storages->($pool);
+
+	    # if not forced, destroy ceph pool only when no
+	    # vm disks are on it anymore
+	    if (!$param->{force}) {
+		my $storagecfg = PVE::Storage::config();
+		foreach my $storeid (keys %$storages) {
+		    my $storage = $storages->{$storeid};
+
+		    # check if any vm disks are on the pool
+		    print "checking storage '$storeid' for RBD images..\n";
+		    my $res = PVE::Storage::vdisk_list($storagecfg, $storeid);
+		    die "ceph pool '$pool' still in use by storage '$storeid'\n"
+			if @{$res->{$storeid}} != 0;
+		}
+	    }
+
+	    PVE::Ceph::Tools::destroy_pool($pool);
+
+	    if ($param->{remove_storages}) {
+		my $err;
+		foreach my $storeid (keys %$storages) {
+		    # skip external clusters, not managed by pveceph
+		    next if $storages->{$storeid}->{monhost};
+		    eval { PVE::API2::Storage::Config->delete({storage => $storeid}) };
+		    if ($@) {
+			warn "failed to remove storage '$storeid': $@\n";
+			$err = 1;
+		    }
+		}
+		die "failed to remove (some) storages - check log and remove manually!\n"
+		    if $err;
+	    }
+	};
+	return $rpcenv->fork_worker('cephdestroypool', $pool,  $user, $worker);
+    }});
+
+
+__PACKAGE__->register_method ({
+    name => 'setpool',
+    path => '{name}',
+    method => 'PUT',
+    description => "Change POOL settings",
+    proxyto => 'node',
+    protected => 1,
+    permissions => {
+	check => ['perm', '/', [ 'Sys.Modify' ]],
+    },
+    parameters => {
+	additionalProperties => 0,
+	properties => {
+	    node => get_standard_option('pve-node'),
+	    %{ $ceph_pool_common_options->('nodefault') },
+	},
+    },
+    returns => { type => 'string' },
+    code => sub {
+	my ($param) = @_;
+
+	PVE::Ceph::Tools::check_ceph_configured();
+
+	my $rpcenv = PVE::RPCEnvironment::get();
+	my $authuser = $rpcenv->get_user();
+
+	my $pool = $param->{name};
+	my $ceph_param = \%$param;
+	for my $item ('name', 'node') {
+	    # not ceph parameters
+	    delete $ceph_param->{$item};
+	}
+
+	my $worker = sub {
+	    PVE::Ceph::Tools::set_pool($pool, $ceph_param);
+	};
+
+	return $rpcenv->fork_worker('cephsetpool', $pool,  $authuser, $worker);
+    }});
+
+
+1;
diff --git a/PVE/CLI/pveceph.pm b/PVE/CLI/pveceph.pm
index edcc7ded..4114df7e 100755
--- a/PVE/CLI/pveceph.pm
+++ b/PVE/CLI/pveceph.pm
@@ -199,7 +199,7 @@ __PACKAGE__->register_method ({
 our $cmddef = {
     init => [ 'PVE::API2::Ceph', 'init', [], { node => $nodename } ],
     pool => {
-	ls => [ 'PVE::API2::Ceph', 'lspools', [], { node => $nodename }, sub {
+	ls => [ 'PVE::API2::Ceph::Pools', 'lspools', [], { node => $nodename }, sub {
 	    my ($data, $schema, $options) = @_;
 	    PVE::CLIFormatter::print_api_result($data, $schema,
 		[
@@ -214,9 +214,9 @@ our $cmddef = {
 		],
 		$options);
 	}, $PVE::RESTHandler::standard_output_options],
-	create => [ 'PVE::API2::Ceph', 'createpool', ['name'], { node => $nodename }],
-	destroy => [ 'PVE::API2::Ceph', 'destroypool', ['name'], { node => $nodename } ],
-	set => [ 'PVE::API2::Ceph', 'setpool', ['name'], { node => $nodename } ],
+	create => [ 'PVE::API2::Ceph::Pools', 'createpool', ['name'], { node => $nodename }],
+	destroy => [ 'PVE::API2::Ceph::Pools', 'destroypool', ['name'], { node => $nodename } ],
+	set => [ 'PVE::API2::Ceph::Pools', 'setpool', ['name'], { node => $nodename } ],
     },
     lspools => { alias => 'pool ls' },
     createpool => { alias => 'pool create' },
-- 
2.29.2





  reply	other threads:[~2021-01-12 10:22 UTC|newest]

Thread overview: 15+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-01-12 10:21 [pve-devel] [PATCH manager v3 00/10] ceph: allow pools settings to be changed Alwin Antreich
2021-01-12 10:21 ` Alwin Antreich [this message]
2021-02-06 13:28   ` [pve-devel] applied: [PATCH manager v3 01/10] api: ceph: subclass pools Thomas Lamprecht
2021-01-12 10:21 ` [pve-devel] [PATCH manager v3 02/10] ceph: setpool, use parameter extraction instead Alwin Antreich
2021-02-06 13:29   ` [pve-devel] applied: " Thomas Lamprecht
2021-01-12 10:21 ` [pve-devel] [PATCH manager v3 03/10] ceph: add titles to ceph_pool_common_options Alwin Antreich
2021-02-06 13:29   ` [pve-devel] applied: " Thomas Lamprecht
2021-01-12 10:21 ` [pve-devel] [PATCH manager v3 04/10] ceph: add get api call for single pool Alwin Antreich
2021-01-14 16:49   ` Alwin Antreich
2021-01-12 10:21 ` [pve-devel] [PATCH manager v3 05/10] ceph: add autoscale_status to api calls Alwin Antreich
2021-01-12 10:21 ` [pve-devel] [PATCH manager v3 06/10] ceph: gui: add autoscale & flatten pool view Alwin Antreich
2021-01-12 10:21 ` [pve-devel] [PATCH manager v3 07/10] ceph: set allowed minimal pg_num down to 1 Alwin Antreich
2021-01-12 10:21 ` [pve-devel] [PATCH manager v3 08/10] ceph: gui: rework pool input panel Alwin Antreich
2021-01-12 10:21 ` [pve-devel] [PATCH manager v3 09/10] ceph: gui: add min num of PG Alwin Antreich
2021-01-12 10:21 ` [pve-devel] [PATCH manager v3 10/10] fix: ceph: always set pool size first Alwin Antreich

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20210112102153.3215121-2-a.antreich@proxmox.com \
    --to=a.antreich@proxmox.com \
    --cc=pve-devel@lists.proxmox.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.
Service provided by Proxmox Server Solutions GmbH | Privacy | Legal