public inbox for pve-devel@lists.proxmox.com
* [pve-devel] [PATCH manager 1/2] api: ceph: deprecate pools in favor or pool
@ 2022-12-09 12:58 Aaron Lauterer
  2022-12-09 12:58 ` [pve-devel] [PATCH manager 2/2] ui: ceph: adapt urls to new ceph/pool endpoint Aaron Lauterer
                   ` (3 more replies)
  0 siblings, 4 replies; 5+ messages in thread
From: Aaron Lauterer @ 2022-12-09 12:58 UTC (permalink / raw)
  To: pve-devel

/nodes/{node}/ceph/pools/{pool} returns the pool details right away on a
GET, which makes it bad practice to add additional sub API endpoints
underneath it.

By deprecating it and replacing it with /nodes/{node}/ceph/pool/{pool}
(singular instead of plural), we can turn that into an index GET
response, making it possible to expand it further in the future.

The GET call returning the pool details is moved to
/nodes/{node}/ceph/pool/{pool}/status
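
To illustrate (not part of the patch; <node>/<pool> are placeholders),
the resulting layout queried via pvesh:

    # deprecated: pool details returned directly
    pvesh get /nodes/<node>/ceph/pools/<pool>

    # new: index with child endpoints, details moved to 'status'
    pvesh get /nodes/<node>/ceph/pool/<pool>
    pvesh get /nodes/<node>/ceph/pool/<pool>/status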

The code in the new Pool.pm is basically a copy of Pools.pm, to avoid
close coupling with the old code, as the two may well diverge before we
can entirely remove the old code.

Signed-off-by: Aaron Lauterer <a.lauterer@proxmox.com>
---
The next step is to add a pool/{name}/namespace API so that we can list
available namespaces and maybe also manage them via the API/UI in the
future.

 PVE/API2/Ceph.pm       |   7 +
 PVE/API2/Ceph/Makefile |   1 +
 PVE/API2/Ceph/Pool.pm  | 801 +++++++++++++++++++++++++++++++++++++++++
 PVE/API2/Ceph/Pools.pm |  11 +-
 4 files changed, 815 insertions(+), 5 deletions(-)
 create mode 100644 PVE/API2/Ceph/Pool.pm

diff --git a/PVE/API2/Ceph.pm b/PVE/API2/Ceph.pm
index f3442408..946aebd3 100644
--- a/PVE/API2/Ceph.pm
+++ b/PVE/API2/Ceph.pm
@@ -23,6 +23,7 @@ use PVE::API2::Ceph::FS;
 use PVE::API2::Ceph::MDS;
 use PVE::API2::Ceph::MGR;
 use PVE::API2::Ceph::MON;
+use PVE::API2::Ceph::Pool;
 use PVE::API2::Ceph::Pools;
 use PVE::API2::Storage::Config;
 
@@ -55,6 +56,12 @@ __PACKAGE__->register_method ({
     path => 'fs',
 });
 
+__PACKAGE__->register_method ({
+    subclass => "PVE::API2::Ceph::Pool",
+    path => 'pool',
+});
+
+# TODO: deprecated, remove with PVE 8
 __PACKAGE__->register_method ({
     subclass => "PVE::API2::Ceph::Pools",
     path => 'pools',
diff --git a/PVE/API2/Ceph/Makefile b/PVE/API2/Ceph/Makefile
index 45daafda..5d6f642b 100644
--- a/PVE/API2/Ceph/Makefile
+++ b/PVE/API2/Ceph/Makefile
@@ -5,6 +5,7 @@ PERLSOURCE= 			\
 	MON.pm			\
 	OSD.pm			\
 	FS.pm			\
+	Pool.pm			\
 	Pools.pm		\
 	MDS.pm
 
diff --git a/PVE/API2/Ceph/Pool.pm b/PVE/API2/Ceph/Pool.pm
new file mode 100644
index 00000000..cd46311b
--- /dev/null
+++ b/PVE/API2/Ceph/Pool.pm
@@ -0,0 +1,801 @@
+package PVE::API2::Ceph::Pool;
+
+use strict;
+use warnings;
+
+use PVE::Ceph::Tools;
+use PVE::Ceph::Services;
+use PVE::JSONSchema qw(get_standard_option parse_property_string);
+use PVE::RADOS;
+use PVE::RESTHandler;
+use PVE::RPCEnvironment;
+use PVE::Storage;
+use PVE::Tools qw(extract_param);
+
+use PVE::API2::Storage::Config;
+
+use base qw(PVE::RESTHandler);
+
+my $get_autoscale_status = sub {
+    my ($rados) = @_;
+
+    $rados = PVE::RADOS->new() if !defined($rados);
+
+    my $autoscale = $rados->mon_command({
+	    prefix => 'osd pool autoscale-status'});
+
+    my $data;
+    foreach my $p (@$autoscale) {
+	$data->{$p->{pool_name}} = $p;
+    }
+
+    return $data;
+};
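+# Illustrative result shape (not from the patch): the entries of
+# 'osd pool autoscale-status' keyed by pool name, e.g.
+#   { rbd => { pool_name => 'rbd', pg_num_final => 32, ... } }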
+
+
+__PACKAGE__->register_method ({
+    name => 'lspools',
+    path => '',
+    method => 'GET',
+    description => "List all pools.",
+    proxyto => 'node',
+    protected => 1,
+    permissions => {
+	check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
+    },
+    parameters => {
+	additionalProperties => 0,
+	properties => {
+	    node => get_standard_option('pve-node'),
+	},
+    },
+    returns => {
+	type => 'array',
+	items => {
+	    type => "object",
+	    properties => {
+		pool => {
+		    type => 'integer',
+		    title => 'ID',
+		},
+		pool_name => {
+		    type => 'string',
+		    title => 'Name',
+		},
+		size => {
+		    type => 'integer',
+		    title => 'Size',
+		},
+		type => {
+		    type => 'string',
+		    title => 'Type',
+		    enum => ['replicated', 'erasure', 'unknown'],
+		},
+		min_size => {
+		    type => 'integer',
+		    title => 'Min Size',
+		},
+		pg_num => {
+		    type => 'integer',
+		    title => 'PG Num',
+		},
+		pg_num_min => {
+		    type => 'integer',
+		    title => 'min. PG Num',
+		    optional => 1,
+		},
+		pg_num_final => {
+		    type => 'integer',
+		    title => 'Optimal PG Num',
+		    optional => 1,
+		},
+		pg_autoscale_mode => {
+		    type => 'string',
+		    title => 'PG Autoscale Mode',
+		    optional => 1,
+		},
+		crush_rule => {
+		    type => 'integer',
+		    title => 'Crush Rule',
+		},
+		crush_rule_name => {
+		    type => 'string',
+		    title => 'Crush Rule Name',
+		},
+		percent_used => {
+		    type => 'number',
+		    title => '%-Used',
+		},
+		bytes_used => {
+		    type => 'integer',
+		    title => 'Used',
+		},
+		target_size => {
+		    type => 'integer',
+		    title => 'PG Autoscale Target Size',
+		    optional => 1,
+		},
+		target_size_ratio => {
+		    type => 'number',
+		    title => 'PG Autoscale Target Ratio',
+		    optional => 1,
+		},
+		autoscale_status => {
+		    type => 'object',
+		    title => 'Autoscale Status',
+		    optional => 1,
+		},
+		application_metadata => {
+		    type => 'object',
+		    title => 'Associated Applications',
+		    optional => 1,
+		},
+	    },
+	},
+	links => [ { rel => 'child', href => "{pool_name}" } ],
+    },
+    code => sub {
+	my ($param) = @_;
+
+	PVE::Ceph::Tools::check_ceph_inited();
+
+	my $rados = PVE::RADOS->new();
+
+	my $stats = {};
+	my $res = $rados->mon_command({ prefix => 'df' });
+
+	foreach my $d (@{$res->{pools}}) {
+	    next if !$d->{stats};
+	    next if !defined($d->{id});
+	    $stats->{$d->{id}} = $d->{stats};
+	}
+
+	$res = $rados->mon_command({ prefix => 'osd dump' });
+	my $rulestmp = $rados->mon_command({ prefix => 'osd crush rule dump'});
+
+	my $rules = {};
+	for my $rule (@$rulestmp) {
+	    $rules->{$rule->{rule_id}} = $rule->{rule_name};
+	}
+
+	my $data = [];
+	my $attr_list = [
+	    'pool',
+	    'pool_name',
+	    'size',
+	    'min_size',
+	    'pg_num',
+	    'crush_rule',
+	    'pg_autoscale_mode',
+	    'application_metadata',
+	];
+
+	# pg_autoscaler module is not enabled in Nautilus
+	my $autoscale = eval { $get_autoscale_status->($rados) };
+
+	foreach my $e (@{$res->{pools}}) {
+	    my $d = {};
+	    foreach my $attr (@$attr_list) {
+		$d->{$attr} = $e->{$attr} if defined($e->{$attr});
+	    }
+
+	    if ($autoscale) {
+		$d->{autoscale_status} = $autoscale->{$d->{pool_name}};
+		$d->{pg_num_final} = $d->{autoscale_status}->{pg_num_final};
+		# some info is nested under options instead
+		$d->{pg_num_min} = $e->{options}->{pg_num_min};
+		$d->{target_size} = $e->{options}->{target_size_bytes};
+		$d->{target_size_ratio} = $e->{options}->{target_size_ratio};
+	    }
+
+	    if (defined($d->{crush_rule}) && defined($rules->{$d->{crush_rule}})) {
+		$d->{crush_rule_name} = $rules->{$d->{crush_rule}};
+	    }
+
+	    if (my $s = $stats->{$d->{pool}}) {
+		$d->{bytes_used} = $s->{bytes_used};
+		$d->{percent_used} = $s->{percent_used};
+	    }
+
+	    # Cephs numerical pool types are barely documented. Found the following in the Ceph
+	    # codebase: https://github.com/ceph/ceph/blob/ff144995a849407c258bcb763daa3e03cfce5059/src/osd/osd_types.h#L1221-L1233
+	    if ($e->{type} == 1) {
+		$d->{type} = 'replicated';
+	    } elsif ($e->{type} == 3) {
+		$d->{type} = 'erasure';
+	    } else {
+		# we should never get here, but better be safe
+		$d->{type} = 'unknown';
+	    }
+	    push @$data, $d;
+	}
+
+
+	return $data;
+    }});
+
+
+my $ceph_pool_common_options = sub {
+    my ($nodefault) = @_;
+    my $options = {
+	name => {
+	    title => 'Name',
+	    description => "The name of the pool. It must be unique.",
+	    type => 'string',
+	},
+	size => {
+	    title => 'Size',
+	    description => 'Number of replicas per object',
+	    type => 'integer',
+	    default => 3,
+	    optional => 1,
+	    minimum => 1,
+	    maximum => 7,
+	},
+	min_size => {
+	    title => 'Min Size',
+	    description => 'Minimum number of replicas per object',
+	    type => 'integer',
+	    default => 2,
+	    optional => 1,
+	    minimum => 1,
+	    maximum => 7,
+	},
+	pg_num => {
+	    title => 'PG Num',
+	    description => "Number of placement groups.",
+	    type => 'integer',
+	    default => 128,
+	    optional => 1,
+	    minimum => 1,
+	    maximum => 32768,
+	},
+	pg_num_min => {
+	    title => 'min. PG Num',
+	    description => "Minimal number of placement groups.",
+	    type => 'integer',
+	    optional => 1,
+	    maximum => 32768,
+	},
+	crush_rule => {
+	    title => 'Crush Rule Name',
+	    description => "The rule to use for mapping object placement in the cluster.",
+	    type => 'string',
+	    optional => 1,
+	},
+	application => {
+	    title => 'Application',
+	    description => "The application of the pool.",
+	    default => 'rbd',
+	    type => 'string',
+	    enum => ['rbd', 'cephfs', 'rgw'],
+	    optional => 1,
+	},
+	pg_autoscale_mode => {
+	    title => 'PG Autoscale Mode',
+	    description => "The automatic PG scaling mode of the pool.",
+	    type => 'string',
+	    enum => ['on', 'off', 'warn'],
+	    default => 'warn',
+	    optional => 1,
+	},
+	target_size => {
+	    description => "The estimated target size of the pool for the PG autoscaler.",
+	    title => 'PG Autoscale Target Size',
+	    type => 'string',
+	    pattern => '^(\d+(\.\d+)?)([KMGT])?$',
+	    optional => 1,
+	},
+	target_size_ratio => {
+	    description => "The estimated target ratio of the pool for the PG autoscaler.",
+	    title => 'PG Autoscale Target Ratio',
+	    type => 'number',
+	    optional => 1,
+	},
+    };
+
+    if ($nodefault) {
+	delete $options->{$_}->{default} for keys %$options;
+    }
+    return $options;
+};
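+# Note: the POST handler below uses $ceph_pool_common_options->() with the
+# defaults intact, while the PUT handler passes 'nodefault' so that an update
+# does not advertise defaults for fields the caller did not set.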
+
+
+my $add_storage = sub {
+    my ($pool, $storeid, $ec_data_pool) = @_;
+
+    my $storage_params = {
+	type => 'rbd',
+	pool => $pool,
+	storage => $storeid,
+	krbd => 0,
+	content => 'rootdir,images',
+    };
+
+    $storage_params->{'data-pool'} = $ec_data_pool if $ec_data_pool;
+
+    PVE::API2::Storage::Config->create($storage_params);
+};
+
+my $get_storages = sub {
+    my ($pool) = @_;
+
+    my $cfg = PVE::Storage::config();
+
+    my $storages = $cfg->{ids};
+    my $res = {};
+    foreach my $storeid (keys %$storages) {
+	my $curr = $storages->{$storeid};
+	next if $curr->{type} ne 'rbd';
+	$curr->{pool} = 'rbd' if !defined $curr->{pool}; # set default
+	if (
+	    $pool eq $curr->{pool} ||
+	    (defined $curr->{'data-pool'} && $pool eq $curr->{'data-pool'})
+	) {
+	    $res->{$storeid} = $storages->{$storeid};
+	}
+    }
+
+    return $res;
+};
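+# Illustrative: for a pool 'foo' this returns every rbd-type storage whose
+# 'pool' or 'data-pool' is 'foo'; external clusters are included here and
+# filtered via 'monhost' by callers that need to skip them.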
+
+my $ec_format = {
+    k => {
+	type => 'integer',
+	description => "Number of data chunks. Will create an erasure coded pool plus a"
+	    ." replicated pool for metadata.",
+	minimum => 2,
+    },
+    m => {
+	type => 'integer',
+	description => "Number of coding chunks. Will create an erasure coded pool plus a"
+	    ." replicated pool for metadata.",
+	minimum => 1,
+    },
+    'failure-domain' => {
+	type => 'string',
+	description => "CRUSH failure domain. Default is 'host'. Will create an erasure"
+	    ." coded pool plus a replicated pool for metadata.",
+	format_description => 'domain',
+	optional => 1,
+	default => 'host',
+    },
+    'device-class' => {
+	type => 'string',
+	description => "CRUSH device class. Will create an erasure coded pool plus a"
+	    ." replicated pool for metadata.",
+	format_description => 'class',
+	optional => 1,
+    },
+    profile => {
+	description => "Override the erasure code (EC) profile to use. Will create an"
+	    ." erasure coded pool plus a replicated pool for metadata.",
+	type => 'string',
+	format_description => 'profile',
+	optional => 1,
+    },
+};
+
+sub ec_parse_and_check {
+    my ($property, $rados) = @_;
+    return if !$property;
+
+    my $ec = parse_property_string($ec_format, $property);
+
+    die "Erasure code profile '$ec->{profile}' does not exist.\n"
+	if $ec->{profile} && !PVE::Ceph::Tools::ecprofile_exists($ec->{profile}, $rados);
+
+    return $ec;
+}
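+# Illustrative: ec_parse_and_check('k=4,m=2,failure-domain=host') returns
+# { k => 4, m => 2, 'failure-domain' => 'host' }; the profile existence
+# check only runs when an explicit 'profile' is given.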
+
+
+__PACKAGE__->register_method ({
+    name => 'createpool',
+    path => '',
+    method => 'POST',
+    description => "Create Ceph pool",
+    proxyto => 'node',
+    protected => 1,
+    permissions => {
+	check => ['perm', '/', [ 'Sys.Modify' ]],
+    },
+    parameters => {
+	additionalProperties => 0,
+	properties => {
+	    node => get_standard_option('pve-node'),
+	    add_storages => {
+		description => "Configure VM and CT storage using the new pool.",
+		type => 'boolean',
+		optional => 1,
+		default => "0; for erasure coded pools: 1",
+	    },
+	    'erasure-coding' => {
+		description => "Create an erasure coded pool for RBD with an accompaning"
+		    ." replicated pool for metadata storage. With EC, the common ceph options 'size',"
+		    ." 'min_size' and 'crush_rule' parameters will be applied to the metadata pool.",
+		type => 'string',
+		format => $ec_format,
+		optional => 1,
+	    },
+	    %{ $ceph_pool_common_options->() },
+	},
+    },
+    returns => { type => 'string' },
+    code => sub {
+	my ($param) = @_;
+
+	PVE::Cluster::check_cfs_quorum();
+	PVE::Ceph::Tools::check_ceph_configured();
+
+	my $pool = my $name = extract_param($param, 'name');
+	my $node = extract_param($param, 'node');
+	my $add_storages = extract_param($param, 'add_storages');
+
+	my $rpcenv = PVE::RPCEnvironment::get();
+	my $user = $rpcenv->get_user();
+	# Ceph uses target_size_bytes
+	if (defined($param->{'target_size'})) {
+	    my $target_sizestr = extract_param($param, 'target_size');
+	    $param->{target_size_bytes} = PVE::JSONSchema::parse_size($target_sizestr);
+	}
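+	# e.g. (assuming the usual 1024-based suffixes of parse_size) a
+	# target_size of '10G' becomes target_size_bytes 10737418240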
+
+	my $rados = PVE::RADOS->new();
+	my $ec = ec_parse_and_check(extract_param($param, 'erasure-coding'), $rados);
+	$add_storages = 1 if $ec && !defined($add_storages);
+
+	if ($add_storages) {
+	    $rpcenv->check($user, '/storage', ['Datastore.Allocate']);
+	    die "pool name contains characters which are illegal for storage naming\n"
+		if !PVE::JSONSchema::parse_storage_id($pool);
+	}
+
+	# pool defaults
+	$param->{pg_num} //= 128;
+	$param->{size} //= 3;
+	$param->{min_size} //= 2;
+	$param->{application} //= 'rbd';
+	$param->{pg_autoscale_mode} //= 'warn';
+
+	my $worker = sub {
+	    # reopen with longer timeout
+	    $rados = PVE::RADOS->new(timeout => PVE::Ceph::Tools::get_config('long_rados_timeout'));
+
+	    if ($ec) {
+		if (!$ec->{profile}) {
+		    $ec->{profile} = PVE::Ceph::Tools::get_ecprofile_name($pool, $rados);
+		    eval {
+			PVE::Ceph::Tools::create_ecprofile(
+			    $ec->@{'profile', 'k', 'm', 'failure-domain', 'device-class'},
+			    $rados,
+			);
+		    };
+		    die "could not create erasure code profile '$ec->{profile}': $@\n" if $@;
+		    print "created new erasure code profile '$ec->{profile}'\n";
+		}
+
+		# copy all params, should be a flat hash
+		my $ec_data_param = { map { $_ => $param->{$_} } keys %$param };
+
+		$ec_data_param->{pool_type} = 'erasure';
+		$ec_data_param->{allow_ec_overwrites} = 'true';
+		$ec_data_param->{erasure_code_profile} = $ec->{profile};
+		delete $ec_data_param->{size};
+		delete $ec_data_param->{min_size};
+		delete $ec_data_param->{crush_rule};
+
+		# metadata pool should be ok with 32 PGs
+		$param->{pg_num} = 32;
+
+		$pool = "${name}-metadata";
+		$ec->{data_pool} = "${name}-data";
+
+		PVE::Ceph::Tools::create_pool($ec->{data_pool}, $ec_data_param, $rados);
+	    }
+
+	    PVE::Ceph::Tools::create_pool($pool, $param, $rados);
+
+	    if ($add_storages) {
+		eval { $add_storage->($pool, "${name}", $ec->{data_pool}) };
+		die "adding PVE storage for ceph pool '$name' failed: $@\n" if $@;
+	    }
+	};
+
+	return $rpcenv->fork_worker('cephcreatepool', $pool,  $user, $worker);
+    }});
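+# Illustrative CLI call for the endpoint above (pvesh maps POST to 'create'):
+#   pvesh create /nodes/<node>/ceph/pool --name ectest --erasure-coding k=2,m=1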
+
+
+__PACKAGE__->register_method ({
+    name => 'destroypool',
+    path => '{name}',
+    method => 'DELETE',
+    description => "Destroy pool",
+    proxyto => 'node',
+    protected => 1,
+    permissions => {
+	check => ['perm', '/', [ 'Sys.Modify' ]],
+    },
+    parameters => {
+	additionalProperties => 0,
+	properties => {
+	    node => get_standard_option('pve-node'),
+	    name => {
+		description => "The name of the pool. It must be unique.",
+		type => 'string',
+	    },
+	    force => {
+		description => "If true, destroys pool even if in use",
+		type => 'boolean',
+		optional => 1,
+		default => 0,
+	    },
+	    remove_storages => {
+		description => "Remove all pveceph-managed storages configured for this pool",
+		type => 'boolean',
+		optional => 1,
+		default => 0,
+	    },
+	    remove_ecprofile => {
+		description => "Remove the erasure code profile. Defaults to true, if applicable.",
+		type => 'boolean',
+		optional => 1,
+		default => 1,
+	    },
+	},
+    },
+    returns => { type => 'string' },
+    code => sub {
+	my ($param) = @_;
+
+	PVE::Ceph::Tools::check_ceph_inited();
+
+	my $rpcenv = PVE::RPCEnvironment::get();
+	my $user = $rpcenv->get_user();
+	$rpcenv->check($user, '/storage', ['Datastore.Allocate'])
+	    if $param->{remove_storages};
+
+	my $pool = $param->{name};
+
+	my $worker = sub {
+	    my $storages = $get_storages->($pool);
+
+	    # if not forced, destroy ceph pool only when no
+	    # vm disks are on it anymore
+	    if (!$param->{force}) {
+		my $storagecfg = PVE::Storage::config();
+		foreach my $storeid (keys %$storages) {
+		    my $storage = $storages->{$storeid};
+
+		    # check if any vm disks are on the pool
+		    print "checking storage '$storeid' for RBD images..\n";
+		    my $res = PVE::Storage::vdisk_list($storagecfg, $storeid);
+		    die "ceph pool '$pool' still in use by storage '$storeid'\n"
+			if @{$res->{$storeid}} != 0;
+		}
+	    }
+	    my $rados = PVE::RADOS->new();
+
+	    my $pool_properties = PVE::Ceph::Tools::get_pool_properties($pool, $rados);
+
+	    PVE::Ceph::Tools::destroy_pool($pool, $rados);
+
+	    if (my $ecprofile = $pool_properties->{erasure_code_profile}) {
+		print "found erasure coded profile '$ecprofile', destroying its CRUSH rule\n";
+		my $crush_rule = $pool_properties->{crush_rule};
+		eval { PVE::Ceph::Tools::destroy_crush_rule($crush_rule, $rados); };
+		warn "removing crush rule '${crush_rule}' failed: $@\n" if $@;
+
+		if ($param->{remove_ecprofile} // 1) {
+		    print "destroying erasure coded profile '$ecprofile'\n";
+		    eval { PVE::Ceph::Tools::destroy_ecprofile($ecprofile, $rados) };
+		    warn "removing EC profile '${ecprofile}' failed: $@\n" if $@;
+		}
+	    }
+
+	    if ($param->{remove_storages}) {
+		my $err;
+		foreach my $storeid (keys %$storages) {
+		    # skip external clusters, not managed by pveceph
+		    next if $storages->{$storeid}->{monhost};
+		    eval { PVE::API2::Storage::Config->delete({storage => $storeid}) };
+		    if ($@) {
+			warn "failed to remove storage '$storeid': $@\n";
+			$err = 1;
+		    }
+		}
+		die "failed to remove (some) storages - check log and remove manually!\n"
+		    if $err;
+	    }
+	};
+	return $rpcenv->fork_worker('cephdestroypool', $pool,  $user, $worker);
+    }});
+
+
+__PACKAGE__->register_method ({
+    name => 'setpool',
+    path => '{name}',
+    method => 'PUT',
+    description => "Change POOL settings",
+    proxyto => 'node',
+    protected => 1,
+    permissions => {
+	check => ['perm', '/', [ 'Sys.Modify' ]],
+    },
+    parameters => {
+	additionalProperties => 0,
+	properties => {
+	    node => get_standard_option('pve-node'),
+	    %{ $ceph_pool_common_options->('nodefault') },
+	},
+    },
+    returns => { type => 'string' },
+    code => sub {
+	my ($param) = @_;
+
+	PVE::Ceph::Tools::check_ceph_configured();
+
+	my $rpcenv = PVE::RPCEnvironment::get();
+	my $authuser = $rpcenv->get_user();
+
+	my $pool = extract_param($param, 'name');
+	my $node = extract_param($param, 'node');
+
+	# Ceph uses target_size_bytes
+	if (defined($param->{'target_size'})) {
+	    my $target_sizestr = extract_param($param, 'target_size');
+	    $param->{target_size_bytes} = PVE::JSONSchema::parse_size($target_sizestr);
+	}
+
+	my $worker = sub {
+	    PVE::Ceph::Tools::set_pool($pool, $param);
+	};
+
+	return $rpcenv->fork_worker('cephsetpool', $pool,  $authuser, $worker);
+    }});
+
+__PACKAGE__->register_method ({
+    name => 'poolindex',
+    path => '{name}',
+    method => 'GET',
+    permissions => {
+	check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
+    },
+    description => "Pool index.",
+    parameters => {
+	additionalProperties => 0,
+	properties => {
+	    node => get_standard_option('pve-node'),
+	    name => {
+		description => 'The name of the pool.',
+		type => 'string',
+	    },
+	},
+    },
+    returns => {
+	type => 'array',
+	items => {
+	    type => "object",
+	    properties => {},
+	},
+	links => [ { rel => 'child', href => "{name}" } ],
+    },
+    code => sub {
+	my ($param) = @_;
+
+	my $result = [
+	    { name => 'status' },
+	];
+
+	return $result;
+    }});
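+# The index currently only exposes 'status'; further children (for example
+# the namespace endpoint mentioned in the cover note) can be added here
+# without changing the semantics of GET /pool/{name} itself.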
+
+
+__PACKAGE__->register_method ({
+    name => 'getpool',
+    path => '{name}/status',
+    method => 'GET',
+    description => "List pool settings.",
+    proxyto => 'node',
+    protected => 1,
+    permissions => {
+	check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
+    },
+    parameters => {
+	additionalProperties => 0,
+	properties => {
+	    node => get_standard_option('pve-node'),
+	    name => {
+		description => "The name of the pool. It must be unique.",
+		type => 'string',
+	    },
+	    verbose => {
+		type => 'boolean',
+		default => 0,
+		optional => 1,
+		description => "If enabled, will display additional data".
+		    "(eg. statistics).",
+	    },
+	},
+    },
+    returns => {
+	type => "object",
+	properties => {
+	    id                     => { type => 'integer', title => 'ID' },
+	    pgp_num                => { type => 'integer', title => 'PGP num' },
+	    noscrub                => { type => 'boolean', title => 'noscrub' },
+	    'nodeep-scrub'         => { type => 'boolean', title => 'nodeep-scrub' },
+	    nodelete               => { type => 'boolean', title => 'nodelete' },
+	    nopgchange             => { type => 'boolean', title => 'nopgchange' },
+	    nosizechange           => { type => 'boolean', title => 'nosizechange' },
+	    write_fadvise_dontneed => { type => 'boolean', title => 'write_fadvise_dontneed' },
+	    hashpspool             => { type => 'boolean', title => 'hashpspool' },
+	    use_gmt_hitset         => { type => 'boolean', title => 'use_gmt_hitset' },
+	    fast_read              => { type => 'boolean', title => 'Fast Read' },
+	    application_list       => { type => 'array', title => 'Application', optional => 1 },
+	    statistics             => { type => 'object', title => 'Statistics', optional => 1 },
+	    autoscale_status       => { type => 'object',  title => 'Autoscale Status', optional => 1 },
+	    %{ $ceph_pool_common_options->() },
+	},
+    },
+    code => sub {
+	my ($param) = @_;
+
+	PVE::Ceph::Tools::check_ceph_inited();
+
+	my $verbose = $param->{verbose};
+	my $pool = $param->{name};
+
+	my $rados = PVE::RADOS->new();
+	my $res = $rados->mon_command({
+		prefix => 'osd pool get',
+		pool   => "$pool",
+		var    => 'all',
+	    });
+
+	my $data = {
+	    id                     => $res->{pool_id},
+	    name                   => $pool,
+	    size                   => $res->{size},
+	    min_size               => $res->{min_size},
+	    pg_num                 => $res->{pg_num},
+	    pg_num_min             => $res->{pg_num_min},
+	    pgp_num                => $res->{pgp_num},
+	    crush_rule             => $res->{crush_rule},
+	    pg_autoscale_mode      => $res->{pg_autoscale_mode},
+	    noscrub                => "$res->{noscrub}",
+	    'nodeep-scrub'         => "$res->{'nodeep-scrub'}",
+	    nodelete               => "$res->{nodelete}",
+	    nopgchange             => "$res->{nopgchange}",
+	    nosizechange           => "$res->{nosizechange}",
+	    write_fadvise_dontneed => "$res->{write_fadvise_dontneed}",
+	    hashpspool             => "$res->{hashpspool}",
+	    use_gmt_hitset         => "$res->{use_gmt_hitset}",
+	    fast_read              => "$res->{fast_read}",
+	    target_size            => $res->{target_size_bytes},
+	    target_size_ratio      => $res->{target_size_ratio},
+	};
+
+	if ($verbose) {
+	    my $stats;
+	    my $res = $rados->mon_command({ prefix => 'df' });
+
+	    # pg_autoscaler module is not enabled in Nautilus
+	    # avoid partial read further down, use new rados instance
+	    my $autoscale_status = eval { $get_autoscale_status->() };
+	    $data->{autoscale_status} = $autoscale_status->{$pool};
+
+	    foreach my $d (@{$res->{pools}}) {
+		next if !$d->{stats};
+		next if !defined($d->{name}) || $d->{name} ne "$pool";
+		$data->{statistics} = $d->{stats};
+	    }
+
+	    my $apps = $rados->mon_command({ prefix => "osd pool application get", pool => "$pool", });
+	    $data->{application_list} = [ keys %$apps ];
+	}
+
+	return $data;
+    }});
+
+
+1;
diff --git a/PVE/API2/Ceph/Pools.pm b/PVE/API2/Ceph/Pools.pm
index fce56787..ffae73b9 100644
--- a/PVE/API2/Ceph/Pools.pm
+++ b/PVE/API2/Ceph/Pools.pm
@@ -1,4 +1,5 @@
 package PVE::API2::Ceph::Pools;
+# TODO: Deprecated, drop with PVE 8.0! PVE::API2::Ceph::Pool is the replacement
 
 use strict;
 use warnings;
@@ -37,7 +38,7 @@ __PACKAGE__->register_method ({
     name => 'lspools',
     path => '',
     method => 'GET',
-    description => "List all pools.",
+    description => "List all pools. Deprecated, please use `/nodes/{node}/ceph/pool`.",
     proxyto => 'node',
     protected => 1,
     permissions => {
@@ -393,7 +394,7 @@ __PACKAGE__->register_method ({
     name => 'createpool',
     path => '',
     method => 'POST',
-    description => "Create Ceph pool",
+    description => "Create Ceph pool. Deprecated, please use `/nodes/{node}/ceph/pool`.",
     proxyto => 'node',
     protected => 1,
     permissions => {
@@ -509,7 +510,7 @@ __PACKAGE__->register_method ({
     name => 'destroypool',
     path => '{name}',
     method => 'DELETE',
-    description => "Destroy pool",
+    description => "Destroy pool. Deprecated, please use `/nodes/{node}/ceph/pool/{name}`.",
     proxyto => 'node',
     protected => 1,
     permissions => {
@@ -615,7 +616,7 @@ __PACKAGE__->register_method ({
     name => 'setpool',
     path => '{name}',
     method => 'PUT',
-    description => "Change POOL settings",
+    description => "Change POOL settings. Deprecated, please use `/nodes/{node}/ceph/pool/{name}`.",
     proxyto => 'node',
     protected => 1,
     permissions => {
@@ -658,7 +659,7 @@ __PACKAGE__->register_method ({
     name => 'getpool',
     path => '{name}',
     method => 'GET',
-    description => "List pool settings.",
+    description => "List pool settings. Deprecated, please use `/nodes/{node}/ceph/pool/{pool}/status`.",
     proxyto => 'node',
     protected => 1,
     permissions => {
-- 
2.30.2

* [pve-devel] [PATCH manager 2/2] ui: ceph: adapt urls to new ceph/pool endpoint
  2022-12-09 12:58 [pve-devel] [PATCH manager 1/2] api: ceph: deprecate pools in favor or pool Aaron Lauterer
@ 2022-12-09 12:58 ` Aaron Lauterer
  2022-12-12 10:34 ` [pve-devel] [PATCH manager 1/2] api: ceph: deprecate pools in favor or pool Aaron Lauterer
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 5+ messages in thread
From: Aaron Lauterer @ 2022-12-09 12:58 UTC (permalink / raw)
  To: pve-devel

ceph/pools (plural) is deprecated, use the new endpoint.
Since the details/status of a pool moved from ceph/pools/{name} to
ceph/pool/{name}/status, we need to pass the 'loadUrl' to the edit
window.

Signed-off-by: Aaron Lauterer <a.lauterer@proxmox.com>
---
 www/manager6/ceph/Pool.js             | 9 +++++----
 www/manager6/form/CephPoolSelector.js | 2 +-
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/www/manager6/ceph/Pool.js b/www/manager6/ceph/Pool.js
index a1f008d1..21fb171c 100644
--- a/www/manager6/ceph/Pool.js
+++ b/www/manager6/ceph/Pool.js
@@ -200,8 +200,9 @@ Ext.define('PVE.Ceph.PoolEdit', {
     cbind: {
 	autoLoad: get => !get('isCreate'),
 	url: get => get('isCreate')
-	    ? `/nodes/${get('nodename')}/ceph/pools`
-	    : `/nodes/${get('nodename')}/ceph/pools/${get('pool_name')}`,
+	    ? `/nodes/${get('nodename')}/ceph/pool`
+	    : `/nodes/${get('nodename')}/ceph/pool/${get('pool_name')}`,
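+	// the edit window loads the current values from the new 'status'
+	// child ('loadUrl'), while 'url' above stays the submit target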
+	loadUrl: get => `/nodes/${get('nodename')}/ceph/pool/${get('pool_name')}/status`,
 	method: get => get('isCreate') ? 'POST' : 'PUT',
     },
 
@@ -356,7 +357,7 @@ Ext.define('PVE.node.Ceph.PoolList', {
 	    model: 'ceph-pool-list',
 	    proxy: {
 		type: 'proxmox',
-		url: `/api2/json/nodes/${nodename}/ceph/pools`,
+		url: `/api2/json/nodes/${nodename}/ceph/pool`,
 	    },
 	});
 	let store = Ext.create('Proxmox.data.DiffStore', { rstore: rstore });
@@ -420,7 +421,7 @@ Ext.define('PVE.node.Ceph.PoolList', {
 			let poolName = rec.data.pool_name;
 			Ext.create('Proxmox.window.SafeDestroy', {
 			    showProgress: true,
-			    url: `/nodes/${nodename}/ceph/pools/${poolName}`,
+			    url: `/nodes/${nodename}/ceph/pool/${poolName}`,
 			    params: {
 				remove_storages: 1,
 			    },
diff --git a/www/manager6/form/CephPoolSelector.js b/www/manager6/form/CephPoolSelector.js
index e8197077..471739a9 100644
--- a/www/manager6/form/CephPoolSelector.js
+++ b/www/manager6/form/CephPoolSelector.js
@@ -26,7 +26,7 @@ Ext.define('PVE.form.CephPoolSelector', {
 	    ],
 	    proxy: {
 		type: 'proxmox',
-		url: '/api2/json/nodes/' + me.nodename + '/ceph/pools',
+		url: '/api2/json/nodes/' + me.nodename + '/ceph/pool',
 	    },
 	});
 
-- 
2.30.2

* Re: [pve-devel] [PATCH manager 1/2] api: ceph: deprecate pools in favor or pool
  2022-12-09 12:58 [pve-devel] [PATCH manager 1/2] api: ceph: deprecate pools in favor or pool Aaron Lauterer
  2022-12-09 12:58 ` [pve-devel] [PATCH manager 2/2] ui: ceph: adapt urls to new ceph/pool endpoint Aaron Lauterer
@ 2022-12-12 10:34 ` Aaron Lauterer
  2023-03-13  8:35 ` Aaron Lauterer
  2023-03-20  8:31 ` Fabian Grünbichler
  3 siblings, 0 replies; 5+ messages in thread
From: Aaron Lauterer @ 2022-12-12 10:34 UTC (permalink / raw)
  To: pve-devel

I noticed that the commit title needs a
s/or/of/

* Re: [pve-devel] [PATCH manager 1/2] api: ceph: deprecate pools in favor or pool
  2022-12-09 12:58 [pve-devel] [PATCH manager 1/2] api: ceph: deprecate pools in favor or pool Aaron Lauterer
  2022-12-09 12:58 ` [pve-devel] [PATCH manager 2/2] ui: ceph: adapt urls to new ceph/pool endpoint Aaron Lauterer
  2022-12-12 10:34 ` [pve-devel] [PATCH manager 1/2] api: ceph: deprecate pools in favor or pool Aaron Lauterer
@ 2023-03-13  8:35 ` Aaron Lauterer
  2023-03-20  8:31 ` Fabian Grünbichler
  3 siblings, 0 replies; 5+ messages in thread
From: Aaron Lauterer @ 2023-03-13  8:35 UTC (permalink / raw)
  To: pve-devel

Ping?

* Re: [pve-devel] [PATCH manager 1/2] api: ceph: deprecate pools in favor or pool
  2022-12-09 12:58 [pve-devel] [PATCH manager 1/2] api: ceph: deprecate pools in favor or pool Aaron Lauterer
                   ` (2 preceding siblings ...)
  2023-03-13  8:35 ` Aaron Lauterer
@ 2023-03-20  8:31 ` Fabian Grünbichler
  3 siblings, 0 replies; 5+ messages in thread
From: Fabian Grünbichler @ 2023-03-20  8:31 UTC (permalink / raw)
  To: Proxmox VE development discussion

On December 9, 2022 1:58 pm, Aaron Lauterer wrote:
> [...]

high level: pveceph also should be switched to the new endpoints ;)

two small nits inline..

> [...]
> +__PACKAGE__->register_method ({
> +    name => 'lspools',
> +    path => '',
> +    method => 'GET',
> +    description => "List all pools.",

and their settings (which are settable by the POST/PUT endpoints).

> [...]
> +__PACKAGE__->register_method ({
> +    name => 'getpool',
> +    path => '{name}/status',
> +    method => 'GET',
> +    description => "List pool settings.",

whereas this actually returns *much more* than just the settings, and is
therefore rightly named "status", so maybe the description should also make
that clear ;)
