From: Aaron Lauterer <a.lauterer@proxmox.com>
To: pve-devel@lists.proxmox.com
Subject: [pve-devel] [PATCH v2 storage 3/3] rbd: add integration test for namespace handling
Date: Wed, 7 Apr 2021 16:22:18 +0200 [thread overview]
Message-ID: <20210407142218.29156-4-a.lauterer@proxmox.com> (raw)
In-Reply-To: <20210407142218.29156-1-a.lauterer@proxmox.com>
This test is intended to be run on a hyperconverged PVE cluster to test
the most common operations of VMs using a namespaced Ceph RBD pool.
Signed-off-by: Aaron Lauterer <a.lauterer@proxmox.com>
---
v1 -> v2:
reworked the test based on the feedback I got [0]:
* tests are now defined in nested hashes/arrays and can contain test
steps as well as preparation and cleanup steps where needed
* command calls don't use fixed paths
* command calls use arrays
* a new Ceph pool will be created and removed at the end by default. This
can be overridden with the '--use_existing' parameter, most likely in
combination with the '--pool' parameter.
* a debug flag has been introduced, to be used if some debug printing via
the `jp` sub is needed. Currently not used, but might be useful when
tinkering with the test in the future. Thanks @Thomas for the hint.
[0] https://lists.proxmox.com/pipermail/pve-devel/2021-April/047472.html
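For reference, an invocation could look like this (hypothetical example
runs, assuming the script is executed from the repository root):

  # default run: creates pool 'testpool', runs all tests, cleans up
  ./test/rbd_namespace.pl

  # run against an already existing pool
  ./test/rbd_namespace.pl --use_existing --pool rbd

  # manually remove leftovers of a previous run
  ./test/rbd_namespace.pl --cleanup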
test/rbd_namespace.pl | 388 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 388 insertions(+)
create mode 100755 test/rbd_namespace.pl
diff --git a/test/rbd_namespace.pl b/test/rbd_namespace.pl
new file mode 100755
index 0000000..c6a9468
--- /dev/null
+++ b/test/rbd_namespace.pl
@@ -0,0 +1,388 @@
+#!/usr/bin/perl
+
+# This script is meant to be run manually on a hyperconverged PVE server
+# with a Ceph cluster. It tests how PVE handles RBD namespaces.
+#
+# The pool (default: testpool) is created unless --use_existing is set; in
+# that case it must already exist. The namespace and VMs will be created.
+#
+# Parameters like the names of the pool and namespace and the VMID can be
+# configured. The VMIDs of the clones are $vmid - 1 and $vmid - 2.
+#
+# Cleanup is done after a successful run. Cleanup can also be called manually.
+#
+# Known issues:
+#
+# * Snapshot rollback can sometimes race with stopping the VM and Ceph
+#   recognizing that the disk image is no longer in use.
+
+use strict;
+use warnings;
+
+use Test::More;
+use Getopt::Long;
+use JSON;
+
+use PVE::Tools qw(run_command);
+
+my $pool = "testpool";
+my $use_existing = undef;
+my $namespace = "testspace";
+my $showhelp = '';
+my $vmid = 999999;
+my $cleanup = undef;
+my $DEBUG = 0;
+
+my $helpstring = "To override default values, set them as named parameters:
+
+--pool pool name, default: ${pool}
+--use_existing use existing pool, default: 0, needs --pool set
+--namespace rbd namespace, default: ${namespace}
+--vmid VMID of the test VM, default: ${vmid}
+--cleanup Remove the storage definitions, namespaces and VMs
+--debug Enable debug output\n";
+
+GetOptions (
+ "pool=s" => \$pool,
+ "use_existing" => \$use_existing,
+ "namespace=s" => \$namespace,
+ "vmid=i" => \$vmid,
+ "help" => \$showhelp,
+ "cleanup" => \$cleanup,
+ "debug" => \$DEBUG,
+) or die ($helpstring);
+
+die $helpstring if $showhelp;
+
+my $storage_name = "${pool}-${namespace}";
+
+my $vmid_clone = int($vmid) - 1;
+my $vmid_linked_clone = int($vmid) - 2;
+
+sub jp {
+ return if !$DEBUG;
+    print to_json($_[0], { utf8 => 1, pretty => 1, canonical => 1 }) . "\n";
+}
+
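+# Execute a command; with $json set, parse stdout as a JSON array and
+# return the decoded structure, otherwise return the raw output. Errors
+# are fatal unless $ignore_errors is set.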
+sub run_cmd {
+ my ($cmd, $json, $ignore_errors) = @_;
+
+ my $raw = '';
+ my $parser = sub {$raw .= shift;};
+
+ eval {
+ run_command($cmd, outfunc => $parser);
+ };
+ if (my $err = $@) {
+ die $err if !$ignore_errors;
+ }
+
+ if ($json) {
+ my $result;
+ if ($raw eq '') {
+ $result = [];
+ } elsif ($raw =~ m/^(\[.*\])$/s) { # untaint
+ $result = JSON::decode_json($1);
+ } else {
+            die "got unexpected data from command: '" . (ref($cmd) ? join(' ', @$cmd) : $cmd) . "' -> '$raw'\n";
+ }
+ return $result;
+ }
+ return $raw;
+}
+
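+# Execute a single test step and print its collected output; returns 1 on
+# success and 0 on failure, suitable as argument for Test::More's ok().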
+sub run_test_cmd {
+ my ($cmd) = @_;
+
+ my $raw = '';
+ my $out = sub {
+ my $line = shift;
+ $raw .= "${line}\n";
+ };
+
+ eval {
+ run_command($cmd, outfunc => $out);
+ };
+ if (my $err = $@) {
+ print $raw;
+ print $err;
+ return 0;
+ }
+ print $raw;
+ return 1;
+}
+
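+# Set up the test environment: pool (unless --use_existing is set),
+# namespace, storage definitions and the test VM.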
+sub prepare {
+    print "Preparing test environment\n";
+
+    my $pools = run_cmd(['ceph', 'osd', 'pool', 'ls', '--format', 'json'], 1);
+
+ my %poolnames = map {$_ => 1} @$pools;
+ die "Pool '$pool' does not exist!\n"
+ if !exists($poolnames{$pool}) && $use_existing;
+
+ run_cmd(['pveceph', 'pool', 'create', ${pool}, '--add_storages', 1])
+ if !$use_existing;
+
+ my $namespaces = run_cmd(['rbd', '-p', ${pool}, 'namespace', 'ls', '--format', 'json'], 1);
+ my $ns_found = 0;
+ for my $i (@$namespaces) {
+ #print Dumper $i;
+ $ns_found = 1 if $i->{name} eq $namespace;
+ }
+
+ if (!$ns_found) {
+ print "Create namespace '${namespace}' in pool '${pool}'\n";
+ run_cmd(['rbd', 'namespace', 'create', "${pool}/${namespace}"]);
+ }
+
+ my $storages = run_cmd(['pvesh', 'get', 'storage', '--output-format', 'json'], 1);
+ #print Dumper $storages;
+ my $rbd_found = 0;
+ my $pool_found = 0;
+
+ print "Create storage definition\n";
+ for my $stor (@$storages) {
+ $pool_found = 1 if $stor->{storage} eq $pool;
+ $rbd_found = 1 if $stor->{storage} eq $storage_name;
+
+ if ($rbd_found) {
+ run_cmd(['pvesm', 'set', ${storage_name}, '--krbd', '0']);
+ die "Enable the storage '$stor->{storage}'!" if $stor->{disable};
+ }
+ }
+ if (!$pool_found) {
+ die "No storage for pool '${pool}' found! Must have same name as pool!\n"
+ if $use_existing;
+
+ run_cmd(['pvesm', 'add', 'rbd', $pool, '--pool', $pool, '--content', 'images,rootdir']);
+ }
+ # create PVE storages (librbd / krbd)
+ run_cmd(['pvesm', 'add', 'rbd', ${storage_name}, '--krbd', '0', '--pool', ${pool}, '--namespace', ${namespace}, '--content', 'images,rootdir'])
+ if !$rbd_found;
+
+
+ # create test VM
+ print "Create test VM ${vmid}\n";
+ my $vms = run_cmd(['pvesh', 'get', 'cluster/resources', '--type', 'vm', '--output-format', 'json'], 1);
+ for my $vm (@$vms) {
+ # TODO: introduce a force flag to make this behaviour configurable
+
+ if ($vm->{vmid} eq $vmid) {
+ print "Test VM '${vmid}' already exists. It will be removed and recreated!\n";
+ run_cmd(['qm', 'stop', ${vmid}], 0, 1);
+ run_cmd(['qm', 'destroy', ${vmid}]);
+ }
+ }
+ run_cmd(['qm', 'create', ${vmid}, '--bios', 'ovmf', '--efidisk0', "${storage_name}:1", '--scsi0', "${storage_name}:2"]);
+}
+
+
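+# Tear down everything a test run may have created: VMs, mapped krbd
+# devices, storage definitions, the namespace and, unless --use_existing
+# is set, the pool itself.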
+sub cleanup {
+ print "Cleaning up test environment!\n";
+ print "Removing VMs\n";
+ run_cmd(['qm', 'stop', ${vmid}], 0, 1);
+ run_cmd(['qm', 'stop', ${vmid_linked_clone}], 0, 1);
+ run_cmd(['qm', 'stop', ${vmid_clone}], 0, 1);
+ run_cmd(['qm', 'destroy', ${vmid_linked_clone}], 0, 1);
+ run_cmd(['qm', 'destroy', ${vmid_clone}], 0, 1);
+    # unmap leftover krbd devices; the glob and loop need an explicit shell
+    run_cmd(['/bin/sh', '-c', "for i in /dev/rbd/${pool}/${namespace}/*; do /usr/bin/rbd unmap \$i; done"], 0, 1);
+ run_cmd(['qm', 'unlock', ${vmid}], 0, 1);
+ run_cmd(['qm', 'destroy', ${vmid}], 0, 1);
+
+ print "Removing Storage definition for ${storage_name}\n";
+ run_cmd(['pvesm', 'remove', ${storage_name}], 0, 1);
+
+ print "Removing RBD namespace '${pool}/${namespace}'\n";
+ run_cmd(['rbd', 'namespace', 'remove', "${pool}/${namespace}"], 0, 1);
+
+ if (!$use_existing) {
+ print "Removing Storage definition for ${pool}\n";
+ run_cmd(['pvesm', 'remove', ${pool}], 0, 1);
+ print "Removing test pool\n";
+ run_cmd(['pveceph', 'pool', 'destroy', $pool]);
+ }
+}
+
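+# Test definitions: each section can have 'preparations', 'steps' and
+# 'cleanup' command lists; only 'steps' are counted as TAP tests.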
+my $tests = [
+ # Example structure for tests
+ # {
+ # name => "name of test section",
+ # preparations => [
+ # ['some', 'prep', 'command'],
+ # ],
+ # steps => [
+ # ['test', 'cmd', $vmid],
+ # ['second', 'step', $vmid],
+ # ],
+ # cleanup => [
+ # ['cleanup', 'command'],
+ # ],
+ # },
+ {
+ name => 'first VM start',
+ steps => [
+ ['qm', 'start', $vmid],
+ ],
+ },
+ {
+ name => 'snapshot/rollback',
+ steps => [
+ ['qm', 'snapshot', $vmid, 'test'],
+ ['qm', 'rollback', $vmid, 'test'],
+ ],
+ cleanup => [
+ ['qm', 'unlock', $vmid],
+ ],
+ },
+ {
+ name => 'remove snapshot',
+ steps => [
+ ['qm', 'delsnapshot', $vmid, 'test'],
+ ],
+ },
+ {
+ name => 'moving disk between namespaces',
+ steps => [
+ ['qm', 'move_disk', $vmid, 'scsi0', $pool, '--delete', 1],
+ ['qm', 'move_disk', $vmid, 'scsi0', $storage_name, '--delete', 1],
+ ],
+ },
+ {
+ name => 'switch to krbd',
+ preparations => [
+ ['qm', 'stop', $vmid],
+ ['pvesm', 'set', $storage_name, '--krbd', 1]
+ ],
+ },
+ {
+ name => 'start VM with krbd',
+ steps => [
+ ['qm', 'start', $vmid],
+ ],
+ },
+ {
+ name => 'snapshot/rollback with krbd',
+ steps => [
+ ['qm', 'snapshot', $vmid, 'test'],
+ ['qm', 'rollback', $vmid, 'test'],
+ ],
+ cleanup => [
+ ['qm', 'unlock', $vmid],
+ ],
+ },
+ {
+ name => 'remove snapshot with krbd',
+ steps => [
+ ['qm', 'delsnapshot', $vmid, 'test'],
+ ],
+ },
+ {
+ name => 'moving disk between namespaces with krbd',
+ steps => [
+ ['qm', 'move_disk', $vmid, 'scsi0', $pool, '--delete', 1],
+ ['qm', 'move_disk', $vmid, 'scsi0', $storage_name, '--delete', 1],
+ ],
+ },
+ {
+ name => 'clone VM with krbd',
+ steps => [
+ ['qm', 'clone', $vmid, $vmid_clone],
+ ],
+ },
+ {
+ name => 'switch to non krbd',
+ preparations => [
+ ['qm', 'stop', $vmid],
+ ['qm', 'stop', $vmid_clone],
+ ['pvesm', 'set', $storage_name, '--krbd', 0]
+ ],
+ },
+ {
+ name => 'templates and linked clone',
+ steps => [
+ ['qm', 'template', $vmid],
+ ['qm', 'clone', $vmid, $vmid_linked_clone],
+ ['qm', 'start', $vmid_linked_clone],
+ ['qm', 'stop', $vmid_linked_clone],
+ ],
+ },
+ {
+ name => 'start linked clone with krbd',
+ preparations => [
+ ['pvesm', 'set', $storage_name, '--krbd', 1]
+ ],
+ steps => [
+ ['qm', 'start', $vmid_linked_clone],
+ ['qm', 'stop', $vmid_linked_clone],
+ ],
+ },
+];
+
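+# Run the preparation or cleanup commands of a test section; these are
+# not TAP test steps, any failure here is fatal.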
+sub run_prep_cleanup {
+ my ($cmds) = @_;
+
+ for (@$cmds) {
+ print join(' ', @$_). "\n";
+ run_cmd($_);
+ }
+}
+
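+# Run the actual test steps of a section; each step is one TAP assertion.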
+sub run_steps {
+ my ($steps) = @_;
+
+ for (@$steps) {
+ ok(run_test_cmd($_), join(' ', @$_));
+ }
+}
+
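+# Count all steps for the TAP plan, run each section and clean up the
+# test environment if all tests passed.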
+sub run_tests {
+ print "Running tests:\n";
+
+ my $num_tests = 0;
+ for (@$tests) {
+ $num_tests += scalar(@{$_->{steps}}) if defined $_->{steps};
+ }
+
+ print("Tests: $num_tests\n");
+ plan tests => $num_tests;
+
+ for my $test (@$tests) {
+ print "Section: $test->{name}\n";
+ run_prep_cleanup($test->{preparations}) if defined $test->{preparations};
+ run_steps($test->{steps}) if defined $test->{steps};
+ run_prep_cleanup($test->{cleanup}) if defined $test->{cleanup};
+ }
+
+ done_testing();
+
+ if (Test::More->builder->is_passing()) {
+ cleanup();
+ }
+}
+
+if ($cleanup) {
+ cleanup();
+} else {
+ prepare();
+ run_tests();
+}
+
--
2.20.1
Thread overview: 7+ messages
2021-04-07 14:22 [pve-devel] [PATCH v2 storage 0/3] ceph: add namespace support Aaron Lauterer
2021-04-07 14:22 ` [pve-devel] [PATCH v2 storage 1/3] rbd: centralize rbd path concatenation Aaron Lauterer
2021-04-12 12:39 ` [pve-devel] applied: " Thomas Lamprecht
2021-04-07 14:22 ` [pve-devel] [PATCH v2 storage 2/3] rbd: fix #3286 add namespace support Aaron Lauterer
2021-04-12 12:41 ` [pve-devel] applied: " Thomas Lamprecht
2021-04-07 14:22 ` Aaron Lauterer [this message]
2021-04-12 12:48 ` [pve-devel] applied: [PATCH v2 storage 3/3] rbd: add integration test for namespace handling Thomas Lamprecht