From: Thomas Lamprecht
To: Proxmox VE development discussion, Aaron Lauterer
Date: Thu, 1 Apr 2021 16:34:47 +0200
Subject: Re: [pve-devel] [PATCH storage 2/2] rbd: add integration test for namespace handling
In-Reply-To: <20210401134057.26305-2-a.lauterer@proxmox.com>
References: <20210401134057.26305-1-a.lauterer@proxmox.com> <20210401134057.26305-2-a.lauterer@proxmox.com>
List-Id: Proxmox VE development discussion

On 01.04.21 15:40, Aaron Lauterer wrote:
> This test is intended to be run on a hyperconverged PVE cluster to test
> the most common operations of VMs using a namespaced Ceph RBD pool.
>
> Signed-off-by: Aaron Lauterer
> ---
>
> Could probably be done nicer here and there but it worked (except for
> the sometimes racy rollback ...) and should help to test if anything
> broke in newer Ceph versions.

In general this is surely nice to have, and thanks for tackling it - some comments inline.

>
>  test/rbd_namespace.pl | 226 ++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 226 insertions(+)
>  create mode 100755 test/rbd_namespace.pl
>
> diff --git a/test/rbd_namespace.pl b/test/rbd_namespace.pl
> new file mode 100755
> index 0000000..b8a5532
> --- /dev/null
> +++ b/test/rbd_namespace.pl
> @@ -0,0 +1,226 @@
> +#!/usr/bin/perl
> +
> +# This script is meant to be run manually on hyperconverged PVE server with a
> +# Ceph cluster. It tests how PVE handles RBD namespaces.
> +#
> +# The pool (default: rbd) must already exist. The namespace and VMs will be
> +# created.
> +#
> +# Parameters like names for the pool an namespace and the VMID can be
> +# configured. The VMIDs for the clones is $vmid -1 and $vmid -2.
> +#
> +# Cleanup is done after a successful run. Cleanup can also be called manually.
> +#
> +# Known issues:
> +#
> +# * Snapshot rollback can sometimes be racy with stopping the VM and Ceph
> +#   recognizing that the disk image is not in use anymore.
> +
> +use strict;
> +use warnings;
> +
> +use Test::More;
> +use Getopt::Long;
> +use PVE::Tools qw(run_command);

nit: group use statements by perl modules and proxmox modules, so the above should be below

> +use Data::Dumper;

avoid Dumper, it's a hog and actually not required if one has ...

> +use JSON;

... JSON, I often just add a json print (jp) helper which wraps to_json with the correct settings.
Also it's nicer to have a global $DEBUG flag to switch such prints on/off centrally. E.g.:

sub jp {
    return if !$DEBUG;
    print to_json($_[0], { utf8 => 1, pretty => 1, canonical => 1 }) . "\n";
}

> +
> +my $pool = "rbd";
> +my $namespace = "testspace";
> +my $showhelp = '';
> +my $vmid = 999999;
> +my $cleanup = undef;
> +
> +my $helpstring = "To override default values, set them as named parameters:
> +
> +--pool       pool name, default: ${pool}
> +--namespace  rbd namespace, default: ${namespace}
> +--vmid       VMID of the test VM, default: ${vmid}
> +--cleanup    Remove the storage definitions, namespaces and VMs\n";
> +
> +GetOptions (
> +    "pool=s" => \$pool,
> +    "namespace=s" => \$namespace,
> +    "vmid=i" => \$vmid,
> +    "help" => \$showhelp,
> +    "cleanup" => \$cleanup,
> +) or die ($helpstring);
> +
> +die $helpstring if $showhelp;
> +
> +my $storage_name_rbd = "${pool}-${namespace}-rbd";
> +
> +my $vmid_clone = int($vmid) - 1;
> +my $vmid_linked_clone = int($vmid) - 2;
> +
> +sub run_cmd {
> +    my ($cmd, $json, $ignore_errors) = @_;
> +
> +    my $raw = '';
> +    my $parser = sub {$raw .= shift;};
> +
> +    eval {
> +        run_command($cmd, outfunc => $parser);
> +    };
> +    if (my $err = $@) {
> +        die $err if !$ignore_errors;
> +    }
> +
> +    if ($json) {
> +        my $result;
> +        if ($raw eq '') {
> +            $result = [];
> +        } elsif ($raw =~ m/^(\[.*\])$/s) { # untaint
> +            $result = JSON::decode_json($1);
> +        } else {
> +            die "got unexpected data from command: '$cmd' -> '$raw'\n";
> +        }
> +        return $result;
> +    }
> +    return $raw;
> +}
> +
> +sub run_test_cmd {
> +    my ($cmd) = @_;
> +    system($cmd);
> +    my $err = $? >> 8;
> +
> +    return 1 if $err == 0;
> +    return 0;
> +}
> +
> +sub prepare {
> +    my $pools = run_cmd("/usr/bin/ceph osd pool ls --format json", 1);
> +
> +    my %poolnames = map {$_ => 1} @$pools;
> +    die "Pool '$pool' does not exist!\n" if !exists($poolnames{$pool});

why not `pveceph pool create ` ?
Actually I'd expect that the default is some random name which gets created here and cleaned up at the end of this script (at least when we created it)

> +
> +    my $namespaces = run_cmd("/usr/bin/rbd -p ${pool} namespace ls --format json", 1);
> +    my $ns_found = 0;
> +    for my $i (@$namespaces) {
> +        #print Dumper $i;
> +        $ns_found = 1 if $i->{name} eq $namespace;
> +    }
> +
> +    if (!$ns_found) {
> +        print "Create namespace '${namespace}' in pool '${pool}'\n";
> +        run_cmd("/usr/bin/rbd namespace create ${pool}/${namespace}");
> +    }
> +
> +    my $storages = run_cmd("/usr/bin/pvesh get storage --output-format json", 1);

avoid fixed paths for stuff in /(usr/)?s?bin

Also, please use arrays for commands in test scripts too - I do not want any (potentially later added) $param to shell inject anything in a test either.
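Roughly something like this - just a sketch reusing the run_cmd signature from the patch, not a drop-in replacement:

sub run_cmd {
    my ($cmd, $json, $ignore_errors) = @_;
    die "expected an array reference as command\n" if ref($cmd) ne 'ARRAY';

    my $raw = '';
    # an array reference makes PVE::Tools::run_command execute the command
    # directly, without an intermediate shell, so interpolated values like
    # $pool or $namespace cannot shell-inject anything
    eval { run_command($cmd, outfunc => sub { $raw .= shift; }) };
    if (my $err = $@) {
        die $err if !$ignore_errors;
    }
    return $json ? JSON::decode_json($raw || '[]') : $raw;
}

# e.g. plain command name instead of an absolute path, one argument per element:
# my $namespaces = run_cmd(['rbd', '-p', $pool, 'namespace', 'ls', '--format', 'json'], 1);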
> +    #print Dumper $storages;
> +    my $rbd_found = 0;
> +    my $pool_found = 0;
> +
> +    print "Create storage definition\n";
> +    for my $stor (@$storages) {
> +        $pool_found = 1 if $stor->{storage} eq $pool;
> +        $rbd_found = 1 if $stor->{storage} eq $storage_name_rbd;
> +
> +        if ($rbd_found) {
> +            run_cmd("/usr/sbin/pvesm set ${storage_name_rbd} --krbd 0");
> +            die "Enable the storage '$stor->{storage}'!" if $stor->{disable};
> +        }
> +    }
> +    die "No storage for pool '${pool}' found! Must have same name as pool!\n"

same here, auto-create (and destroy) it in that case? Then this could be more easily run in a (planned) buildbot-triggered cluster test run.

> +        if !$pool_found;
> +    # create PVE storages (librbd / krbd)
> +    run_cmd("/usr/sbin/pvesm add rbd ${storage_name_rbd} --krbd 0 --pool ${pool} --namespace ${namespace} --content images,rootdir")
> +        if !$rbd_found;
> +
> +
> +    # create test VM
> +    print "Create test VM ${vmid}\n";
> +    my $vms = run_cmd("/usr/bin/pvesh get cluster/resources --type vm --output-format json", 1);
> +    for my $vm (@$vms) {
> +        # TODO: introduce a force flag to make this behaviour configurable
> +
> +        if ($vm->{vmid} eq $vmid) {
> +            print "Test VM '${vmid}' already exists. It will be removed and recreated!\n";
> +            run_cmd("/usr/sbin/qm stop ${vmid}", 0, 1);
> +            run_cmd("/usr/sbin/qm destroy ${vmid}");
> +        }
> +    }
> +    run_cmd("/usr/sbin/qm create ${vmid} --bios ovmf --efidisk0 ${storage_name_rbd}:1 --scsi0 ${storage_name_rbd}:2");
> +}
> +
> +
> +sub cleanup {
> +    print "Cleaning up test environment!\n";
> +    print "Removing VMs\n";
> +    run_cmd("/usr/sbin/qm stop ${vmid}", 0, 1);

same again for the run_cmd calls above and below, arrays and no absolute paths please.

> +    run_cmd("/usr/sbin/qm stop ${vmid_linked_clone}", 0, 1);
> +    run_cmd("/usr/sbin/qm stop ${vmid_clone}", 0, 1);
> +    run_cmd("/usr/sbin/qm destroy ${vmid_linked_clone}", 0, 1);
> +    run_cmd("/usr/sbin/qm destroy ${vmid_clone}", 0, 1);
> +    run_cmd("for i in /dev/rbd/${pool}/${namespace}/*; do /usr/bin/rbd unmap \$i; done", 0, 1);
> +    run_cmd("/usr/sbin/qm unlock ${vmid}", 0, 1);
> +    run_cmd("/usr/sbin/qm destroy ${vmid}", 0, 1);
> +
> +    print "Removing Storage definition for ${storage_name_rbd}\n";
> +    run_cmd("/usr/sbin/pvesm remove ${storage_name_rbd}", 0, 1);
> +
> +    print "Removing RBD namespace '${pool}/${namespace}'\n";
> +    run_cmd("/usr/bin/rbd namespace remove ${pool}/${namespace}", 0, 1);
> +}
> +
> +sub run_tests {
> +    my @tests = (
> +        {name => "Start VM", cmd => "/usr/sbin/qm start ${vmid}"},

Please define the tests outside of the sub and use a less flat notation.
As those are quite order dependent I would call the structure "steps"? Or have two levels. I'd actually omit the single command names and log the executed command in run_cmd, the command itself has that info encoded already.

my $tests = [
    {
        name => "snapshot/rollback",
        steps => [
            ['qm', 'snapshot', $vmid, 'test_snap'],
            ['qm', 'rollback', $vmid, 'test_snap'],
        ],
        cleanup => ['qm', 'unlock', $vmid], # that's what is now marked as "maintenance"
    },
    # ...
];

> +        {name => "Snapshot VM", cmd => "/usr/sbin/qm snapshot ${vmid} test"},
> +        {name => "Rollback VM to snapshot", cmd => "/usr/sbin/qm rollback ${vmid} test"},
> +        {name => "Remove lock", cmd => "/usr/sbin/qm unlock ${vmid}", maintenance => 1},
> +        {name => "Remove VM Snapshot", cmd => "/usr/sbin/qm delsnapshot ${vmid} test"},
> +        {name => "Move Disk to pool w/o namespace", cmd =>"/usr/sbin/qm move_disk ${vmid} scsi0 ${pool} --delete 1"},
> +        {name => "Move Disk to pool w/ namespace", cmd =>"/usr/sbin/qm move_disk ${vmid} scsi0 ${storage_name_rbd} --delete 1"},
> +        {name => "Stop VM", cmd =>"/usr/sbin/qm stop ${vmid}", maintenance => 1},
> +        {name => "Change to krbd", cmd =>"/usr/sbin/pvesm set ${storage_name_rbd} --krbd 1", maintenance => 1},
> +        {name => "Start VM w/ krbd", cmd => "/usr/sbin/qm start ${vmid}"},
> +        {name => "Move Disk to pool w/o namespace w/ krbd", cmd =>"/usr/sbin/qm move_disk ${vmid} scsi0 ${pool} --delete 1"},
> +        {name => "Move Disk to pool w/ namespace w/ krbd", cmd =>"/usr/sbin/qm move_disk ${vmid} scsi0 ${storage_name_rbd} --delete 1"},
> +        {name => "Snapshot VM w/ krbd", cmd => "/usr/sbin/qm snapshot ${vmid} test"},
> +        {name => "Rollback VM to snapshot w/ krbd", cmd => "/usr/sbin/qm rollback ${vmid} test"},
> +        {name => "Remove VM Snapshot w/ krbd", cmd => "/usr/sbin/qm delsnapshot ${vmid} test"},
> +        {name => "Clone VM w/ krbd", cmd => "/usr/sbin/qm clone ${vmid} ${vmid_clone}"},
> +        {name => "Stop VM", cmd =>"/usr/sbin/qm stop ${vmid}", maintenace => 1},
> +        {name => "Change to non krbd", cmd =>"/usr/sbin/pvesm set ${storage_name_rbd} --krbd 0", maintenance => 1},
> +        {name => "Convert to template", cmd =>"/usr/sbin/qm template ${vmid}"},
> +        {name => "Create linked clone", cmd =>"/usr/sbin/qm clone ${vmid} ${vmid_linked_clone}"},
> +        {name => "Start linked clone", cmd =>"/usr/sbin/qm start ${vmid_linked_clone}"},
> +        {name => "Stop linked clone", cmd =>"/usr/sbin/qm stop ${vmid_linked_clone}"},
> +        {name => "Change to krbd", cmd =>"/usr/sbin/pvesm set ${storage_name_rbd} --krbd 1", maintenance => 1},
> +        {name => "Start linked clone w/ krbd", cmd =>"/usr/sbin/qm start ${vmid_linked_clone}"},
> +        {name => "Stop linked clone w/ krbd", cmd =>"/usr/sbin/qm stop ${vmid_linked_clone}"},
> +    );
> +    print "Running tests:\n";
> +
> +    my $num_tests = 0;
> +    for (@tests) {
> +        $num_tests +=1 if !defined $_->{maintenance};
> +    }
> +
> +    plan tests => $num_tests;
> +
> +    for my $test (@tests) {
> +        print "$test->{name}\n";
> +        if ($test->{maintenance}) {
> +            run_cmd($test->{cmd});
> +        } else {
> +            ok(run_test_cmd($test->{cmd}), $test->{name});
> +        }
> +    }
> +
> +    done_testing();
> +
> +    if (Test::More->builder->is_passing()) {
> +        cleanup();
> +    }
> +}
> +
> +if ($cleanup) {
> +    cleanup();
> +} else {
> +    prepare();
> +    run_tests();
> +}
> +
>
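For completeness, a rough sketch of how the two-level $tests/steps structure proposed above could be driven - field names and the run_cmd signature come from the suggestion and the patch, the loop itself is only an example:

# iterate the proposed $tests/steps structure; every step is an array
# reference that the array-based run_cmd can execute without a shell
for my $test (@$tests) {
    my $passed = 1;
    for my $step (@{$test->{steps}}) {
        eval { run_cmd($step) };
        if ($@) {
            $passed = 0;
            last;
        }
    }
    # run the per-test cleanup (the former "maintenance" commands), ignoring errors
    run_cmd($test->{cleanup}, 0, 1) if $test->{cleanup};
    ok($passed, $test->{name});
}
done_testing();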