From: Aaron Lauterer <a.lauterer@proxmox.com>
To: pve-devel@lists.proxmox.com
Subject: [pve-devel] [PATCH container 7/7] api: move-volume: add move to another container
Date: Tue,  1 Jun 2021 18:10:25 +0200
Message-ID: <20210601161025.32024-8-a.lauterer@proxmox.com>
In-Reply-To: <20210601161025.32024-1-a.lauterer@proxmox.com>

The goal of this patch is to expand the move-volume API endpoint so that a
container volume / mount point can be moved to another container.

Currently this works for regular mount points; it would be nice to be able
to do it for unused mount points as well.
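
For illustration, moving a mount point to another container could then be
triggered roughly like this (hypothetical node name, container IDs and mount
point keys; assuming the endpoint path stays at .../lxc/{vmid}/move_volume):

    # move mp0 of CT 100 to CT 101, where it becomes mp1
    pvesh create /nodes/<node>/lxc/100/move_volume --volume mp0 \
        --target-vmid 101 --target-mp mp1

    # optionally guard both configs against concurrent modifications
    pvesh create /nodes/<node>/lxc/100/move_volume --volume mp0 \
        --target-vmid 101 --target-mp mp1 --digest <sha1> --target-digest <sha1>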

Signed-off-by: Aaron Lauterer <a.lauterer@proxmox.com>
---
This is mostly the code from qemu-server with some adaptations, mainly to
the error messages and some of the checks.

The previously existing checks have been moved into '$move_to_storage_checks'.
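
The unchanged move-to-storage path, whose checks now live in
'$move_to_storage_checks', is still invoked as before, e.g. (hypothetical
storage name, same path assumption as above):

    pvesh create /nodes/<node>/lxc/100/move_volume --volume mp0 --storage <storage>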


 src/PVE/API2/LXC.pm | 229 ++++++++++++++++++++++++++++++++++++++++----
 src/PVE/CLI/pct.pm  |   2 +-
 2 files changed, 209 insertions(+), 22 deletions(-)

diff --git a/src/PVE/API2/LXC.pm b/src/PVE/API2/LXC.pm
index 4965f5d..373dff1 100644
--- a/src/PVE/API2/LXC.pm
+++ b/src/PVE/API2/LXC.pm
@@ -27,6 +27,8 @@ use PVE::API2::LXC::Snapshot;
 use PVE::JSONSchema qw(get_standard_option);
 use base qw(PVE::RESTHandler);
 
 BEGIN {
     if (!$ENV{PVE_GENERATING_DOCS}) {
 	require PVE::HA::Env::PVE2;
@@ -1750,10 +1752,12 @@ __PACKAGE__->register_method({
     method => 'POST',
     protected => 1,
     proxyto => 'node',
-    description => "Move a rootfs-/mp-volume to a different storage",
+    description => "Move a rootfs-/mp-volume to a different storage or to a different container.",
     permissions => {
 	description => "You need 'VM.Config.Disk' permissions on /vms/{vmid}, " .
-	    "and 'Datastore.AllocateSpace' permissions on the storage.",
+	    "and 'Datastore.AllocateSpace' permissions on the storage. To move ".
+	    "a volume to another container, you need 'VM.Config.Disk' permissions ".
+	    "on the target container as well.",
 	check =>
 	[ 'and',
 	  ['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]],
@@ -1765,14 +1769,20 @@ __PACKAGE__->register_method({
 	properties => {
 	    node => get_standard_option('pve-node'),
 	    vmid => get_standard_option('pve-vmid', { completion => \&PVE::LXC::complete_ctid }),
+	    'target-vmid' => get_standard_option('pve-vmid', {
+		completion => \&PVE::LXC::complete_ctid,
+		optional => 1,
+	    }),
 	    volume => {
 		type => 'string',
+		#TODO: check how to handle unused mount points as the mp parameter is not configured
 		enum => [ PVE::LXC::Config->valid_volume_keys() ],
 		description => "Volume which will be moved.",
 	    },
 	    storage => get_standard_option('pve-storage-id', {
 		description => "Target Storage.",
 		completion => \&PVE::Storage::complete_storage_enabled,
+		optional => 1,
 	    }),
 	    delete => {
 		type => 'boolean',
@@ -1793,6 +1803,18 @@ __PACKAGE__->register_method({
 		minimum => '0',
 		default => 'clone limit from datacenter or storage config',
 	    },
+	    'target-mp' => {
+	        type => 'string',
+		description => "The config key the mount point will be moved to on the target container.",
+		enum => [PVE::LXC::Config->valid_volume_keys()],
+		optional => 1,
+	    },
+	    'target-digest' => {
+		type => 'string',
+		description => 'Prevent changes if current configuration file of the target container has a different SHA1 digest. This can be used to prevent concurrent modifications.',
+		maxLength => 40,
+		optional => 1,
+	    },
 	},
     },
     returns => {
@@ -1807,32 +1829,49 @@ __PACKAGE__->register_method({
 
 	my $vmid = extract_param($param, 'vmid');
 
+	my $target_vmid = extract_param($param, 'target-vmid');
+
 	my $storage = extract_param($param, 'storage');
 
 	my $mpkey = extract_param($param, 'volume');
 
+	my $target_mp = extract_param($param, 'target-mp');
+
+	my $digest = extract_param($param, 'digest');
+
+	my $target_digest = extract_param($param, 'target-digest');
+
 	my $lockname = 'disk';
 
 	my ($mpdata, $old_volid);
 
-	PVE::LXC::Config->lock_config($vmid, sub {
-	    my $conf = PVE::LXC::Config->load_config($vmid);
-	    PVE::LXC::Config->check_lock($conf);
+	die "either set storage or target-vmid, but not both\n"
+	    if $storage && $target_vmid;
 
-	    die "cannot move volumes of a running container\n" if PVE::LXC::check_running($vmid);
+	die "cannot move volumes of a running container\n" if PVE::LXC::check_running($vmid);
 
-	    $mpdata = PVE::LXC::Config->parse_volume($mpkey, $conf->{$mpkey});
-	    $old_volid = $mpdata->{volume};
+	my $storecfg = PVE::Storage::config();
+	my $source_volid;
 
-	    die "you can't move a volume with snapshots and delete the source\n"
-		if $param->{delete} && PVE::LXC::Config->is_volume_in_use_by_snapshots($conf, $old_volid);
+	my $move_to_storage_checks = sub {
+	    PVE::LXC::Config->lock_config($vmid, sub {
+		my $conf = PVE::LXC::Config->load_config($vmid);
+		PVE::LXC::Config->check_lock($conf);
 
-	    PVE::Tools::assert_if_modified($param->{digest}, $conf->{digest});
 
-	    PVE::LXC::Config->set_lock($vmid, $lockname);
-	});
+		$mpdata = PVE::LXC::Config->parse_volume($mpkey, $conf->{$mpkey});
+		$old_volid = $mpdata->{volume};
 
-	my $realcmd = sub {
+		die "you can't move a volume with snapshots and delete the source\n"
+		    if $param->{delete} && PVE::LXC::Config->is_volume_in_use_by_snapshots($conf, $old_volid);
+
+		PVE::Tools::assert_if_modified($digest, $conf->{digest});
+
+		PVE::LXC::Config->set_lock($vmid, $lockname);
+	    });
+	};
+
+	my $storage_realcmd = sub {
 	    eval {
 		PVE::Cluster::log_msg('info', $authuser, "move volume CT $vmid: move --volume $mpkey --storage $storage");
 
@@ -1902,15 +1941,163 @@ __PACKAGE__->register_method({
 	    warn $@ if $@;
 	    die $err if $err;
 	};
-	my $task = eval {
-	    $rpcenv->fork_worker('move_volume', $vmid, $authuser, $realcmd);
+
+	my $load_and_check_reassign_configs = sub {
+	    my $vmlist = PVE::Cluster::get_vmlist()->{ids};
+	    die "Both containers need to be on the same node ($vmlist->{$vmid}->{node}), but the target container is on $vmlist->{$target_vmid}->{node}.\n"
+		if $vmlist->{$vmid}->{node} ne $vmlist->{$target_vmid}->{node};
+
+	    my $source_conf = PVE::LXC::Config->load_config($vmid);
+	    PVE::LXC::Config->check_lock($source_conf);
+	    my $target_conf = PVE::LXC::Config->load_config($target_vmid);
+	    PVE::LXC::Config->check_lock($target_conf);
+
+	    die "Can't move volumes from or to template containers\n"
+		if ($source_conf->{template} || $target_conf->{template});
+
+	    if ($digest) {
+		eval { PVE::Tools::assert_if_modified($digest, $source_conf->{digest}) };
+		if (my $err = $@) {
+		    die "Container ${vmid}: ${err}";
+		}
+	    }
+
+	    if ($target_digest) {
+		eval { PVE::Tools::assert_if_modified($target_digest, $target_conf->{digest}) };
+		if (my $err = $@) {
+		    die "Container ${target_vmid}: ${err}";
+		}
+	    }
+
+	    die "volume '${mpkey}' for container '$vmid' does not exist\n"
+		if !defined($source_conf->{$mpkey});
+
+	    die "Target volume key '${target_mp}' is already in use for container '$target_vmid'\n"
+		if exists $target_conf->{$target_mp};
+
+	    my $drive = PVE::LXC::Config->parse_volume(
+		$mpkey,
+		$source_conf->{$mpkey},
+	    );
+
+	    $source_volid = $drive->{volume};
+
+	    die "mount point '${mpkey}' has no associated volume\n"
+		if !$source_volid;
+
+	    die "Storage does not support moving this volume to another container\n"
+		if !PVE::Storage::volume_has_feature(
+		    $storecfg,
+		    'rename',
+		    $source_volid,
+		);
+
+	    die "Cannot move a bind mount or device mount to another container\n"
+		if PVE::LXC::Config->classify_mountpoint($source_volid) ne "volume";
+	    die "Cannot move volume to another container while the source container is running\n"
+		if PVE::LXC::check_running($vmid)
+		    && $mpkey !~ m/^unused\d+$/;
+
+	    return ($source_conf, $target_conf);
+	};
+
+	my $logfunc = sub {
+	    my ($msg) = @_;
+	    print STDERR "$msg\n";
 	};
-	if (my $err = $@) {
-	    eval { PVE::LXC::Config->remove_lock($vmid, $lockname) };
-	    warn $@ if $@;
-	    die $err;
+
+	my $volume_reassignfn = sub {
+	    return PVE::LXC::Config->lock_config($vmid, sub {
+		return PVE::LXC::Config->lock_config($target_vmid, sub {
+		    my ($source_conf, $target_conf) = &$load_and_check_reassign_configs();
+
+		    my $drive_param = PVE::LXC::Config->parse_volume(
+			$target_mp,
+			$source_conf->{$mpkey},
+		    );
+
+		    print "moving volume '$mpkey' from container '$vmid' to '$target_vmid'\n";
+		    my ($storage, undef) = PVE::Storage::parse_volume_id($source_volid);
+
+		    my $format = PVE::Storage::storage_default_format($storecfg, $storage);
+
+		    my $target_volname = PVE::Storage::find_free_diskname($storecfg, $storage, $target_vmid, $format);
+
+		    my $new_volid = PVE::Storage::rename_volume(
+			$storecfg,
+			$source_volid,
+			$vmid,
+			$target_volname,
+			$target_vmid,
+		    );
+
+		    $drive_param->{volume} = $new_volid;
+
+		    delete $source_conf->{$mpkey};
+		    print "removing volume '${mpkey}' from container '${vmid}' config\n";
+		    print "$new_volid\n";
+		    PVE::LXC::Config->write_config($vmid, $source_conf);
+
+		    my $drive_string = PVE::LXC::Config->print_volume($target_mp, $drive_param);
+		    my $running = PVE::LXC::check_running($target_vmid);
+		    my $param = { $target_mp => $drive_string };
+
+		    PVE::LXC::Config->update_pct_config($target_vmid, $target_conf, $running, $param);
+
+		    PVE::LXC::Config->write_config($target_vmid, $target_conf);
+		    $target_conf = PVE::LXC::Config->load_config($target_vmid);
+
+		    PVE::LXC::update_lxc_config($target_vmid, $target_conf);
+		    print "target container '$target_vmid' updated with '$target_mp'\n";
+
+		    # remove possible replication snapshots
+		    if (PVE::Storage::volume_has_feature(
+			    $storecfg,
+			    'replicate',
+			    $source_volid),
+		    ) {
+			eval {
+			    PVE::Replication::prepare(
+				$storecfg,
+				[$new_volid],
+				undef,
+				1,
+				undef,
+				$logfunc,
+			    )
+			};
+			if (my $err = $@) {
+			    print "Failed to remove replication snapshots on volume '$target_mp'. Manual cleanup could be necessary: $err\n";
+			}
+		    }
+		});
+	    });
+	};
+
+	if ($target_vmid) {
+	    $rpcenv->check_vm_perm($authuser, $target_vmid, undef, ['VM.Config.Disk'])
+		if $authuser ne 'root@pam';
+
+	    die "Moving a volume to the same container is not possible. Did you mean to move the volume to a different storage?\n"
+		if $vmid eq $target_vmid;
+	    &$load_and_check_reassign_configs();
+	    return $rpcenv->fork_worker('move_volume', "${vmid}-${mpkey}>${target_vmid}-${target_mp}", $authuser, $volume_reassignfn);
+	} elsif ($storage) {
+	    &$move_to_storage_checks();
+	    my $task = eval {
+		$rpcenv->fork_worker('move_volume', $vmid, $authuser, $storage_realcmd);
+	    };
+	    if (my $err = $@) {
+		eval { PVE::LXC::Config->remove_lock($vmid, $lockname) };
+		warn $@ if $@;
+		die $err;
+	    }
+	    return $task;
+	} else {
+	    die "Provide either a 'storage' to move the mount point to a ".
+		"different storage or 'target-vmid' and 'target-mp' to move ".
+		"the mount point to another container\n";
 	}
-	return $task;
   }});
 
 __PACKAGE__->register_method({
diff --git a/src/PVE/CLI/pct.pm b/src/PVE/CLI/pct.pm
index 0db23a4..de2d4e2 100755
--- a/src/PVE/CLI/pct.pm
+++ b/src/PVE/CLI/pct.pm
@@ -854,7 +854,7 @@ our $cmddef = {
 
     clone => [ "PVE::API2::LXC", 'clone_vm', ['vmid', 'newid'], { node => $nodename }, $upid_exit ],
     migrate => [ "PVE::API2::LXC", 'migrate_vm', ['vmid', 'target'], { node => $nodename }, $upid_exit],
-    'move-volume' => [ "PVE::API2::LXC", 'move_volume', ['vmid', 'volume', 'storage'], { node => $nodename }, $upid_exit ],
+    'move-volume' => [ "PVE::API2::LXC", 'move_volume', ['vmid', 'volume', 'storage', 'target-vmid', 'target-mp'], { node => $nodename }, $upid_exit ],
     move_volume => { alias => 'move-disk' },
 
     snapshot => [ "PVE::API2::LXC::Snapshot", 'snapshot', ['vmid', 'snapname'], { node => $nodename } , $upid_exit ],
-- 
2.20.1
