Date: Thu, 21 Nov 2024 15:22:47 +0100
From: Fabian Grünbichler
To: Proxmox Backup Server development discussion <pbs-devel@lists.proxmox.com>
References: <20241113150102.164820-1-h.laimer@proxmox.com> <20241113150102.164820-9-h.laimer@proxmox.com>
In-Reply-To: <20241113150102.164820-9-h.laimer@proxmox.com>
Message-Id: <1732197970.jvav0hqb6h.astroid@yuna.none>
Subject: Re: [pbs-devel] [PATCH proxmox-backup v13 08/26] api: removable datastore creation

On November 13, 2024 4:00 pm, Hannes Laimer wrote:
> Devices can contain multiple datastores; the only limitation is that
> they are not allowed to be nested.
> If the specified path already contains a datastore, `reuse datastore`
> has to be set so it'll be added without creating a chunkstore.
>
> Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
> ---
> changes since v12:
> * use recently added 'reuse datastore'
> * allow creation even if device is already used by datastore, just no
>   nesting
>
>  src/api2/config/datastore.rs | 50 +++++++++++++++++++++++++++++++-----
>  1 file changed, 44 insertions(+), 6 deletions(-)
>
> diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
> index 374c302f..9140a7a4 100644
> --- a/src/api2/config/datastore.rs
> +++ b/src/api2/config/datastore.rs
> @@ -20,7 +20,8 @@ use pbs_config::BackupLockGuard;
>  use pbs_datastore::chunk_store::ChunkStore;
>
>  use crate::api2::admin::{
> -    prune::list_prune_jobs, sync::list_sync_jobs, verify::list_verification_jobs,
> +    datastore::do_mount_device, prune::list_prune_jobs, sync::list_sync_jobs,
> +    verify::list_verification_jobs,
>  };
>  use crate::api2::config::prune::{delete_prune_job, do_create_prune_job};
>  use crate::api2::config::sync::delete_sync_job;
> @@ -31,6 +32,7 @@ use pbs_config::CachedUserInfo;
>  use proxmox_rest_server::WorkerTask;
>
>  use crate::server::jobstate;
> +use crate::tools::disks::unmount_by_mountpoint;
>
>  #[api(
>      input: {
> @@ -72,7 +74,11 @@ pub(crate) fn do_create_datastore(
>      datastore: DataStoreConfig,
>      reuse_datastore: bool,
>  ) -> Result<(), Error> {
> -    let path: PathBuf = datastore.path.clone().into();
> +    let path: PathBuf = datastore.absolute_path().into();
> +    let need_unmount = datastore.get_mount_point().is_some() && {

nit: would be easier to read as

    let need_unmount = datastore.get_mount_point().is_some();
    if need_unmount {
        do_mount_device(..)?;
    }

> +        do_mount_device(datastore.clone())?;
> +        true
> +    };
>
>      if path.parent().is_none() {
>          bail!("cannot create datastore in root path");

this can fail (well, not really for a removable datastore), but so can
some parsing code between this

> @@ -84,24 +90,32 @@ pub(crate) fn do_create_datastore(

and this, and the same pattern repeats below as well.. it might be
better to wrap most of the body after the mounting, check for any
error, and then do the cleanup/unmounting in one place? (rough sketch
further down)

>      )?;
>
>      if reuse_datastore {
> -        ChunkStore::verify_chunkstore(&path)?;
> +        if let Err(e) = ChunkStore::verify_chunkstore(&path) {
> +            let _ = need_unmount && unmount_by_mountpoint(&path).is_ok();
> +            return Err(e);
> +        }

then this

>      } else {
>          if let Ok(dir) = std::fs::read_dir(&path) {
>              for file in dir {
>                  let name = file?.file_name();
>                  if !name.to_str().map_or(false, |name| name.starts_with('.')) {
> +                    let _ = need_unmount && unmount_by_mountpoint(&path).is_ok();
>                      bail!("datastore path is not empty");

and this

>                  }
>              }
>          }
>          let backup_user = pbs_config::backup_user()?;
> -        let _store = ChunkStore::create(
> +        let res = ChunkStore::create(
>              &datastore.name,
> -            path,
> +            path.clone(),
>              backup_user.uid,
>              backup_user.gid,
>              tuning.sync_level.unwrap_or_default(),
> -        )?;
> +        );
> +        if let Err(e) = res {
> +            let _ = need_unmount && unmount_by_mountpoint(&path).is_ok();

and this could all just return/bubble up the error, with the cleanup
logic living one call level higher..

> +            return Err(e);
> +        }
>      }
>
>      config.set_data(&datastore.name, "datastore", &datastore)?;
> @@ -145,6 +159,30 @@ pub fn create_datastore(
>          param_bail!("name", "datastore '{}' already exists.", config.name);
>      }
>
> +    if !config.path.starts_with("/") {
> +        param_bail!("path", "expected an abolute path, '{}' is not", config.path);
> +    }

but the schema is now updated to allow relative paths for removable
datastores? doesn't this need another condition, so that relative paths
are only allowed for removable datastores?
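i.e. something along these lines (untested, and assuming a set
`backing_device` is what marks a datastore as removable):

    if config.backing_device.is_none() && !config.path.starts_with("/") {
        param_bail!("path", "expected an absolute path, '{}' is not", config.path);
    }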
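and regarding the unmount/cleanup comments further up, roughly what I
had in mind (untested sketch - `do_create_datastore_inner` is a
hypothetical helper that would contain everything currently running
after the mount, with every error site just returning the error):

    let path: PathBuf = datastore.absolute_path().into();

    let need_unmount = datastore.get_mount_point().is_some();
    if need_unmount {
        do_mount_device(datastore.clone())?;
    }

    // hypothetical helper holding the rest of the current fn body
    let res = do_create_datastore_inner(config, datastore, reuse_datastore, &path);

    // single cleanup site for the mount instead of one per error path
    if res.is_err() && need_unmount {
        let _ = unmount_by_mountpoint(&path);
    }

    res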
re the path check: I guess this was only tested via the
create_datastore_disk code path, which calls do_create_datastore
directly, and not this API endpoint..

> +
> +    if let Some(uuid) = &config.backing_device {

but this here should apply to all datastores? it causes GC confusion
also for regular ones if they get nested.. and since this only affects
attempts to create datastores, it should be okay to make it fatal?

> +        for (store_name, (_, store_config)) in &section_config.sections {
> +            if let (Some(store_uuid), Some(store_path)) = (
> +                store_config["backing-device"].as_str(),
> +                store_config["path"].as_str(),
> +            ) {
> +                // We don't allow two datastores to be nested in each other, so if
> +                // ds1: /a/b -> can't create new one at /, /a or /a/b/..., /a/c is fine
> +                if store_uuid == uuid
> +                    && (store_path.starts_with(&config.path)
> +                        || config.path.starts_with(store_path))
> +                {
> +                    param_bail!(
> +                        "path",
> +                        "can't nest datastores, '{store_name}' already in '{store_path}'",

"nested datastores not allowed: " is a bit easier/nicer to read I think

> +                    );
> +                }
> +            };
> +        }
> +    }
> +
>      let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
>      let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
>
> --
> 2.39.5

_______________________________________________
pbs-devel mailing list
pbs-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel