From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Date: Tue, 22 Jul 2025 12:11:05 +0200
Message-ID: <20250722101106.526438-50-c.ebner@proxmox.com>
X-Mailer: git-send-email 2.47.2
In-Reply-To: <20250722101106.526438-1-c.ebner@proxmox.com>
References: <20250722101106.526438-1-c.ebner@proxmox.com>
Subject: [pbs-devel] [PATCH proxmox-backup v11 45/46] api/ui: add flag to allow overwriting in-use marker for s3 backend

Datastores backed by an S3 object store mark the corresponding bucket
prefix, given by the datastore name, as in-use to protect against
accidental reuse of the same datastore by other instances. If the
datastore has to be re-created because the Proxmox Backup Server
instance is no longer available, it is necessary to skip this check and
overwrite the marker with the current hostname.

Expose this flag on the datastore create API endpoint as well as in the
web UI and the CLI command.

Signed-off-by: Christian Ebner
Reviewed-by: Lukas Wagner
Reviewed-by: Hannes Laimer
---
changes since version 10:
- no changes

 src/api2/config/datastore.rs                | 51 ++++++++++++++-------
 src/api2/node/disks/directory.rs            |  2 +-
 src/api2/node/disks/zfs.rs                  |  2 +-
 src/bin/proxmox_backup_manager/datastore.rs |  6 +++
 www/window/DataStoreEdit.js                 | 22 +++++++++
 5 files changed, 64 insertions(+), 19 deletions(-)

diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
index 969beb14d..8abed7e32 100644
--- a/src/api2/config/datastore.rs
+++ b/src/api2/config/datastore.rs
@@ -113,6 +113,7 @@ pub(crate) fn do_create_datastore(
     mut config: SectionConfigData,
     datastore: DataStoreConfig,
     reuse_datastore: bool,
+    overwrite_in_use: bool,
 ) -> Result<(), Error> {
     let path: PathBuf = datastore.absolute_path().into();
 
@@ -156,21 +157,24 @@ pub(crate) fn do_create_datastore(
             proxmox_async::runtime::block_on(s3_client.head_bucket())
                 .context("failed to access bucket")?;
 
-            let object_key = S3ObjectKey::try_from(S3_DATASTORE_IN_USE_MARKER)
-                .context("failed to generate s3 object key")?;
-            if let Some(response) =
-                proxmox_async::runtime::block_on(s3_client.get_object(object_key.clone()))
-                    .context("failed to get in-use marker from bucket")?
-            {
-                let content = proxmox_async::runtime::block_on(response.content.collect())
-                    .unwrap_or_default();
-                let content =
-                    String::from_utf8(content.to_bytes().to_vec()).unwrap_or_default();
-                let in_use: InUseContent = serde_json::from_str(&content).unwrap_or_default();
-                if let Some(hostname) = in_use.hostname {
-                    bail!("Bucket already contains datastore in use by host {hostname}");
-                } else {
-                    bail!("Bucket already contains datastore in use");
+            if !overwrite_in_use {
+                let object_key = S3ObjectKey::try_from(S3_DATASTORE_IN_USE_MARKER)
+                    .context("failed to generate s3 object key")?;
+                if let Some(response) =
+                    proxmox_async::runtime::block_on(s3_client.get_object(object_key.clone()))
+                        .context("failed to get in-use marker from bucket")?
+                {
+                    let content = proxmox_async::runtime::block_on(response.content.collect())
+                        .unwrap_or_default();
+                    let content =
+                        String::from_utf8(content.to_bytes().to_vec()).unwrap_or_default();
+                    let in_use: InUseContent =
+                        serde_json::from_str(&content).unwrap_or_default();
+                    if let Some(hostname) = in_use.hostname {
+                        bail!("Bucket already contains datastore in use by host {hostname}");
+                    } else {
+                        bail!("Bucket already contains datastore in use");
+                    }
                 }
             }
             backend_s3_client = Some(Arc::new(s3_client));
@@ -263,7 +267,13 @@ pub(crate) fn do_create_datastore(
                 optional: true,
                 default: false,
                 description: "Re-use existing datastore directory."
-            }
+            },
+            "overwrite-in-use": {
+                type: Boolean,
+                optional: true,
+                default: false,
+                description: "Overwrite in use marker (S3 backed datastores only)."
+            },
         },
     },
     access: {
@@ -275,6 +285,7 @@ pub(crate) fn do_create_datastore(
 pub fn create_datastore(
     config: DataStoreConfig,
     reuse_datastore: bool,
+    overwrite_in_use: bool,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {
     let lock = pbs_config::datastore::lock_config()?;
@@ -343,7 +354,13 @@ pub fn create_datastore(
         auth_id.to_string(),
         to_stdout,
         move |_worker| {
-            do_create_datastore(lock, section_config, config, reuse_datastore)?;
+            do_create_datastore(
+                lock,
+                section_config,
+                config,
+                reuse_datastore,
+                overwrite_in_use,
+            )?;
 
             if let Some(prune_job_config) = prune_job_config {
                 do_create_prune_job(prune_job_config)?;
diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs
index 62f463437..74819079c 100644
--- a/src/api2/node/disks/directory.rs
+++ b/src/api2/node/disks/directory.rs
@@ -254,7 +254,7 @@ pub fn create_datastore_disk(
             }
 
             crate::api2::config::datastore::do_create_datastore(
-                lock, config, datastore, false,
+                lock, config, datastore, false, false,
             )?;
         }
 
diff --git a/src/api2/node/disks/zfs.rs b/src/api2/node/disks/zfs.rs
index b6cf18265..cdb7cc6a1 100644
--- a/src/api2/node/disks/zfs.rs
+++ b/src/api2/node/disks/zfs.rs
@@ -314,7 +314,7 @@ pub fn create_zpool(
             }
 
             crate::api2::config::datastore::do_create_datastore(
-                lock, config, datastore, false,
+                lock, config, datastore, false, false,
             )?;
         }
 
diff --git a/src/bin/proxmox_backup_manager/datastore.rs b/src/bin/proxmox_backup_manager/datastore.rs
index 703974882..45ad27049 100644
--- a/src/bin/proxmox_backup_manager/datastore.rs
+++ b/src/bin/proxmox_backup_manager/datastore.rs
@@ -113,6 +113,12 @@ fn show_datastore(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result
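
Note for reviewers (not part of the patch): the snippet below is a minimal,
standalone Rust sketch of the control flow this series adds around the S3
in-use marker check in do_create_datastore. The `InUseMarker` struct and the
`check_in_use` helper are simplified stand-ins invented for this note; the
real implementation fetches the marker object via the S3 client and parses it
as JSON (InUseContent).

    // Simplified stand-in for the marker object stored in the bucket.
    struct InUseMarker {
        hostname: Option<String>,
    }

    /// Refuse datastore creation if the bucket already carries an in-use
    /// marker, unless the caller explicitly asked to overwrite it.
    fn check_in_use(marker: Option<&InUseMarker>, overwrite_in_use: bool) -> Result<(), String> {
        if overwrite_in_use {
            // Re-creation was requested: skip the check, the marker is later
            // rewritten with the current hostname.
            return Ok(());
        }
        match marker {
            Some(InUseMarker { hostname: Some(host) }) => {
                Err(format!("Bucket already contains datastore in use by host {host}"))
            }
            Some(_) => Err("Bucket already contains datastore in use".to_string()),
            None => Ok(()),
        }
    }

    fn main() {
        let marker = InUseMarker { hostname: Some("pbs-old".to_string()) };
        // Without the flag, creation on a marked bucket is rejected ...
        assert!(check_in_use(Some(&marker), false).is_err());
        // ... with overwrite-in-use set, the marker is ignored and overwritten.
        assert!(check_in_use(Some(&marker), true).is_ok());
        println!("in-use marker handling behaves as expected");
    }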