From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: <pbs-devel-bounces@lists.proxmox.com> Received: from firstgate.proxmox.com (firstgate.proxmox.com [212.224.123.68]) by lore.proxmox.com (Postfix) with ESMTPS id 0926E1FF165 for <inbox@lore.proxmox.com>; Thu, 8 May 2025 15:06:01 +0200 (CEST) Received: from firstgate.proxmox.com (localhost [127.0.0.1]) by firstgate.proxmox.com (Proxmox) with ESMTP id 83EEAFA00; Thu, 8 May 2025 15:06:12 +0200 (CEST) From: Christian Ebner <c.ebner@proxmox.com> To: pbs-devel@lists.proxmox.com Date: Thu, 8 May 2025 15:05:35 +0200 Message-Id: <20250508130555.494782-2-c.ebner@proxmox.com> X-Mailer: git-send-email 2.39.5 In-Reply-To: <20250508130555.494782-1-c.ebner@proxmox.com> References: <20250508130555.494782-1-c.ebner@proxmox.com> MIME-Version: 1.0 X-SPAM-LEVEL: Spam detection results: 0 AWL 0.028 Adjusted score from AWL reputation of From: address BAYES_00 -1.9 Bayes spam probability is 0 to 1% DMARC_MISSING 0.1 Missing DMARC policy KAM_DMARC_STATUS 0.01 Test Rule for DKIM or SPF Failure with Strict Alignment RCVD_IN_VALIDITY_CERTIFIED_BLOCKED 0.001 ADMINISTRATOR NOTICE: The query to Validity was blocked. See https://knowledge.validity.com/hc/en-us/articles/20961730681243 for more information. RCVD_IN_VALIDITY_RPBL_BLOCKED 0.001 ADMINISTRATOR NOTICE: The query to Validity was blocked. See https://knowledge.validity.com/hc/en-us/articles/20961730681243 for more information. RCVD_IN_VALIDITY_SAFE_BLOCKED 0.001 ADMINISTRATOR NOTICE: The query to Validity was blocked. See https://knowledge.validity.com/hc/en-us/articles/20961730681243 for more information. 
SPF_HELO_NONE 0.001 SPF: HELO does not publish an SPF Record SPF_PASS -0.001 SPF: sender matches SPF record Subject: [pbs-devel] [RFC v2 proxmox-backup 01/21] datastore/api: mark snapshots as trash on destroy X-BeenThere: pbs-devel@lists.proxmox.com X-Mailman-Version: 2.1.29 Precedence: list List-Id: Proxmox Backup Server development discussion <pbs-devel.lists.proxmox.com> List-Unsubscribe: <https://lists.proxmox.com/cgi-bin/mailman/options/pbs-devel>, <mailto:pbs-devel-request@lists.proxmox.com?subject=unsubscribe> List-Archive: <http://lists.proxmox.com/pipermail/pbs-devel/> List-Post: <mailto:pbs-devel@lists.proxmox.com> List-Help: <mailto:pbs-devel-request@lists.proxmox.com?subject=help> List-Subscribe: <https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel>, <mailto:pbs-devel-request@lists.proxmox.com?subject=subscribe> Reply-To: Proxmox Backup Server development discussion <pbs-devel@lists.proxmox.com> Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit Errors-To: pbs-devel-bounces@lists.proxmox.com Sender: "pbs-devel" <pbs-devel-bounces@lists.proxmox.com> In order to implement the trash can functionality, mark snapshots as trash instead of removing them by default. However, provide a `skip-trash` flag to opt out and destroy the snapshot including its contents immediately. Trashed snapshots are marked by creating a `.trashed` marker file inside the snapshot folder. Actual removal of the snapshot will be deferred to the garbage collection task. 
Signed-off-by: Christian Ebner <c.ebner@proxmox.com> --- pbs-datastore/src/backup_info.rs | 66 ++++++++++++++++++-------------- pbs-datastore/src/datastore.rs | 2 +- src/api2/admin/datastore.rs | 18 ++++++++- 3 files changed, 55 insertions(+), 31 deletions(-) diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs index d4732fdd9..76bcd15f5 100644 --- a/pbs-datastore/src/backup_info.rs +++ b/pbs-datastore/src/backup_info.rs @@ -21,6 +21,7 @@ use crate::manifest::{BackupManifest, MANIFEST_LOCK_NAME}; use crate::{DataBlob, DataStore}; pub const DATASTORE_LOCKS_DIR: &str = "/run/proxmox-backup/locks"; +pub const TRASH_MARKER_FILENAME: &str = ".trashed"; // TODO: Remove with PBS 5 // Note: The `expect()` call here will only happen if we can neither confirm nor deny the existence @@ -228,7 +229,7 @@ impl BackupGroup { delete_stats.increment_protected_snapshots(); continue; } - snap.destroy(false)?; + snap.destroy(false, false)?; delete_stats.increment_removed_snapshots(); } @@ -575,7 +576,8 @@ impl BackupDir { /// Destroy the whole snapshot, bails if it's protected /// /// Setting `force` to true skips locking and thus ignores if the backup is currently in use. - pub fn destroy(&self, force: bool) -> Result<(), Error> { + /// Setting `skip_trash` to true will remove the snapshot instead of marking it as trash. + pub fn destroy(&self, force: bool, skip_trash: bool) -> Result<(), Error> { let (_guard, _manifest_guard); if !force { _guard = self @@ -588,37 +590,45 @@ impl BackupDir { bail!("cannot remove protected snapshot"); // use special error type? 
} - let full_path = self.full_path(); - log::info!("removing backup snapshot {:?}", full_path); - std::fs::remove_dir_all(&full_path).map_err(|err| { - format_err!("removing backup snapshot {:?} failed - {}", full_path, err,) - })?; + let mut full_path = self.full_path(); + log::info!("removing backup snapshot {full_path:?}"); + if skip_trash { + std::fs::remove_dir_all(&full_path).map_err(|err| { + format_err!("removing backup snapshot {full_path:?} failed - {err}") + })?; + } else { + full_path.push(TRASH_MARKER_FILENAME); + let _trash_file = + std::fs::File::create(full_path).context("failed to set trash file")?; + } // remove no longer needed lock files let _ = std::fs::remove_file(self.manifest_lock_path()); // ignore errors let _ = std::fs::remove_file(self.lock_path()); // ignore errors - let group = BackupGroup::from(self); - let guard = group.lock().with_context(|| { - format!("while checking if group '{group:?}' is empty during snapshot destruction") - }); - - // Only remove the group if all of the following is true: - // - // - we can lock it: if we can't lock the group, it is still in use (either by another - // backup process or a parent caller (who needs to take care that empty groups are - // removed themselves). - // - it is now empty: if the group isn't empty, removing it will fail (to avoid removing - // backups that might still be used). - // - the new locking mechanism is used: if the old mechanism is used, a group removal here - // could lead to a race condition. - // - // Do not error out, as we have already removed the snapshot, there is nothing a user could - // do to rectify the situation. 
- if guard.is_ok() && group.list_backups()?.is_empty() && !*OLD_LOCKING { - group.remove_group_dir()?; - } else if let Err(err) = guard { - log::debug!("{err:#}"); + if skip_trash { + let group = BackupGroup::from(self); + let guard = group.lock().with_context(|| { + format!("while checking if group '{group:?}' is empty during snapshot destruction") + }); + + // Only remove the group if all of the following is true: + // + // - we can lock it: if we can't lock the group, it is still in use (either by another + // backup process or a parent caller (who needs to take care that empty groups are + // removed themselves). + // - it is now empty: if the group isn't empty, removing it will fail (to avoid removing + // backups that might still be used). + // - the new locking mechanism is used: if the old mechanism is used, a group removal here + // could lead to a race condition. + // + // Do not error out, as we have already removed the snapshot, there is nothing a user could + // do to rectify the situation. 
+ if guard.is_ok() && group.list_backups()?.is_empty() && !*OLD_LOCKING { + group.remove_group_dir()?; + } else if let Err(err) = guard { + log::debug!("{err:#}"); + } } Ok(()) diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs index cbf78ecb6..6df26e825 100644 --- a/pbs-datastore/src/datastore.rs +++ b/pbs-datastore/src/datastore.rs @@ -686,7 +686,7 @@ impl DataStore { ) -> Result<(), Error> { let backup_dir = self.backup_dir(ns.clone(), backup_dir.clone())?; - backup_dir.destroy(force) + backup_dir.destroy(force, true) } /// Returns the time of the last successful backup diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs index 392494488..aafd1bbd7 100644 --- a/src/api2/admin/datastore.rs +++ b/src/api2/admin/datastore.rs @@ -402,6 +402,12 @@ pub async fn list_snapshot_files( type: pbs_api_types::BackupDir, flatten: true, }, + "skip-trash": { + type: bool, + optional: true, + default: false, + description: "Immediately remove the snapshot, not marking it as trash.", + }, }, }, access: { @@ -415,6 +421,7 @@ pub async fn delete_snapshot( store: String, ns: Option<BackupNamespace>, backup_dir: pbs_api_types::BackupDir, + skip_trash: bool, _info: &ApiMethod, rpcenv: &mut dyn RpcEnvironment, ) -> Result<Value, Error> { @@ -435,7 +442,7 @@ pub async fn delete_snapshot( let snapshot = datastore.backup_dir(ns, backup_dir)?; - snapshot.destroy(false)?; + snapshot.destroy(false, skip_trash)?; Ok(Value::Null) }) @@ -979,6 +986,12 @@ pub fn verify( optional: true, description: "Spins up an asynchronous task that does the work.", }, + "skip-trash": { + type: bool, + optional: true, + default: false, + description: "Immediately remove the snapshot, not marking it as trash.", + }, }, }, returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE, @@ -995,6 +1008,7 @@ pub fn prune( keep_options: KeepOptions, store: String, ns: Option<BackupNamespace>, + skip_trash: bool, param: Value, rpcenv: &mut dyn RpcEnvironment, ) -> Result<Value, 
Error> { @@ -1098,7 +1112,7 @@ pub fn prune( }); if !keep { - if let Err(err) = backup_dir.destroy(false) { + if let Err(err) = backup_dir.destroy(false, skip_trash) { warn!( "failed to remove dir {:?}: {}", backup_dir.relative_path(), -- 2.39.5 _______________________________________________ pbs-devel mailing list pbs-devel@lists.proxmox.com https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel