From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from firstgate.proxmox.com (firstgate.proxmox.com [212.224.123.68]) by lore.proxmox.com (Postfix) with ESMTPS id 241C71FF13E for ; Fri, 17 Apr 2026 16:35:17 +0200 (CEST) Received: from firstgate.proxmox.com (localhost [127.0.0.1]) by firstgate.proxmox.com (Proxmox) with ESMTP id E79605047; Fri, 17 Apr 2026 16:35:16 +0200 (CEST) Message-ID: <7563649d-82ba-441c-bc7c-d08b15226de8@proxmox.com> Date: Fri, 17 Apr 2026 16:35:11 +0200 MIME-Version: 1.0 User-Agent: Mozilla Thunderbird Subject: Re: [PATCH proxmox-backup 1/2] api: datastore: add option to run garbage collection before unmount To: Hannes Laimer , pbs-devel@lists.proxmox.com References: <20260417112755.136134-1-h.laimer@proxmox.com> <20260417112755.136134-2-h.laimer@proxmox.com> Content-Language: en-US, de-DE From: Christian Ebner In-Reply-To: <20260417112755.136134-2-h.laimer@proxmox.com> Content-Type: text/plain; charset=UTF-8; format=flowed Content-Transfer-Encoding: 7bit X-Bm-Milter-Handled: 55990f41-d878-4baa-be0a-ee34c49e34d2 X-Bm-Transport-Timestamp: 1776436431924 X-SPAM-LEVEL: Spam detection results: 0 AWL 0.069 Adjusted score from AWL reputation of From: address BAYES_00 -1.9 Bayes spam probability is 0 to 1% DMARC_MISSING 0.1 Missing DMARC policy KAM_DMARC_STATUS 0.01 Test Rule for DKIM or SPF Failure with Strict Alignment RCVD_IN_VALIDITY_CERTIFIED_BLOCKED 0.001 ADMINISTRATOR NOTICE: The query to Validity was blocked. See https://knowledge.validity.com/hc/en-us/articles/20961730681243 for more information. RCVD_IN_VALIDITY_RPBL_BLOCKED 0.001 ADMINISTRATOR NOTICE: The query to Validity was blocked. See https://knowledge.validity.com/hc/en-us/articles/20961730681243 for more information. RCVD_IN_VALIDITY_SAFE_BLOCKED 0.001 ADMINISTRATOR NOTICE: The query to Validity was blocked. See https://knowledge.validity.com/hc/en-us/articles/20961730681243 for more information. 
question: Is the exclusive lock on the datastore config really required here? After all, this only reads the config and checks its parameters.
Relies on the active write operation by garbage collection to block the unmount task until GC completes (among possibly other active operations). Further, no other active operation can occur in the meantime, since the datastore remains in maintenance mode `unmount`.
``` > + Err(err) => warn!("unable to start garbage collection before unmount: {err}"), > + } > } > > - ensure_datastore_is_mounted(&datastore)?; > + let _lock = pbs_config::datastore::lock_config()?; > + let (mut section_config, _digest) = pbs_config::datastore::config()?; > + let mut datastore: DataStoreConfig = section_config.lookup("datastore", &store)?; > > datastore.set_maintenance_mode(Some(MaintenanceMode { > ty: MaintenanceType::Unmount, > diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs > index c44d50d1..c0be0296 100644 > --- a/src/api2/config/datastore.rs > +++ b/src/api2/config/datastore.rs > @@ -409,6 +409,8 @@ pub enum DeletableProperty { > Comment, > /// Delete the garbage collection schedule. > GcSchedule, > + /// Delete the gc-on-unmount property. > + GcOnUnmount, > /// Delete the prune job schedule. > PruneSchedule, > /// Delete the keep-last property > @@ -495,6 +497,9 @@ pub fn update_datastore( > DeletableProperty::GcSchedule => { > data.gc_schedule = None; > } > + DeletableProperty::GcOnUnmount => { > + data.gc_on_unmount = None; > + } > DeletableProperty::PruneSchedule => { > data.prune_schedule = None; > } > @@ -560,6 +565,10 @@ pub fn update_datastore( > data.gc_schedule = update.gc_schedule; > } > > + if update.gc_on_unmount.is_some() { > + data.gc_on_unmount = update.gc_on_unmount; > + } > + > macro_rules! prune_disabled { > ($(($param:literal, $($member:tt)+)),+) => { > $(