From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from firstgate.proxmox.com (firstgate.proxmox.com [IPv6:2a01:7e0:0:424::9]) by lore.proxmox.com (Postfix) with ESMTPS id 5ACFE1FF13C for ; Thu, 19 Mar 2026 15:37:51 +0100 (CET) Received: from firstgate.proxmox.com (localhost [127.0.0.1]) by firstgate.proxmox.com (Proxmox) with ESMTP id B767C2196; Thu, 19 Mar 2026 15:38:06 +0100 (CET) From: Christian Ebner To: pbs-devel@lists.proxmox.com Subject: [PATCH proxmox-backup 3/5] bin: proxy: periodically schedule fstrim on datastore's filesystems Date: Thu, 19 Mar 2026 15:36:47 +0100 Message-ID: <20260319143649.681937-5-c.ebner@proxmox.com> X-Mailer: git-send-email 2.47.3 In-Reply-To: <20260319143649.681937-1-c.ebner@proxmox.com> References: <20260319143649.681937-1-c.ebner@proxmox.com> MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Bm-Milter-Handled: 55990f41-d878-4baa-be0a-ee34c49e34d2 X-Bm-Transport-Timestamp: 1773930980742 X-SPAM-LEVEL: Spam detection results: 0 AWL -1.004 Adjusted score from AWL reputation of From: address BAYES_00 -1.9 Bayes spam probability is 0 to 1% DMARC_MISSING 0.1 Missing DMARC policy KAM_DMARC_STATUS 0.01 Test Rule for DKIM or SPF Failure with Strict Alignment RCVD_IN_VALIDITY_CERTIFIED_BLOCKED 0.408 ADMINISTRATOR NOTICE: The query to Validity was blocked. See https://knowledge.validity.com/hc/en-us/articles/20961730681243 for more information. RCVD_IN_VALIDITY_RPBL_BLOCKED 0.819 ADMINISTRATOR NOTICE: The query to Validity was blocked. See https://knowledge.validity.com/hc/en-us/articles/20961730681243 for more information. RCVD_IN_VALIDITY_SAFE_BLOCKED 0.903 ADMINISTRATOR NOTICE: The query to Validity was blocked. See https://knowledge.validity.com/hc/en-us/articles/20961730681243 for more information. 
SPF_HELO_NONE 0.001 SPF: HELO does not publish an SPF Record SPF_PASS -0.001 SPF: sender matches SPF record Message-ID-Hash: DFWEIAMBTZGMRQV3T76PEYP2JLEPYXSD X-Message-ID-Hash: DFWEIAMBTZGMRQV3T76PEYP2JLEPYXSD X-MailFrom: c.ebner@proxmox.com X-Mailman-Rule-Misses: dmarc-mitigation; no-senders; approved; loop; banned-address; emergency; member-moderation; nonmember-moderation; administrivia; implicit-dest; max-recipients; max-size; news-moderation; no-subject; digests; suspicious-header X-Mailman-Version: 3.3.10 Precedence: list List-Id: Proxmox Backup Server development discussion List-Help: List-Owner: List-Post: List-Subscribe: List-Unsubscribe: Run the fstrim command as a scheduled job on datastores having a schedule defined in the config. This is done since by default the systemd service to execute fstrim invokes the command with: `/sbin/fstrim --listed-in /etc/fstab:/proc/self/mountinfo ...` which, however, does not cover datastores created on top of a disk, since these are mounted via systemd mount units. fstrim, however, only evaluates the given list above up to the first non-empty file, as stated in the man page [0]. 
[0] https://www.man7.org/linux/man-pages/man8/fstrim.8.html Fixes: https://forum.proxmox.com/threads/181764/ Signed-off-by: Christian Ebner --- src/bin/proxmox-backup-proxy.rs | 59 +++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs index c1fe3ac15..1d375e69d 100644 --- a/src/bin/proxmox-backup-proxy.rs +++ b/src/bin/proxmox-backup-proxy.rs @@ -508,6 +508,7 @@ async fn schedule_tasks() -> Result<(), Error> { schedule_datastore_verify_jobs().await; schedule_tape_backup_jobs().await; schedule_task_log_rotate().await; + schedule_fstrim().await; Ok(()) } @@ -879,6 +880,64 @@ async fn schedule_task_log_rotate() { } } +async fn schedule_fstrim() { + let config = match pbs_config::datastore::config() { + Err(err) => { + eprintln!("unable to read datastore config - {err}"); + return; + } + Ok((config, _digest)) => config, + }; + + for (store, (_, store_config)) in config.sections { + let store_config: DataStoreConfig = match serde_json::from_value(store_config) { + Ok(c) => c, + Err(err) => { + eprintln!("datastore config from_value failed - {err}"); + continue; + } + }; + + let event_schedule = match &store_config.fstrim_schedule { + Some(event_schedule) => event_schedule, + None => continue, + }; + + let worker_type = "fstrim"; + if check_schedule(worker_type, event_schedule, &store) { + let mut job = match Job::new(worker_type, &store) { + Ok(job) => job, + Err(_) => continue, // could not get lock + }; + + if let Err(err) = WorkerTask::new_thread( + worker_type, + None, + Authid::root_auth_id().to_string(), + false, + move |worker| { + job.start(&worker.upid().to_string())?; + info!("executing fstrim on filesystem for {store}"); + + let path = store_config.absolute_path(); + let result = proxmox_backup::tools::disks::fstrim(Path::new(&path)) + .map(|output| log::info!("{output}")); + + let status = worker.create_state(&result); + + if let Err(err) = job.finish(status) { + 
eprintln!("could not finish job state for {worker_type}: {err}"); + } + + result + }, + ) { + eprintln!("unable to start fstrim task: {err}"); + } + } + } +} + async fn command_reopen_access_logfiles() -> Result<(), Error> { // only care about the most recent daemon instance for each, proxy & api, as other older ones // should not respond to new requests anyway, but only finish their current one and then exit. -- 2.47.3