From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [PATCH proxmox-backup v6 22/22] bin: proxy: periodically schedule counter reset task
Date: Thu, 19 Mar 2026 10:41:00 +0100
Message-ID: <20260319094100.240765-35-c.ebner@proxmox.com>
X-Mailer: git-send-email 2.47.3
In-Reply-To: <20260319094100.240765-1-c.ebner@proxmox.com>
References: <20260319094100.240765-1-c.ebner@proxmox.com>

Analogous to the other recurring scheduled tasks, check the configured
counter reset schedule for each datastore and, if one is set,
periodically execute the reset task. Running this as a dedicated job
ensures that the scheduled executions are tracked.
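
The `check_schedule` gate used below follows the same pattern as the
other scheduled jobs in proxmox-backup-proxy.rs: compute the next
calendar event after the last recorded run and only start the job once
that point in time has passed. A minimal, self-contained sketch of that
gating logic, with a hypothetical fixed-interval schedule standing in
for the real `proxmox_time::CalendarEvent`:

use std::time::{SystemTime, UNIX_EPOCH};

// Hypothetical stand-in for proxmox_time::CalendarEvent: the schedule
// is reduced to a fixed interval in seconds, for illustration only.
struct IntervalSchedule {
    interval_secs: u64,
}

impl IntervalSchedule {
    // Next due time strictly after `last` (epoch seconds).
    fn compute_next_event(&self, last: u64) -> u64 {
        last + self.interval_secs
    }
}

// Gate a job on its schedule: run only if the next event computed from
// the last recorded run is no longer in the future.
fn is_due(schedule: &IntervalSchedule, last_run: u64) -> bool {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock before UNIX epoch")
        .as_secs();
    schedule.compute_next_event(last_run) <= now
}

fn main() {
    let schedule = IntervalSchedule { interval_secs: 3600 }; // "hourly"
    let last_run = 0; // in the proxy this comes from the job state file
    if is_due(&schedule, last_run) {
        println!("counter reset job is due, starting worker task");
    }
}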
Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
 src/bin/proxmox-backup-proxy.rs | 69 ++++++++++++++++++++++++++++++++-
 1 file changed, 67 insertions(+), 2 deletions(-)

diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs
index b0efa78ae..aaf2d4a6c 100644
--- a/src/bin/proxmox-backup-proxy.rs
+++ b/src/bin/proxmox-backup-proxy.rs
@@ -1,5 +1,6 @@
 use std::path::{Path, PathBuf};
 use std::pin::pin;
+use std::sync::atomic::Ordering;
 use std::sync::{Arc, Mutex};
 
 use anyhow::{bail, format_err, Context, Error};
@@ -40,8 +41,8 @@ use pbs_buildcfg::configdir;
 use proxmox_time::CalendarEvent;
 
 use pbs_api_types::{
-    Authid, DataStoreConfig, Operation, PruneJobConfig, SyncJobConfig, TapeBackupJobConfig,
-    VerificationJobConfig,
+    Authid, DataStoreConfig, DatastoreBackendConfig, Operation, PruneJobConfig, SyncJobConfig,
+    TapeBackupJobConfig, VerificationJobConfig,
 };
 
 use proxmox_backup::auth_helpers::*;
@@ -508,6 +509,7 @@ async fn schedule_tasks() -> Result<(), Error> {
     schedule_datastore_verify_jobs().await;
     schedule_tape_backup_jobs().await;
     schedule_task_log_rotate().await;
+    schedule_notification_threshold_counter_reset().await;
 
     Ok(())
 }
@@ -881,6 +883,69 @@ async fn schedule_task_log_rotate() {
     }
 }
 
+async fn schedule_notification_threshold_counter_reset() {
+    let config = match pbs_config::datastore::config() {
+        Err(err) => {
+            eprintln!("unable to read datastore config - {err}");
+            return;
+        }
+        Ok((config, _digest)) => config,
+    };
+
+    for (store, (_, store_config)) in config.sections {
+        let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
+            Ok(c) => c,
+            Err(err) => {
+                eprintln!("datastore config from_value failed - {err}");
+                continue;
+            }
+        };
+
+        let event_str = match &store_config.counter_reset_schedule {
+            Some(event_str) => event_str,
+            None => continue,
+        };
+
+        let worker_type = "notification-threshold-counter-reset";
+        if check_schedule(worker_type, event_str, &store) {
+            let mut job = match Job::new(worker_type, &store) {
+                Ok(job) => job,
+                Err(_) => continue, // could not get lock
+            };
+
+            if let Err(err) = WorkerTask::new_thread(
+                worker_type,
+                None,
+                Authid::root_auth_id().to_string(),
+                false,
+                move |worker| {
+                    job.start(&worker.upid().to_string())?;
+                    info!("executing counter reset for {store}");
+
+                    let result = try_block!({
+                        let backend_config: DatastoreBackendConfig =
+                            store_config.backend.as_deref().unwrap_or("").parse()?;
+                        let request_counters =
+                            DataStore::request_counters(&store_config, &backend_config)?;
+                        request_counters.reset(Ordering::Release);
+                        Ok(())
+                    });
+
+                    let status = worker.create_state(&result);
+
+                    if let Err(err) = job.finish(status) {
+                        eprintln!("could not finish job state for {worker_type}: {err}");
+                    }
+
+                    result
+                },
+            ) {
+                eprintln!("unable to start counter reset task: {err}");
+            }
+        }
+    }
+}
+
 async fn command_reopen_access_logfiles() -> Result<(), Error> {
     // only care about the most recent daemon instance for each, proxy & api, as other older ones
     // should not respond to new requests anyway, but only finish their current one and then exit.
-- 
2.47.3
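
The `DataStore::request_counters` type itself is introduced earlier in
this series; purely to illustrate what `reset(Ordering::Release)` does,
a hypothetical, reduced version of such a counter set could look like
this (the field names are made up for the example):

use std::sync::atomic::{AtomicU64, Ordering};

// Hypothetical, reduced shape of a per-datastore request counter set;
// the real type lives elsewhere in this series.
struct RequestCounters {
    get_requests: AtomicU64,
    put_requests: AtomicU64,
}

impl RequestCounters {
    // Zero all counters with the caller-supplied memory ordering.
    fn reset(&self, ordering: Ordering) {
        self.get_requests.store(0, ordering);
        self.put_requests.store(0, ordering);
    }
}

fn main() {
    let counters = RequestCounters {
        get_requests: AtomicU64::new(41),
        put_requests: AtomicU64::new(7),
    };
    // Release on the stores pairs with Acquire loads on the reader
    // side, so readers observe the zeroed values consistently.
    counters.reset(Ordering::Release);
    assert_eq!(counters.get_requests.load(Ordering::Acquire), 0);
}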