From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [PATCH proxmox-backup v2 3/4] datastore: move try_ensure_sync_level() implementation to chunk store
Date: Tue, 12 May 2026 10:55:43 +0200
Message-ID: <20260512085544.255754-4-c.ebner@proxmox.com>
X-Mailer: git-send-email 2.47.3
In-Reply-To: <20260512085544.255754-1-c.ebner@proxmox.com>
References: <20260512085544.255754-1-c.ebner@proxmox.com>

Keep the public interface for the datastore in place, but move the
method to the chunk store, dropping the now unneeded state stored on
DataStoreImpl. The path used for the syncfs call does not change with
the move, as the previous implementation already passed the chunk
store's base path.

While moving, adapt the log message to use the tracing macro.

This is in preparation for fixing sync level updates not being
propagated to the chunk store.

Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
changes since version 1:
- move implementation to chunk store, thereby fixing a logic error in
  the check
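For reviewers' reference, a minimal self-contained sketch of the
syncfs(2) pattern the moved method relies on (not part of the patch;
it assumes the libc crate, and the free-standing function name is
illustrative only and does not exist in the tree):

    use std::os::fd::AsRawFd;

    fn sync_filesystem(path: &std::path::Path) -> std::io::Result<()> {
        // Any open fd on the target filesystem will do; the directory
        // itself is the simplest choice.
        let file = std::fs::File::open(path)?;
        // syncfs(2) flushes all dirty data of the filesystem containing
        // the fd, unlike fsync(2), which only covers a single file.
        if unsafe { libc::syncfs(file.as_raw_fd()) } < 0 {
            return Err(std::io::Error::last_os_error());
        }
        Ok(())
    }

Since syncfs(2) flushes the whole filesystem regardless of which fd on
it is passed, opening the chunk store base path from either call site
triggers the same flush, which is why the move does not change behavior.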
 pbs-datastore/src/chunk_store.rs | 13 +++++++++++++
 pbs-datastore/src/datastore.rs   | 22 +++++-----------------
 2 files changed, 18 insertions(+), 17 deletions(-)

diff --git a/pbs-datastore/src/chunk_store.rs b/pbs-datastore/src/chunk_store.rs
index 2888dea39..e8c279a62 100644
--- a/pbs-datastore/src/chunk_store.rs
+++ b/pbs-datastore/src/chunk_store.rs
@@ -966,6 +966,19 @@ impl ChunkStore {
         }
         (chunk_path, counter)
     }
+
+    pub(super) fn try_ensure_sync_level(&self) -> Result<(), Error> {
+        if self.sync_level != DatastoreFSyncLevel::Filesystem {
+            return Ok(());
+        }
+        let file = std::fs::File::open(self.base_path())?;
+        let fd = file.as_raw_fd();
+        info!("syncing filesystem");
+        if unsafe { libc::syncfs(fd) } < 0 {
+            bail!("error during syncfs: {}", std::io::Error::last_os_error());
+        }
+        Ok(())
+    }
 }
 
 #[derive(PartialEq)]
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index bc218cb94..6338298a0 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -32,9 +32,9 @@ use proxmox_worker_task::WorkerTaskContext;
 
 use pbs_api_types::{
     ArchiveType, Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, ChunkOrder,
-    DataStoreConfig, DatastoreBackendConfig, DatastoreBackendType, DatastoreFSyncLevel,
-    DatastoreTuning, GarbageCollectionCacheStats, GarbageCollectionStatus, MaintenanceMode,
-    MaintenanceType, Operation, S3Statistics, MAX_NAMESPACE_DEPTH, UPID,
+    DataStoreConfig, DatastoreBackendConfig, DatastoreBackendType, DatastoreTuning,
+    GarbageCollectionCacheStats, GarbageCollectionStatus, MaintenanceMode, MaintenanceType,
+    Operation, S3Statistics, MAX_NAMESPACE_DEPTH, UPID,
 };
 use pbs_config::s3::S3_CFG_TYPE_ID;
 use pbs_config::{BackupLockGuard, ConfigVersionCache};
@@ -204,7 +204,6 @@ pub struct DataStoreImpl {
     last_gc_status: Mutex<GarbageCollectionStatus>,
     verify_new: bool,
     chunk_order: ChunkOrder,
-    sync_level: DatastoreFSyncLevel,
     backend_config: DatastoreBackendConfig,
     lru_store_caching: Option<LocalDatastoreLruCache>,
     thread_settings: DatastoreThreadSettings,
@@ -225,7 +224,6 @@ impl DataStoreImpl {
             last_gc_status: Mutex::new(GarbageCollectionStatus::default()),
             verify_new: false,
             chunk_order: Default::default(),
-            sync_level: Default::default(),
             backend_config: Default::default(),
             lru_store_caching: None,
             thread_settings: Default::default(),
@@ -785,7 +783,6 @@ impl DataStore {
             last_gc_status: Mutex::new(gc_status),
             verify_new: config.verify_new.unwrap_or(false),
             chunk_order: tuning.chunk_order.unwrap_or_default(),
-            sync_level: tuning.sync_level.unwrap_or_default(),
             backend_config,
             lru_store_caching,
             thread_settings,
@@ -3012,19 +3009,10 @@ impl DataStore {
     }
     */
 
-    /// Syncs the filesystem of the datastore if 'sync_level' is set to
+    /// Syncs the filesystem of the chunk store base path if 'sync_level' is set to
     /// [`DatastoreFSyncLevel::Filesystem`]. Uses syncfs(2).
     pub fn try_ensure_sync_level(&self) -> Result<(), Error> {
-        if self.inner.sync_level != DatastoreFSyncLevel::Filesystem {
-            return Ok(());
-        }
-        let file = std::fs::File::open(self.base_path())?;
-        let fd = file.as_raw_fd();
-        log::info!("syncing filesystem");
-        if unsafe { libc::syncfs(fd) } < 0 {
-            bail!("error during syncfs: {}", std::io::Error::last_os_error());
-        }
-        Ok(())
+        self.inner.chunk_store.try_ensure_sync_level()
     }
 
     /// Destroy a datastore. This requires that there are no active operations on the datastore.
-- 
2.47.3