From: Fabian Grünbichler
To: Proxmox Backup Server development discussion <pbs-devel@lists.proxmox.com>
Date: Tue, 11 Nov 2025 11:09:49 +0100
Message-Id: <1762854920.30j1b3ipx9.astroid@yuna.none>
In-Reply-To: <20251104131934.449757-2-c.ebner@proxmox.com>
References: <20251104131934.449757-1-c.ebner@proxmox.com> <20251104131934.449757-2-c.ebner@proxmox.com>
Subject: Re: [pbs-devel] [PATCH proxmox-backup 1/2] datastore: s3 refresh: set/unset maintenance mode in api handler

On November 4, 2025 2:19 pm, Christian Ebner wrote:
> Instead of setting the maintenance mode in the datastores s3 refresh
> helper method, do this in the api handler directly. Since this is
> now mostly an sync task, adapt the api handler to be a sync function
> and run the task on a dedicated thread.
>
> This is in preparation for fixing the s3 refresh to be able to start
> a refresh without checking for active operations.
>
> Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
> ---
>  pbs-datastore/src/datastore.rs | 26 --------------------------
>  src/api2/admin/datastore.rs    | 32 ++++++++++++++++++++++++++++----
>  2 files changed, 28 insertions(+), 30 deletions(-)
>
> diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
> index 127ba1c81..d5ff6e5f7 100644
> --- a/pbs-datastore/src/datastore.rs
> +++ b/pbs-datastore/src/datastore.rs
> @@ -2208,16 +2208,6 @@ impl DataStore {
>          match self.backend()? {
>              DatastoreBackend::Filesystem => bail!("store '{}' not backed by S3", self.name()),
>              DatastoreBackend::S3(s3_client) => {
> -                let self_clone = Arc::clone(self);
> -                tokio::task::spawn_blocking(move || {
> -                    self_clone.maintenance_mode(Some(MaintenanceMode {
> -                        ty: MaintenanceType::S3Refresh,
> -                        message: None,
> -                    }))
> -                })
> -                .await?
> -                .context("failed to set maintenance mode")?;
> -
>                  let tmp_base = proxmox_sys::fs::make_tmp_dir(self.base_path(), None)
>                      .context("failed to create temporary content folder in {store_base}")?;
>
> @@ -2231,27 +2221,11 @@ impl DataStore {
>                      let _ = std::fs::remove_dir_all(&tmp_base);
>                      return Err(err);
>                  }
> -
> -                let self_clone = Arc::clone(self);
> -                tokio::task::spawn_blocking(move || self_clone.maintenance_mode(None))
> -                    .await?
> -                    .context("failed to clear maintenance mode")?;
>              }
>          }
>          Ok(())
>      }
>
> -    // Set or clear the datastores maintenance mode by locking and updating the datastore config
> -    fn maintenance_mode(&self, maintenance_mode: Option<MaintenanceMode>) -> Result<(), Error> {
> -        let _lock = pbs_config::datastore::lock_config()?;
> -        let (mut section_config, _digest) = pbs_config::datastore::config()?;
> -        let mut datastore: DataStoreConfig = section_config.lookup("datastore", self.name())?;
> -        datastore.set_maintenance_mode(maintenance_mode)?;
> -        section_config.set_data(self.name(), "datastore", &datastore)?;
> -        pbs_config::datastore::save_config(&section_config)?;
> -        Ok(())
> -    }
> -
>      // Fetch the contents (metadata, no chunks) of the datastore from the S3 object store to the
>      // provided temporaray directory
>      async fn fetch_tmp_contents(&self, tmp_base: &Path, s3_client: &S3Client) -> Result<(), Error> {
> diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
> index d192ee390..00110119f 100644
> --- a/src/api2/admin/datastore.rs
> +++ b/src/api2/admin/datastore.rs
> @@ -2737,22 +2737,46 @@ pub async fn unmount(store: String, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
>      },
>  )]
>  /// Refresh datastore contents from S3 to local cache store.
> -pub async fn s3_refresh(store: String, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
> +pub fn s3_refresh(store: String, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
> +    maintenance_mode(
> +        &store,
> +        Some(MaintenanceMode {
> +            ty: MaintenanceType::S3Refresh,
> +            message: None,
> +        }),
> +    )
> +    .context("failed to set maintenance mode")?;
> +
>      let datastore = DataStore::lookup_datastore(&store, Some(Operation::Lookup))?;
>      let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
>      let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
>
> -    let upid = WorkerTask::spawn(
> +    let upid = WorkerTask::new_thread(
>          "s3-refresh",
> -        Some(store),
> +        Some(store.clone()),
>          auth_id.to_string(),
>          to_stdout,
> -        move |_worker| async move { datastore.s3_refresh().await },
> +        move |_worker| {
> +            proxmox_async::runtime::block_on(datastore.s3_refresh())?;

this helper's doc comments are now wrong.. but also, this would need to
work more like unmounting IMHO, since there is no protection against
leaving the S3Refresh maintenance mode while it is currently active??

we currently risk issues like the datastore not having a maintenance
mode set, tasks being started, and then S3Refresh clearing out all the
dirs to replace them with the just-downloaded ones, causing major
inconsistencies?

I think we can re-use expect_maintenance_unmounting by making it
generic, and then hold the maintenance mode lock while doing the
refresh?
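
something along these lines maybe - completely untested, the helper
name, the exact return types and get_maintenance_mode() below are from
memory/assumed, so treat it as a rough sketch of the direction only:

// sketch: generic variant of expect_maintenance_unmounting - lock the
// datastore config, verify that the expected maintenance type is
// (still) set, and hand the held config lock back to the caller so it
// can be kept for the duration of the operation
fn expect_maintenance_mode(
    store: &str,
    expected: MaintenanceType,
) -> Result<(BackupLockGuard, DataStoreConfig, SectionConfigData), Error> {
    let lock = pbs_config::datastore::lock_config()?;
    let (section_config, _digest) = pbs_config::datastore::config()?;
    let config: DataStoreConfig = section_config.lookup("datastore", store)?;

    match config.get_maintenance_mode() {
        Some(mode) if mode.ty == expected => Ok((lock, config, section_config)),
        _ => bail!("datastore '{store}' is not in the expected maintenance mode"),
    }
}

// in the s3-refresh worker closure: re-check the mode and keep holding
// the config lock across the refresh, then clear the maintenance mode
// while still holding it (or via the existing clear helper)
move |_worker| {
    let (_lock, mut config, mut section_config) =
        expect_maintenance_mode(&store, MaintenanceType::S3Refresh)?;

    proxmox_async::runtime::block_on(datastore.s3_refresh())?;

    config.set_maintenance_mode(None)?;
    section_config.set_data(&store, "datastore", &config)?;
    pbs_config::datastore::save_config(&section_config)?;
    Ok(())
}
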
holding the lock like that forces the refresh to be aborted before the
maintenance mode can be lifted (and leaves just a crash or restart while
refreshing as a source of issues).

it also makes the `maintenance_mode` helper kinda unnecessary, as we'd
now only set the maintenance mode once at the start, and then query that
it is still as expected, and there already is a helper for removing the
maintenance mode at the end or as part of error/abort handling..

> +
> +            maintenance_mode(&store, None).context("failed to clear maintenance mode")
> +        },
>      )?;
>
>      Ok(json!(upid))
>  }
>
> +// Set or clear the datastores maintenance mode by locking and updating the datastore config
> +fn maintenance_mode(store: &str, maintenance_mode: Option<MaintenanceMode>) -> Result<(), Error> {
> +    let _lock = pbs_config::datastore::lock_config()?;
> +    let (mut section_config, _digest) = pbs_config::datastore::config()?;
> +    let mut datastore: DataStoreConfig = section_config.lookup("datastore", store)?;
> +    datastore.set_maintenance_mode(maintenance_mode)?;
> +    section_config.set_data(store, "datastore", &datastore)?;
> +    pbs_config::datastore::save_config(&section_config)?;
> +    Ok(())
> +}
> +
>  #[sortable]
>  const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
>      (
> --
> 2.47.3

_______________________________________________
pbs-devel mailing list
pbs-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel