Date: Thu, 22 Aug 2024 09:32:26 +0200
From: Wolfgang Bumiller
To: Hannes Laimer
Cc: pbs-devel@lists.proxmox.com
Subject: Re: [pbs-devel] [PATCH proxmox-backup v2] datastore: simplify update-datastore-cache socket command
Message-ID: <745u27p6cav7bmy424rwwncyqv5bipgvypqbq3cgw3foavpfjc@pzfiwpoyftbt>
In-Reply-To: <20240819094522.135756-1-h.laimer@proxmox.com>

On Mon, Aug 19, 2024 at 11:45:22AM GMT, Hannes Laimer wrote:
> ... by always dropping the cache entry and not allowing it to be
> re-added based on the maintenance mode. The check is only needed for

What "check" are you referring to here?

> `Lookup` operations since lookups with those wouldn't fail based on
> maintenance mode alone. Already running tasks won't be affected and
> since they're the only ones holding a reference to a DataStore, it,
> and the file handles it holds will be dropped automatically once the
> (last) task finishes.
>
> Signed-off-by: Hannes Laimer
> ---
> v2:
>  - rebase onto master
>
>  pbs-datastore/src/datastore.rs  | 26 ++++++++++----------------
>  src/api2/config/datastore.rs    |  7 +++----
>  src/bin/proxmox-backup-proxy.rs |  6 +++---
>  3 files changed, 16 insertions(+), 23 deletions(-)
>
> diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
> index d0f3c53a..69b4c83e 100644
> --- a/pbs-datastore/src/datastore.rs
> +++ b/pbs-datastore/src/datastore.rs
> @@ -183,10 +183,15 @@ impl DataStore {
>              )?)
>          };
>
> +        let is_offline = config
> +            .get_maintenance_mode()
> +            .map_or(false, |m| m.is_offline());
>          let datastore = DataStore::with_store_and_config(chunk_store, config, Some(digest))?;
>
>          let datastore = Arc::new(datastore);
> -        datastore_cache.insert(name.to_string(), datastore.clone());
> +        if !is_offline {
> +            datastore_cache.insert(name.to_string(), datastore.clone());
> +        }

So previously we only ever had one DataStore+ChunkStore pair per store.
Now we can have multiple instances for an offline datastore, as well as
after receiving an "update-datastore-cache" command, which now forces
multiple instances. This seems to go against the original idea here.
Why do we want this?

>
>          if let Some(operation) = operation {
>              update_active_operations(name, operation, 1)?;
> @@ -208,21 +213,10 @@ impl DataStore {
>          Ok(())
>      }
>
> -    /// trigger clearing cache entry based on maintenance mode. Entry will only
> -    /// be cleared iff there is no other task running, if there is, the end of the
> -    /// last running task will trigger the clearing of the cache entry.
> -    pub fn update_datastore_cache(name: &str) -> Result<(), Error> {
> -        let (config, _digest) = pbs_config::datastore::config()?;
> -        let datastore: DataStoreConfig = config.lookup("datastore", name)?;
> -        if datastore
> -            .get_maintenance_mode()
> -            .map_or(false, |m| m.is_offline())
> -        {
> -            // the datastore drop handler does the checking if tasks are running and clears the
> -            // cache entry, so we just have to trigger it here
> -            let _ = DataStore::lookup_datastore(name, Some(Operation::Lookup));
> -        }
> -
> +    /// removes datastore from cache
> +    pub fn drop_from_datastore_cache(name: &str) -> Result<(), Error> {

There's no remaining error case here, so we wouldn't need a `Result`
(see the sketch at the end).

> +        let mut datastore_cache = DATASTORE_MAP.lock().unwrap();
> +        datastore_cache.remove(name);
>          Ok(())
>      }
>
> diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
> index ca6edf05..65d88265 100644
> --- a/src/api2/config/datastore.rs
> +++ b/src/api2/config/datastore.rs
> @@ -396,16 +396,15 @@ pub fn update_datastore(
>          data.tuning = update.tuning;
>      }
>
> -    let mut maintenance_mode_changed = false;
> +    let mut drop_store_from_cache = false;
>      if update.maintenance_mode.is_some() {
> -        maintenance_mode_changed = data.maintenance_mode != update.maintenance_mode;
> -
>          let maintenance_mode = match update.maintenance_mode {
>              Some(mode_str) => Some(MaintenanceMode::deserialize(
>                  proxmox_schema::de::SchemaDeserializer::new(mode_str, &MaintenanceMode::API_SCHEMA),
>              )?),
>              None => None,
>          };
> +        drop_store_from_cache = maintenance_mode.as_ref().map_or(false, |m| m.is_offline());

(This could be set directly in the `Some` match arm, since it already
defaults to false.)

>          data.set_maintenance_mode(maintenance_mode)?;
>      }
>
> @@ -420,7 +419,7 @@ pub fn update_datastore(
>      }
>
>      // tell the proxy it might have to clear a cache entry

The comment is now wrong.
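(Something along the lines of "// tell the proxy to drop the datastore
from its cache" would match the new behavior.)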
> -    if maintenance_mode_changed {
> +    if drop_store_from_cache {
>          tokio::spawn(async move {
>              if let Ok(proxy_pid) =
>                  proxmox_rest_server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN)
> diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs
> index 041f3aff..04622d43 100644
> --- a/src/bin/proxmox-backup-proxy.rs
> +++ b/src/bin/proxmox-backup-proxy.rs
> @@ -274,11 +274,11 @@ async fn run() -> Result<(), Error> {
>          Ok(Value::Null)
>      })?;
>
> -    // clear cache entry for datastore that is in a specific maintenance mode
> +    // clear cache entry for datastore
>      command_sock.register_command("update-datastore-cache".to_string(), |value| {
>          if let Some(name) = value.and_then(Value::as_str) {
> -            if let Err(err) = DataStore::update_datastore_cache(name) {
> -                log::error!("could not trigger update datastore cache: {err}");
> +            if let Err(err) = DataStore::drop_from_datastore_cache(name) {
> +                log::error!("could not drop datastore from cache: {err}");
>              }
>          }
>          Ok(Value::Null)
> --
> 2.39.2
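For reference, the `Result`-free version I have in mind would be roughly
this (untested sketch, with DATASTORE_MAP kept as it is in the patch):

    /// Removes a datastore from the cache.
    pub fn drop_from_datastore_cache(name: &str) {
        // Nothing in here can fail, so there is no Result to return.
        let mut datastore_cache = DATASTORE_MAP.lock().unwrap();
        datastore_cache.remove(name);
    }

and the proxy side would then shrink to:

    command_sock.register_command("update-datastore-cache".to_string(), |value| {
        if let Some(name) = value.and_then(Value::as_str) {
            // infallible now, so the error log goes away entirely
            DataStore::drop_from_datastore_cache(name);
        }
        Ok(Value::Null)
    })?;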