From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from firstgate.proxmox.com (firstgate.proxmox.com [IPv6:2a01:7e0:0:424::9]) by lore.proxmox.com (Postfix) with ESMTPS id D8EEC1FF14C for ; Fri, 15 May 2026 10:29:06 +0200 (CEST) Received: from firstgate.proxmox.com (localhost [127.0.0.1]) by firstgate.proxmox.com (Proxmox) with ESMTP id EF4E5FF05; Fri, 15 May 2026 10:29:02 +0200 (CEST) From: Lukas Wagner To: pdm-devel@lists.proxmox.com Subject: [PATCH datacenter-manager v2 4/4] remote-updates: switch over to new api_cache Date: Fri, 15 May 2026 10:28:55 +0200 Message-ID: <20260515082855.85698-5-l.wagner@proxmox.com> X-Mailer: git-send-email 2.47.3 In-Reply-To: <20260515082855.85698-1-l.wagner@proxmox.com> References: <20260515082855.85698-1-l.wagner@proxmox.com> MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Bm-Milter-Handled: 55990f41-d878-4baa-be0a-ee34c49e34d2 X-Bm-Transport-Timestamp: 1778833730933 X-SPAM-LEVEL: Spam detection results: 0 AWL 0.054 Adjusted score from AWL reputation of From: address BAYES_00 -1.9 Bayes spam probability is 0 to 1% DMARC_MISSING 0.1 Missing DMARC policy KAM_DMARC_STATUS 0.01 Test Rule for DKIM or SPF Failure with Strict Alignment SPF_HELO_NONE 0.001 SPF: HELO does not publish an SPF Record SPF_PASS -0.001 SPF: sender matches SPF record Message-ID-Hash: VTD2JSUCNQC6W47V327OVGQT4QDYTEOW X-Message-ID-Hash: VTD2JSUCNQC6W47V327OVGQT4QDYTEOW X-MailFrom: l.wagner@proxmox.com X-Mailman-Rule-Misses: dmarc-mitigation; no-senders; approved; loop; banned-address; emergency; member-moderation; nonmember-moderation; administrivia; implicit-dest; max-recipients; max-size; news-moderation; no-subject; digests; suspicious-header X-Mailman-Version: 3.3.10 Precedence: list List-Id: Proxmox Datacenter Manager development discussion List-Help: List-Owner: List-Post: List-Subscribe: List-Unsubscribe: Use the new, centralized API cache for caching remote update summaries. 
Add some cleanup logic to remove the old cachefile; this cleanup logic itself can be removed at some point in the future. Signed-off-by: Lukas Wagner --- Notes: Changes since the RFC: - use new async cache interface - clean up old cachefile automatically Changes since v1: - Make cache self-healing in case an entry could not be read or deserialized. This ensures resiliency when the cached data type changes unexpectedly. In this case, we just log an error and return a default, empty summary. - Avoid some .clone() calls - When requesting the list of updates for a single remote, update the cache asynchronously server/src/api/pve/mod.rs | 4 +- server/src/api/remotes/updates.rs | 6 +- server/src/remote_updates.rs | 120 ++++++++++++++++++------ 3 files changed, 77 insertions(+), 53 deletions(-) diff --git a/server/src/api/pve/mod.rs b/server/src/api/pve/mod.rs index 20892f38..649ab624 100644 --- a/server/src/api/pve/mod.rs +++ b/server/src/api/pve/mod.rs @@ -588,8 +588,8 @@ pub async fn get_options(remote: String) -> Result { }, )] /// Return the cached update information about a remote. -pub fn get_updates(remote: String) -> Result { - let update_summary = get_available_updates_for_remote(&remote)?; +pub async fn get_updates(remote: String) -> Result { + let update_summary = get_available_updates_for_remote(&remote).await?; Ok(update_summary) } diff --git a/server/src/api/remotes/updates.rs b/server/src/api/remotes/updates.rs index 365ffc19..ea46ba0d 100644 --- a/server/src/api/remotes/updates.rs +++ b/server/src/api/remotes/updates.rs @@ -42,7 +42,7 @@ const SUBDIRS: SubdirMap = &sorted!([ returns: { type: UpdateSummary } )] /// Return available update summary for managed remote nodes. -pub fn update_summary(rpcenv: &mut dyn RpcEnvironment) -> Result { +pub async fn update_summary(rpcenv: &mut dyn RpcEnvironment) -> Result { let auth_id = rpcenv .get_auth_id() .context("no authid available")? 
@@ -53,7 +53,7 @@ pub fn update_summary(rpcenv: &mut dyn RpcEnvironment) -> Result Result for NodeUpdateSummary { - fn from(value: NodeUpdateInfo) -> Self { +impl From<&NodeUpdateInfo> for NodeUpdateSummary { + fn from(value: &NodeUpdateInfo) -> Self { Self { number_of_updates: value.updates.len() as u32, last_refresh: value.last_refresh, status: NodeUpdateStatus::Success, status_message: None, - versions: value.versions, + versions: value.versions.clone(), repository_status: value.repository_status, } } @@ -44,11 +42,20 @@ impl From for NodeUpdateSummary { /// Return a list of available updates for a given remote node. pub async fn list_available_updates( remote: Remote, - node: &str, + node: String, ) -> Result, Error> { - let updates = fetch_available_updates((), remote.clone(), node.to_string()).await?; + let updates = fetch_available_updates((), remote.clone(), node.clone()).await?; - update_cached_summary_for_node(remote, node.into(), updates.clone().into()).await?; + let summary = (&updates).into(); + + // Update cache entry asynchronously, no need to wait for it. + tokio::task::spawn({ + async move { + if let Err(err) = update_cached_summary_for_node(remote, node, summary).await { + log::error!("could not update 'remote-updates' API cache entry: {err}"); + } + } + }); Ok(updates.updates) } @@ -106,10 +113,10 @@ pub async fn get_changelog(remote: &Remote, node: &str, package: String) -> Resu } /// Get update summary for all managed remotes. 
-pub fn get_available_updates_summary() -> Result { +pub async fn get_available_updates_summary() -> Result { let (config, _digest) = pdm_config::remotes::config()?; - let cache_content = get_cached_summary_or_default()?; + let cache_content = get_cached_summary_or_default().await?; let mut summary = UpdateSummary::default(); @@ -137,11 +144,11 @@ pub fn get_available_updates_summary() -> Result { } /// Return cached update information from specific remote -pub fn get_available_updates_for_remote(remote: &str) -> Result { +pub async fn get_available_updates_for_remote(remote: &str) -> Result { let (config, _digest) = pdm_config::remotes::config()?; if let Some(remote) = config.get(remote) { - let cache_content = get_cached_summary_or_default()?; + let cache_content = get_cached_summary_or_default().await?; Ok(cache_content .remotes .get(&remote.id) @@ -156,22 +163,24 @@ pub fn get_available_updates_for_remote(remote: &str) -> Result Result { - match File::open(UPDATE_CACHE) { - Ok(file) => { - let content = match serde_json::from_reader(file) { - Ok(cache_content) => cache_content, - Err(err) => { - log::error!("failed to deserialize remote update cache: {err:#}"); - Default::default() - } - }; +/// Read the cached summary from the API cache, or return a default, empty summary. +/// +/// Note: This does not return an error if the cache entry could not be read (e.g. due to +/// a deserialization error), but instead returns the default, empty summary. +/// This ensures that the cache self-heals if an entry got corrupted for some reason. 
+async fn get_cached_summary_or_default() -> Result { + let guard = api_cache::read_global().await?; - Ok(content) - } - Err(err) if err.kind() == ErrorKind::NotFound => Ok(Default::default()), - Err(err) => Err(err.into()), - } + let summary = guard + .get::(UPDATE_SUMMARY_CACHE_KEY) + .await + .inspect_err(|err| { + log::error!("could not read 'remote-updates' entry from API cache: {err}") + }) + .unwrap_or_default() + .unwrap_or_default(); + + Ok(summary) } async fn update_cached_summary_for_node( @@ -179,10 +188,11 @@ async fn update_cached_summary_for_node( node: String, node_data: NodeUpdateSummary, ) -> Result<(), Error> { - let mut file = File::open(UPDATE_CACHE)?; - let mut cache_content: UpdateSummary = serde_json::from_reader(&mut file)?; - let remote_entry = - cache_content + let cache = api_cache::write_global().await?; + let cache_content = cache.get::(UPDATE_SUMMARY_CACHE_KEY).await?; + + if let Some(mut entry) = cache_content { + let remote_entry = entry .remotes .entry(remote.id) .or_insert_with(|| RemoteUpdateSummary { @@ -191,15 +201,9 @@ async fn update_cached_summary_for_node( status: RemoteUpdateStatus::Success, }); - remote_entry.nodes.insert(node, node_data); - - let options = proxmox_product_config::default_create_options(); - proxmox_sys::fs::replace_file( - UPDATE_CACHE, - &serde_json::to_vec(&cache_content)?, - options, - true, - )?; + remote_entry.nodes.insert(node, node_data); + cache.set(UPDATE_SUMMARY_CACHE_KEY, entry).await?; + } Ok(()) } @@ -212,7 +216,7 @@ pub async fn refresh_update_summary_cache(remotes: Vec) -> Result<(), Er .do_for_all_remote_nodes(remotes.clone().into_iter(), fetch_available_updates) .await; - let mut content = get_cached_summary_or_default()?; + let mut content = get_cached_summary_or_default().await?; // Clean out any remotes that might have been removed from the remote config in the meanwhile. 
content @@ -245,7 +249,7 @@ pub async fn refresh_update_summary_cache(remotes: Vec) -> Result<(), Er match node_response.data() { Ok(update_info) => { - entry.nodes.insert(node_name, update_info.clone().into()); + entry.nodes.insert(node_name, update_info.into()); } Err(err) => { // Could not fetch updates from node @@ -275,8 +279,28 @@ pub async fn refresh_update_summary_cache(remotes: Vec) -> Result<(), Er } } - let options = proxmox_product_config::default_create_options(); - proxmox_sys::fs::replace_file(UPDATE_CACHE, &serde_json::to_vec(&content)?, options, true)?; + cleanup_old_cachefile().await?; + + let cache = api_cache::write_global().await?; + cache.set(UPDATE_SUMMARY_CACHE_KEY, content).await?; + + Ok(()) +} + +// FIXME: We can remove this pretty soon. +async fn cleanup_old_cachefile() -> Result<(), Error> { + tokio::task::spawn_blocking(|| { + if let Err(err) = std::fs::remove_file(OLD_CACHEFILE) { + if err.kind() != std::io::ErrorKind::NotFound { + log::error!( + "could not clean up old remote update cache file {OLD_CACHEFILE}: {err}" + ); + } + } else { + log::info!("removed obsolete remote update cachefile {OLD_CACHEFILE}") + } + }) + .await?; Ok(()) } -- 2.47.3