From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [pbs-devel] [PATCH proxmox-backup v2 08/12] local store cache: drop obsolete cacher implementation
Date: Wed,  8 Oct 2025 17:21:21 +0200
Message-ID: <20251008152125.849216-9-c.ebner@proxmox.com>
In-Reply-To: <20251008152125.849216-1-c.ebner@proxmox.com>

Since the local store cache no longer uses the inner LRU cache on
chunk access, the S3Cacher implementation is obsolete and can be
replaced by the S3Client directly when fetching is required.

Drop all now-obsolete code and adapt call sites accordingly.
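
For illustration, the call-site change boils down to the following
(sketch only, assuming a hypothetical `datastore`/`backend`/`cache`
caller context, not part of this patch):

    // before: an S3Cacher wrapper had to be constructed per access
    let mut cacher = datastore
        .cacher()?
        .ok_or(format_err!("no cacher for datastore"))?;
    let chunk = cache.access(&digest, &mut cacher).await?;

    // after: the backend's Arc<S3Client> is passed directly
    if let DatastoreBackend::S3(s3_client) = &backend {
        let chunk = cache.access(&digest, s3_client.clone()).await?;
    }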

Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
 pbs-datastore/src/datastore.rs                | 12 -----
 pbs-datastore/src/local_chunk_reader.rs       | 27 +++++-------
 .../src/local_datastore_lru_cache.rs          | 44 ++-----------------
 src/api2/reader/mod.rs                        | 34 ++++++--------
 4 files changed, 29 insertions(+), 88 deletions(-)

diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index acf22e9b0..a6b17e3c3 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -41,7 +41,6 @@ use crate::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
 use crate::fixed_index::{FixedIndexReader, FixedIndexWriter};
 use crate::hierarchy::{ListGroups, ListGroupsType, ListNamespaces, ListNamespacesRecursive};
 use crate::index::IndexFile;
-use crate::local_datastore_lru_cache::S3Cacher;
 use crate::s3::S3_CONTENT_PREFIX;
 use crate::task_tracking::{self, update_active_operations};
 use crate::{DataBlob, LocalDatastoreLruCache};
@@ -291,17 +290,6 @@ impl DataStore {
         Ok(())
     }
 
-    /// Returns the cacher for datastores backed by S3 object stores.
-    /// This allows to fetch chunks to the local cache store on-demand.
-    pub fn cacher(&self) -> Result<Option<S3Cacher>, Error> {
-        self.backend().map(|backend| match backend {
-            DatastoreBackend::S3(s3_client) => {
-                Some(S3Cacher::new(s3_client, self.inner.chunk_store.clone()))
-            }
-            DatastoreBackend::Filesystem => None,
-        })
-    }
-
     pub fn lookup_datastore(
         name: &str,
         operation: Option<Operation>,
diff --git a/pbs-datastore/src/local_chunk_reader.rs b/pbs-datastore/src/local_chunk_reader.rs
index 36bce1552..c50a63fb7 100644
--- a/pbs-datastore/src/local_chunk_reader.rs
+++ b/pbs-datastore/src/local_chunk_reader.rs
@@ -70,13 +70,11 @@ impl ReadChunk for LocalChunkReader {
             DatastoreBackend::S3(s3_client) => match self.store.cache() {
                 None => proxmox_async::runtime::block_on(fetch(Arc::clone(s3_client), digest))?,
                 Some(cache) => {
-                    let mut cacher = self
-                        .store
-                        .cacher()?
-                        .ok_or(format_err!("no cacher for datastore"))?;
-                    proxmox_async::runtime::block_on(cache.access(digest, &mut cacher))?.ok_or(
-                        format_err!("unable to access chunk with digest {}", hex::encode(digest)),
-                    )?
+                    proxmox_async::runtime::block_on(cache.access(digest, s3_client.clone()))?
+                        .ok_or(format_err!(
+                            "unable to access chunk with digest {}",
+                            hex::encode(digest)
+                        ))?
                 }
             },
         };
@@ -109,14 +107,13 @@ impl AsyncReadChunk for LocalChunkReader {
                 DatastoreBackend::S3(s3_client) => match self.store.cache() {
                     None => fetch(Arc::clone(s3_client), digest).await?,
                     Some(cache) => {
-                        let mut cacher = self
-                            .store
-                            .cacher()?
-                            .ok_or(format_err!("no cacher for datastore"))?;
-                        cache.access(digest, &mut cacher).await?.ok_or(format_err!(
-                            "unable to access chunk with digest {}",
-                            hex::encode(digest)
-                        ))?
+                        cache
+                            .access(digest, s3_client.clone())
+                            .await?
+                            .ok_or(format_err!(
+                                "unable to access chunk with digest {}",
+                                hex::encode(digest)
+                            ))?
                     }
                 },
             };
diff --git a/pbs-datastore/src/local_datastore_lru_cache.rs b/pbs-datastore/src/local_datastore_lru_cache.rs
index f03265a5b..fe3b51a55 100644
--- a/pbs-datastore/src/local_datastore_lru_cache.rs
+++ b/pbs-datastore/src/local_datastore_lru_cache.rs
@@ -1,53 +1,17 @@
 //! Use a local datastore as cache for operations on a datastore attached via
 //! a network layer (e.g. via the S3 backend).
 
-use std::future::Future;
 use std::sync::Arc;
 
 use anyhow::{bail, Error};
 use http_body_util::BodyExt;
 
-use pbs_tools::async_lru_cache::{AsyncCacher, AsyncLruCache};
+use pbs_tools::async_lru_cache::AsyncLruCache;
 use proxmox_s3_client::S3Client;
 
 use crate::ChunkStore;
 use crate::DataBlob;
 
-#[derive(Clone)]
-/// Cacher to fetch chunks from the S3 object store and insert them in the local cache store.
-pub struct S3Cacher {
-    client: Arc<S3Client>,
-    store: Arc<ChunkStore>,
-}
-
-impl AsyncCacher<[u8; 32], ()> for S3Cacher {
-    fn fetch(
-        &self,
-        key: [u8; 32],
-    ) -> Box<dyn Future<Output = Result<Option<()>, Error>> + Send + 'static> {
-        let client = Arc::clone(&self.client);
-        let store = Arc::clone(&self.store);
-        Box::new(async move {
-            let object_key = crate::s3::object_key_from_digest(&key)?;
-            match client.get_object(object_key).await? {
-                None => bail!("could not fetch object with key {}", hex::encode(key)),
-                Some(response) => {
-                    let bytes = response.content.collect().await?.to_bytes();
-                    let chunk = DataBlob::from_raw(bytes.to_vec())?;
-                    store.insert_chunk(&chunk, &key)?;
-                    Ok(Some(()))
-                }
-            }
-        })
-    }
-}
-
-impl S3Cacher {
-    pub fn new(client: Arc<S3Client>, store: Arc<ChunkStore>) -> Self {
-        Self { client, store }
-    }
-}
-
 /// LRU cache using local datastore for caching chunks
 ///
 /// Uses a LRU cache, but without storing the values in-memory but rather
@@ -100,7 +64,7 @@ impl LocalDatastoreLruCache {
     pub async fn access(
         &self,
         digest: &[u8; 32],
-        cacher: &mut S3Cacher,
+        client: Arc<S3Client>,
     ) -> Result<Option<DataBlob>, Error> {
         let (path, _digest_str) = self.store.chunk_path(digest);
         match std::fs::File::open(&path) {
@@ -116,7 +80,7 @@ impl LocalDatastoreLruCache {
                     use std::io::Seek;
                     // Check if file is empty marker file, try fetching content if so
                     if file.seek(std::io::SeekFrom::End(0))? == 0 {
-                        let chunk = self.fetch_and_insert(cacher.client.clone(), digest).await?;
+                        let chunk = self.fetch_and_insert(client.clone(), digest).await?;
                         Ok(Some(chunk))
                     } else {
                         Err(err)
@@ -126,7 +90,7 @@ impl LocalDatastoreLruCache {
             Err(err) => {
                 // Failed to open file, missing
                 if err.kind() == std::io::ErrorKind::NotFound {
-                    let chunk = self.fetch_and_insert(cacher.client.clone(), digest).await?;
+                    let chunk = self.fetch_and_insert(client.clone(), digest).await?;
                     Ok(Some(chunk))
                 } else {
                     Err(Error::from(err))
diff --git a/src/api2/reader/mod.rs b/src/api2/reader/mod.rs
index 846493c61..155e862c6 100644
--- a/src/api2/reader/mod.rs
+++ b/src/api2/reader/mod.rs
@@ -327,28 +327,20 @@ fn download_chunk(
 
         let body = match &env.backend {
             DatastoreBackend::Filesystem => load_from_filesystem(env, &digest)?,
-            DatastoreBackend::S3(s3_client) => {
-                match env.datastore.cache() {
-                    None => fetch_from_object_store(s3_client, &digest).await?,
-                    Some(cache) => {
-                        let mut cacher = env
-                            .datastore
-                            .cacher()?
-                            .ok_or(format_err!("no cacher for datastore"))?;
-                        // Download from object store, insert to local cache store and read from
-                        // file. Can this be optimized?
-                        let chunk =
-                            cache
-                                .access(&digest, &mut cacher)
-                                .await?
-                                .ok_or(format_err!(
-                                    "unable to access chunk with digest {}",
-                                    hex::encode(digest)
-                                ))?;
-                        Body::from(chunk.raw_data().to_owned())
-                    }
+            DatastoreBackend::S3(s3_client) => match env.datastore.cache() {
+                None => fetch_from_object_store(s3_client, &digest).await?,
+                Some(cache) => {
+                    let chunk =
+                        cache
+                            .access(&digest, s3_client.clone())
+                            .await?
+                            .ok_or(format_err!(
+                                "unable to access chunk with digest {}",
+                                hex::encode(digest)
+                            ))?;
+                    Body::from(chunk.raw_data().to_owned())
                 }
-            }
+            },
         };
 
         // fixme: set other headers ?
-- 
2.47.3



_______________________________________________
pbs-devel mailing list
pbs-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel

