From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Date: Wed, 8 Oct 2025 17:21:21 +0200
Message-ID: <20251008152125.849216-9-c.ebner@proxmox.com>
X-Mailer: git-send-email 2.47.3
In-Reply-To: <20251008152125.849216-1-c.ebner@proxmox.com>
References: <20251008152125.849216-1-c.ebner@proxmox.com>
Subject: [pbs-devel] [PATCH proxmox-backup v2 08/12] local store cache: drop obsolete cacher implementation

Since the local store cache no longer uses the inner LRU cache on chunk
access, the S3Cacher implementation is obsolete and can be replaced by
the S3Client directly when fetching is required.

Drop all now obsolete code and adapt the call sites accordingly.

Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
 pbs-datastore/src/datastore.rs                 | 12 -----
 pbs-datastore/src/local_chunk_reader.rs        | 27 +++++-------
 .../src/local_datastore_lru_cache.rs           | 44 ++-----------------
 src/api2/reader/mod.rs                         | 34 ++++++--------
 4 files changed, 29 insertions(+), 88 deletions(-)
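
For reviewers: the net effect at the call sites is that the S3Cacher
indirection disappears and the S3 client is handed to the cache lookup
directly. A minimal before/after sketch (illustration only, not part of
the patch; cache, digest and s3_client are named as in the hunks below,
and "datastore" stands for the DataStore handle that appears there as
self.store or env.datastore):

    // before: build a cacher via the datastore, then pass it by
    // mutable reference into every cache lookup
    let mut cacher = datastore
        .cacher()?
        .ok_or(format_err!("no cacher for datastore"))?;
    let chunk = cache.access(digest, &mut cacher).await?;

    // after: hand the S3 client to the cache directly,
    // no intermediate type needed
    let chunk = cache.access(digest, s3_client.clone()).await?;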
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index acf22e9b0..a6b17e3c3 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -41,7 +41,6 @@ use crate::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
 use crate::fixed_index::{FixedIndexReader, FixedIndexWriter};
 use crate::hierarchy::{ListGroups, ListGroupsType, ListNamespaces, ListNamespacesRecursive};
 use crate::index::IndexFile;
-use crate::local_datastore_lru_cache::S3Cacher;
 use crate::s3::S3_CONTENT_PREFIX;
 use crate::task_tracking::{self, update_active_operations};
 use crate::{DataBlob, LocalDatastoreLruCache};
@@ -291,17 +290,6 @@ impl DataStore {
         Ok(())
     }
 
-    /// Returns the cacher for datastores backed by S3 object stores.
-    /// This allows to fetch chunks to the local cache store on-demand.
-    pub fn cacher(&self) -> Result<Option<S3Cacher>, Error> {
-        self.backend().map(|backend| match backend {
-            DatastoreBackend::S3(s3_client) => {
-                Some(S3Cacher::new(s3_client, self.inner.chunk_store.clone()))
-            }
-            DatastoreBackend::Filesystem => None,
-        })
-    }
-
     pub fn lookup_datastore(
         name: &str,
         operation: Option<Operation>,
diff --git a/pbs-datastore/src/local_chunk_reader.rs b/pbs-datastore/src/local_chunk_reader.rs
index 36bce1552..c50a63fb7 100644
--- a/pbs-datastore/src/local_chunk_reader.rs
+++ b/pbs-datastore/src/local_chunk_reader.rs
@@ -70,13 +70,11 @@ impl ReadChunk for LocalChunkReader {
             DatastoreBackend::S3(s3_client) => match self.store.cache() {
                 None => proxmox_async::runtime::block_on(fetch(Arc::clone(s3_client), digest))?,
                 Some(cache) => {
-                    let mut cacher = self
-                        .store
-                        .cacher()?
-                        .ok_or(format_err!("no cacher for datastore"))?;
-                    proxmox_async::runtime::block_on(cache.access(digest, &mut cacher))?.ok_or(
-                        format_err!("unable to access chunk with digest {}", hex::encode(digest)),
-                    )?
+                    proxmox_async::runtime::block_on(cache.access(digest, s3_client.clone()))?
+                        .ok_or(format_err!(
+                            "unable to access chunk with digest {}",
+                            hex::encode(digest)
+                        ))?
                 }
             },
         };
@@ -109,14 +107,13 @@ impl AsyncReadChunk for LocalChunkReader {
             DatastoreBackend::S3(s3_client) => match self.store.cache() {
                 None => fetch(Arc::clone(s3_client), digest).await?,
                 Some(cache) => {
-                    let mut cacher = self
-                        .store
-                        .cacher()?
-                        .ok_or(format_err!("no cacher for datastore"))?;
-                    cache.access(digest, &mut cacher).await?.ok_or(format_err!(
-                        "unable to access chunk with digest {}",
-                        hex::encode(digest)
-                    ))?
+                    cache
+                        .access(digest, s3_client.clone())
+                        .await?
+                        .ok_or(format_err!(
+                            "unable to access chunk with digest {}",
+                            hex::encode(digest)
+                        ))?
                 }
             },
         };
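
Note on the removal below: the fetch logic formerly provided by
S3Cacher::fetch already lives in the cache's fetch_and_insert() helper,
which the adapted hunks call with the client directly; that helper is
not touched by this patch. A sketch of its assumed shape (hypothetical
reconstruction mirroring the dropped S3Cacher::fetch body, except that
it returns the chunk itself instead of Option<()>):

    // Assumed shape of the existing LocalDatastoreLruCache::fetch_and_insert()
    // helper (hypothetical reconstruction, not part of this patch).
    async fn fetch_and_insert(
        &self,
        client: Arc<S3Client>,
        digest: &[u8; 32],
    ) -> Result<DataBlob, Error> {
        // map the chunk digest to its S3 object key
        let object_key = crate::s3::object_key_from_digest(digest)?;
        match client.get_object(object_key).await? {
            None => bail!("could not fetch object with key {}", hex::encode(digest)),
            Some(response) => {
                // collect the response body and decode it as a data blob
                let bytes = response.content.collect().await?.to_bytes();
                let chunk = DataBlob::from_raw(bytes.to_vec())?;
                // persist the chunk in the local cache store
                self.store.insert_chunk(&chunk, digest)?;
                Ok(chunk)
            }
        }
    }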
diff --git a/pbs-datastore/src/local_datastore_lru_cache.rs b/pbs-datastore/src/local_datastore_lru_cache.rs
index f03265a5b..fe3b51a55 100644
--- a/pbs-datastore/src/local_datastore_lru_cache.rs
+++ b/pbs-datastore/src/local_datastore_lru_cache.rs
@@ -1,53 +1,17 @@
 //! Use a local datastore as cache for operations on a datastore attached via
 //! a network layer (e.g. via the S3 backend).
 
-use std::future::Future;
 use std::sync::Arc;
 
 use anyhow::{bail, Error};
 use http_body_util::BodyExt;
 
-use pbs_tools::async_lru_cache::{AsyncCacher, AsyncLruCache};
+use pbs_tools::async_lru_cache::AsyncLruCache;
 use proxmox_s3_client::S3Client;
 
 use crate::ChunkStore;
 use crate::DataBlob;
 
-#[derive(Clone)]
-/// Cacher to fetch chunks from the S3 object store and insert them in the local cache store.
-pub struct S3Cacher {
-    client: Arc<S3Client>,
-    store: Arc<ChunkStore>,
-}
-
-impl AsyncCacher<[u8; 32], ()> for S3Cacher {
-    fn fetch(
-        &self,
-        key: [u8; 32],
-    ) -> Box<dyn Future<Output = Result<Option<()>, Error>> + Send + 'static> {
-        let client = Arc::clone(&self.client);
-        let store = Arc::clone(&self.store);
-        Box::new(async move {
-            let object_key = crate::s3::object_key_from_digest(&key)?;
-            match client.get_object(object_key).await? {
-                None => bail!("could not fetch object with key {}", hex::encode(key)),
-                Some(response) => {
-                    let bytes = response.content.collect().await?.to_bytes();
-                    let chunk = DataBlob::from_raw(bytes.to_vec())?;
-                    store.insert_chunk(&chunk, &key)?;
-                    Ok(Some(()))
-                }
-            }
-        })
-    }
-}
-
-impl S3Cacher {
-    pub fn new(client: Arc<S3Client>, store: Arc<ChunkStore>) -> Self {
-        Self { client, store }
-    }
-}
-
 /// LRU cache using local datastore for caching chunks
 ///
 /// Uses a LRU cache, but without storing the values in-memory but rather
@@ -100,7 +64,7 @@ impl LocalDatastoreLruCache {
     pub async fn access(
         &self,
         digest: &[u8; 32],
-        cacher: &mut S3Cacher,
+        client: Arc<S3Client>,
     ) -> Result<Option<DataBlob>, Error> {
         let (path, _digest_str) = self.store.chunk_path(digest);
         match std::fs::File::open(&path) {
@@ -116,7 +80,7 @@
                     use std::io::Seek;
                     // Check if file is empty marker file, try fetching content if so
                     if file.seek(std::io::SeekFrom::End(0))? == 0 {
-                        let chunk = self.fetch_and_insert(cacher.client.clone(), digest).await?;
+                        let chunk = self.fetch_and_insert(client.clone(), digest).await?;
                         Ok(Some(chunk))
                     } else {
                         Err(err)
@@ -126,7 +90,7 @@
             Err(err) => {
                 // Failed to open file, missing
                 if err.kind() == std::io::ErrorKind::NotFound {
-                    let chunk = self.fetch_and_insert(cacher.client.clone(), digest).await?;
+                    let chunk = self.fetch_and_insert(client.clone(), digest).await?;
                     Ok(Some(chunk))
                 } else {
                     Err(Error::from(err))
diff --git a/src/api2/reader/mod.rs b/src/api2/reader/mod.rs
index 846493c61..155e862c6 100644
--- a/src/api2/reader/mod.rs
+++ b/src/api2/reader/mod.rs
@@ -327,28 +327,20 @@ fn download_chunk(
     let body = match &env.backend {
         DatastoreBackend::Filesystem => load_from_filesystem(env, &digest)?,
-        DatastoreBackend::S3(s3_client) => {
-            match env.datastore.cache() {
-                None => fetch_from_object_store(s3_client, &digest).await?,
-                Some(cache) => {
-                    let mut cacher = env
-                        .datastore
-                        .cacher()?
-                        .ok_or(format_err!("no cacher for datastore"))?;
-                    // Download from object store, insert to local cache store and read from
-                    // file. Can this be optimized?
-                    let chunk =
-                        cache
-                            .access(&digest, &mut cacher)
-                            .await?
-                            .ok_or(format_err!(
-                                "unable to access chunk with digest {}",
-                                hex::encode(digest)
-                            ))?;
-                    Body::from(chunk.raw_data().to_owned())
-                }
+        DatastoreBackend::S3(s3_client) => match env.datastore.cache() {
+            None => fetch_from_object_store(s3_client, &digest).await?,
+            Some(cache) => {
+                let chunk =
+                    cache
+                        .access(&digest, s3_client.clone())
+                        .await?
+                        .ok_or(format_err!(
+                            "unable to access chunk with digest {}",
+                            hex::encode(digest)
+                        ))?;
+                Body::from(chunk.raw_data().to_owned())
             }
-        }
+        },
     };
 
     // fixme: set other headers ?
-- 
2.47.3

_______________________________________________
pbs-devel mailing list
pbs-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel