From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [pbs-devel] [PATCH proxmox-backup v6 27/37] tools: lru cache: add removed callback for evicted cache nodes
Date: Tue, 8 Jul 2025 19:01:04 +0200
Message-ID: <20250708170114.1556057-37-c.ebner@proxmox.com>
In-Reply-To: <20250708170114.1556057-1-c.ebner@proxmox.com>
Add a callback function to be executed on evicted cache nodes. The
callback gets the key of the removed node, allowing the caller to act
externally based on that value.
Since the callback might fail, extend the current LRU cache API so
that insert returns a Result, propagating any error from the `removed`
callback.
The async LRU cache, its call sites and the tests are adapted to take
the additional callback parameter accordingly.
Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
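For reviewers, a minimal usage sketch of the callback-aware `insert` signature
introduced below (illustrative only: the capacity, key/value types and the
`on_evict` handler are made up for the example, only the signature follows
this patch):

use anyhow::Error;
use pbs_tools::lru_cache::LruCache;

fn fill_cache() -> Result<(), Error> {
    // Capacity of 2: inserting a third entry evicts the least recently used one.
    let mut cache: LruCache<u64, String> = LruCache::new(2);

    // The `removed` callback receives the evicted key; if it returns an error,
    // the insert fails, which is why `insert` now returns a Result.
    let on_evict = |key: u64| -> Result<(), Error> {
        println!("evicted cache entry with key {key}");
        Ok(())
    };

    cache.insert(1, "one".to_string(), on_evict)?;
    cache.insert(2, "two".to_string(), on_evict)?;
    // Exceeds the capacity and triggers `on_evict` for key 1.
    cache.insert(3, "three".to_string(), on_evict)?;

    Ok(())
}

The async variant takes the same callback as a third argument to `access`,
e.g. `cache.access(key, &cacher, |_| Ok(())).await?` when no eviction
handling is needed.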
pbs-datastore/src/cached_chunk_reader.rs | 6 +++-
pbs-datastore/src/datastore.rs | 2 +-
pbs-datastore/src/dynamic_index.rs | 1 +
pbs-tools/src/async_lru_cache.rs | 23 +++++++++----
pbs-tools/src/lru_cache.rs | 42 +++++++++++++++---------
5 files changed, 50 insertions(+), 24 deletions(-)
diff --git a/pbs-datastore/src/cached_chunk_reader.rs b/pbs-datastore/src/cached_chunk_reader.rs
index be7f2a1e2..95ac23a54 100644
--- a/pbs-datastore/src/cached_chunk_reader.rs
+++ b/pbs-datastore/src/cached_chunk_reader.rs
@@ -81,7 +81,11 @@ impl<I: IndexFile, R: AsyncReadChunk + Send + Sync + 'static> CachedChunkReader<
let info = self.index.chunk_info(chunk.0).unwrap();
// will never be None, see AsyncChunkCacher
- let data = self.cache.access(info.digest, &self.cacher).await?.unwrap();
+ let data = self
+ .cache
+ .access(info.digest, &self.cacher, |_| Ok(()))
+ .await?
+ .unwrap();
let want_bytes = ((info.range.end - cur_offset) as usize).min(size - read);
let slice = &mut buf[read..(read + want_bytes)];
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index 205ff7b6c..c1ba2dcea 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -1221,7 +1221,7 @@ impl DataStore {
// Avoid multiple expensive atime updates by utimensat
if let Some(chunk_lru_cache) = chunk_lru_cache {
- if chunk_lru_cache.insert(*digest, ()) {
+ if chunk_lru_cache.insert(*digest, (), |_| Ok(()))? {
continue;
}
}
diff --git a/pbs-datastore/src/dynamic_index.rs b/pbs-datastore/src/dynamic_index.rs
index b1d85a049..83e13b311 100644
--- a/pbs-datastore/src/dynamic_index.rs
+++ b/pbs-datastore/src/dynamic_index.rs
@@ -599,6 +599,7 @@ impl<S: ReadChunk> BufferedDynamicReader<S> {
store: &mut self.store,
index: &self.index,
},
+ |_| Ok(()),
)?
.ok_or_else(|| format_err!("chunk not found by cacher"))?;
diff --git a/pbs-tools/src/async_lru_cache.rs b/pbs-tools/src/async_lru_cache.rs
index c43b87717..141114933 100644
--- a/pbs-tools/src/async_lru_cache.rs
+++ b/pbs-tools/src/async_lru_cache.rs
@@ -42,7 +42,16 @@ impl<K: std::cmp::Eq + std::hash::Hash + Copy, V: Clone + Send + 'static> AsyncL
/// Access an item either via the cache or by calling cacher.fetch. A return value of Ok(None)
/// means the item requested has no representation, Err(_) means a call to fetch() failed,
/// regardless of whether it was initiated by this call or a previous one.
- pub async fn access(&self, key: K, cacher: &dyn AsyncCacher<K, V>) -> Result<Option<V>, Error> {
+ /// Calls the removed callback on the evicted item, if any.
+ pub async fn access<F>(
+ &self,
+ key: K,
+ cacher: &dyn AsyncCacher<K, V>,
+ removed: F,
+ ) -> Result<Option<V>, Error>
+ where
+ F: Fn(K) -> Result<(), Error>,
+ {
let (owner, result_fut) = {
// check if already requested
let mut maps = self.maps.lock().unwrap();
@@ -71,7 +80,7 @@ impl<K: std::cmp::Eq + std::hash::Hash + Copy, V: Clone + Send + 'static> AsyncL
// this call was the one initiating the request, put into LRU and remove from map
let mut maps = self.maps.lock().unwrap();
if let Ok(Some(ref value)) = result {
- maps.0.insert(key, value.clone());
+ maps.0.insert(key, value.clone(), removed)?;
}
maps.1.remove(&key);
}
@@ -106,15 +115,15 @@ mod test {
let cache: AsyncLruCache<i32, String> = AsyncLruCache::new(2);
assert_eq!(
- cache.access(10, &cacher).await.unwrap(),
+ cache.access(10, &cacher, |_| Ok(())).await.unwrap(),
Some("x10".to_string())
);
assert_eq!(
- cache.access(20, &cacher).await.unwrap(),
+ cache.access(20, &cacher, |_| Ok(())).await.unwrap(),
Some("x20".to_string())
);
assert_eq!(
- cache.access(30, &cacher).await.unwrap(),
+ cache.access(30, &cacher, |_| Ok(())).await.unwrap(),
Some("x30".to_string())
);
@@ -123,14 +132,14 @@ mod test {
tokio::spawn(async move {
let cacher = TestAsyncCacher { prefix: "y" };
assert_eq!(
- c.access(40, &cacher).await.unwrap(),
+ c.access(40, &cacher, |_| Ok(())).await.unwrap(),
Some("y40".to_string())
);
});
}
assert_eq!(
- cache.access(20, &cacher).await.unwrap(),
+ cache.access(20, &cacher, |_| Ok(())).await.unwrap(),
Some("x20".to_string())
);
});
diff --git a/pbs-tools/src/lru_cache.rs b/pbs-tools/src/lru_cache.rs
index 94757bbf7..a7aea6528 100644
--- a/pbs-tools/src/lru_cache.rs
+++ b/pbs-tools/src/lru_cache.rs
@@ -60,10 +60,10 @@ impl<K, V> CacheNode<K, V> {
/// assert_eq!(cache.get_mut(1), None);
/// assert_eq!(cache.len(), 0);
///
-/// cache.insert(1, 1);
-/// cache.insert(2, 2);
-/// cache.insert(3, 3);
-/// cache.insert(4, 4);
+/// cache.insert(1, 1, |_| Ok(()));
+/// cache.insert(2, 2, |_| Ok(()));
+/// cache.insert(3, 3, |_| Ok(()));
+/// cache.insert(4, 4, |_| Ok(()));
/// assert_eq!(cache.len(), 3);
///
/// assert_eq!(cache.get_mut(1), None);
@@ -77,9 +77,9 @@ impl<K, V> CacheNode<K, V> {
/// assert_eq!(cache.len(), 0);
/// assert_eq!(cache.get_mut(2), None);
/// // access will fill in missing cache entry by fetching from LruCacher
-/// assert_eq!(cache.access(2, &mut LruCacher {}).unwrap(), Some(&mut 2));
+/// assert_eq!(cache.access(2, &mut LruCacher {}, |_| Ok(())).unwrap(), Some(&mut 2));
///
-/// cache.insert(1, 1);
+/// cache.insert(1, 1, |_| Ok(()));
/// assert_eq!(cache.get_mut(1), Some(&mut 1));
///
/// cache.clear();
@@ -135,7 +135,10 @@ impl<K: std::cmp::Eq + std::hash::Hash + Copy, V> LruCache<K, V> {
/// Insert or update an entry identified by `key` with the given `value`.
/// This entry is placed as the most recently used node at the head.
- pub fn insert(&mut self, key: K, value: V) -> bool {
+ pub fn insert<F>(&mut self, key: K, value: V, removed: F) -> Result<bool, anyhow::Error>
+ where
+ F: Fn(K) -> Result<(), anyhow::Error>,
+ {
match self.map.entry(key) {
Entry::Occupied(mut o) => {
// Node present, update value
@@ -144,7 +147,7 @@ impl<K: std::cmp::Eq + std::hash::Hash + Copy, V> LruCache<K, V> {
let mut node = unsafe { Box::from_raw(node_ptr) };
node.value = value;
let _node_ptr = Box::into_raw(node);
- true
+ Ok(true)
}
Entry::Vacant(v) => {
// Node not present, insert a new one
@@ -160,9 +163,11 @@ impl<K: std::cmp::Eq + std::hash::Hash + Copy, V> LruCache<K, V> {
// avoid borrow conflict. This means there are temporarily
// self.capacity + 1 cache nodes.
if self.map.len() > self.capacity {
- self.pop_tail();
+ if let Some(removed_node) = self.pop_tail() {
+ removed(removed_node)?;
+ }
}
- false
+ Ok(false)
}
}
}
@@ -176,11 +181,12 @@ impl<K: std::cmp::Eq + std::hash::Hash + Copy, V> LruCache<K, V> {
}
/// Remove the least recently used node from the cache.
- fn pop_tail(&mut self) {
+ fn pop_tail(&mut self) -> Option<K> {
if let Some(old_tail) = self.list.pop_tail() {
// Remove HashMap entry for old tail
- self.map.remove(&old_tail.key);
+ return self.map.remove(&old_tail.key).map(|_| old_tail.key);
}
+ None
}
/// Get a mutable reference to the value identified by `key`.
@@ -208,11 +214,15 @@ impl<K: std::cmp::Eq + std::hash::Hash + Copy, V> LruCache<K, V> {
/// value.
/// If fetch returns a value, it is inserted as the most recently used entry
/// in the cache.
- pub fn access<'a>(
+ pub fn access<'a, F>(
&'a mut self,
key: K,
cacher: &mut dyn Cacher<K, V>,
- ) -> Result<Option<&'a mut V>, anyhow::Error> {
+ removed: F,
+ ) -> Result<Option<&'a mut V>, anyhow::Error>
+ where
+ F: Fn(K) -> Result<(), anyhow::Error>,
+ {
match self.map.entry(key) {
Entry::Occupied(mut o) => {
// Cache hit, bring node to front of list
@@ -236,7 +246,9 @@ impl<K: std::cmp::Eq + std::hash::Hash + Copy, V> LruCache<K, V> {
// avoid borrow conflict. This means there are temporarily
// self.capacity + 1 cache nodes.
if self.map.len() > self.capacity {
- self.pop_tail();
+ if let Some(removed_node) = self.pop_tail() {
+ removed(removed_node)?;
+ }
}
}
}
--
2.47.2