public inbox for pbs-devel@lists.proxmox.com
 help / color / mirror / Atom feed
* [pbs-devel] [RFC proxmox-backup] tape: media_catalog: add snapshot list cache for catalog
@ 2021-07-28  7:26 Dietmar Maurer
  0 siblings, 0 replies; only message in thread
From: Dietmar Maurer @ 2021-07-28  7:26 UTC (permalink / raw)
  To: pbs-devel

For some parts of the ui, we only need the snapshot list from the catalog,
and reading the whole catalog (which can be several hundred MiB) is not
really necessary.

Instead, we write the list of snapshots into a separate .index file. This file
is generated on demand and is much smaller and thus faster to read.
---

Like the patch from Dominik, but generates the cache on demand.

 src/tape/media_catalog_cache.rs | 106 ++++++++++++++++++++++++++++++++
 src/tape/mod.rs                 |   3 +
 2 files changed, 109 insertions(+)
 create mode 100644 src/tape/media_catalog_cache.rs

diff --git a/src/tape/media_catalog_cache.rs b/src/tape/media_catalog_cache.rs
new file mode 100644
index 00000000..23d3d78b
--- /dev/null
+++ b/src/tape/media_catalog_cache.rs
@@ -0,0 +1,106 @@
+use std::path::Path;
+use std::io::{BufRead, BufReader};
+
+use anyhow::{format_err, bail, Error};
+
+use proxmox::tools::fs::CreateOptions;
+
+use crate::tape::{MediaCatalog, MediaId};
+
+/// Returns a list of (store, snapshot) for a given MediaId
+///
+/// To speed things up for large catalogs, we cache the list of
+/// snapshots into a separate file.
+pub fn media_catalog_snapshot_list(
+    base_path: &Path,
+    media_id: &MediaId,
+) -> Result<Vec<(String, String)>, Error> {
+
+    let uuid = &media_id.label.uuid;
+
+    // Catalog and cache live side by side under base_path:
+    // "<uuid>.log" (full catalog) and "<uuid>.index" (snapshot list cache).
+    let mut cache_path = base_path.to_owned();
+    cache_path.push(uuid.to_string());
+    let mut catalog_path = cache_path.clone();
+    cache_path.set_extension("index");
+    catalog_path.set_extension("log");
+
+    // A missing catalog is an error — the cache is derived from it.
+    let stat = match nix::sys::stat::stat(&catalog_path) {
+        Ok(stat) => stat,
+        Err(err) => bail!("unable to stat media catalog {:?} - {}", catalog_path, err),
+    };
+
+    // Fingerprint of the catalog file (inode, size, mtime), stored as the
+    // first line of the cache file so a stale cache can be detected cheaply.
+    // NOTE(review): mtime has one-second granularity here; two catalog writes
+    // within the same second that keep the size could go undetected — verify
+    // whether catalog append patterns make this possible.
+    let cache_id = format!("{:016X}-{:016X}-{:016X}", stat.st_ino, stat.st_size as u64, stat.st_mtime as u64);
+
+    match std::fs::OpenOptions::new().read(true).open(&cache_path) {
+        Ok(file) => {
+            let mut list = Vec::new();
+            let file = BufReader::new(file);
+            let mut lines = file.lines();
+            // First line must match the current catalog fingerprint.
+            match lines.next() {
+                Some(Ok(id)) => {
+                    if id != cache_id { // cache is outdated - rewrite
+                        return write_snapshot_cache(base_path, media_id, &cache_path, &cache_id);
+                    }
+                }
+                _ => bail!("unable to read catalog cache firstline {:?}", cache_path),
+            }
+
+            // Remaining lines have the form "<store>:<snapshot>"; split on the
+            // first ':' (store names contain no ':', snapshot paths may).
+            for line in lines {
+                let mut line = line?;
+
+                let idx = line
+                    .find(':')
+                    .ok_or_else(|| format_err!("invalid line format (no store found)"))?;
+
+                // split_off/truncate reuse the line buffer: `line` keeps the
+                // store part, `snapshot` takes the rest (without the ':').
+                let snapshot = line.split_off(idx + 1);
+                line.truncate(idx);
+                list.push((line, snapshot));
+            }
+
+            Ok(list)
+        }
+        // No cache yet — build it from the full catalog.
+        Err(err) if err.kind() == std::io::ErrorKind::NotFound => {
+            write_snapshot_cache(base_path, media_id, &cache_path, &cache_id)
+        }
+        Err(err) => bail!("unable to open catalog cache - {}", err),
+    }
+}
+
+/// Reads the full media catalog and (re)writes the snapshot list cache.
+///
+/// The cache file starts with `cache_id` on its own line, followed by one
+/// "<store>:<snapshot>" line per snapshot found in the catalog. Returns the
+/// same (store, snapshot) list that was written.
+fn write_snapshot_cache(
+    base_path: &Path,
+    media_id: &MediaId,
+    cache_path: &Path,
+    cache_id: &str,
+) ->  Result<Vec<(String, String)>, Error> {
+
+    // open normal catalog and write cache
+
+    // fixme: this can fail if someone writes the catalog
+    let catalog = MediaCatalog::open(base_path, media_id, false, false)?;
+
+    // Build the whole cache file content in memory first so it can be
+    // written atomically below.
+    let mut data = String::new();
+    data.push_str(cache_id);
+    data.push('\n');
+
+    let mut list = Vec::new();
+    for (store, content) in catalog.content() {
+        for snapshot in content.snapshot_index.keys() {
+            list.push((store.to_string(), snapshot.to_string()));
+            data.push_str(store);
+            data.push(':');
+            data.push_str(snapshot);
+            data.push('\n');
+        }
+    }
+
+    // Cache must be readable/writable by the backup user like the catalog.
+    let backup_user = crate::backup::backup_user()?;
+    let options = CreateOptions::new().owner(backup_user.uid).group(backup_user.gid);
+
+    // replace_file writes to a temp file and renames, so concurrent readers
+    // never see a partially written cache.
+    proxmox::tools::fs::replace_file(
+        cache_path,
+        data.as_bytes(),
+        options,
+    )?;
+
+    Ok(list)
+}
diff --git a/src/tape/mod.rs b/src/tape/mod.rs
index 8190e141..93c24719 100644
--- a/src/tape/mod.rs
+++ b/src/tape/mod.rs
@@ -42,6 +42,9 @@ pub use media_pool::*;
 mod media_catalog;
 pub use media_catalog::*;
 
+mod media_catalog_cache;
+pub use media_catalog_cache::*;
+
 mod pool_writer;
 pub use pool_writer::*;
 
-- 
2.30.2





^ permalink raw reply	[flat|nested] only message in thread

only message in thread, other threads:[~2021-07-28  7:44 UTC | newest]

Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-07-28  7:26 [pbs-devel] [RFC proxmox-backup] tape: media_catalog: add snapshot list cache for catalog Dietmar Maurer

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox
Service provided by Proxmox Server Solutions GmbH | Privacy | Legal