From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [pbs-devel] [PATCH proxmox-backup v3 5/8] chunk store: add backend upload marker helpers for s3 backed stores
Date: Wed, 15 Oct 2025 18:40:05 +0200
Message-ID: <20251015164008.975591-8-c.ebner@proxmox.com>
In-Reply-To: <20251015164008.975591-1-c.ebner@proxmox.com>
In preparation for fixing the race window between chunk upload/insert
and garbage collection for ongoing backup jobs.

Introduces helper methods to manipulate per-chunk upload marker files,
guarded by the chunk store mutex for consistency under concurrent
operations.

Upload marker files are named after the chunk digest, extended by the
`backend-upload` filename extension.
Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
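For review purposes, a minimal sketch of the intended call sequence of
the new helpers on the upload path (not part of this patch; the
function name and the elided S3 upload call are hypothetical, error
handling is simplified):

    // Upload path for a chunk on an s3-backed store (illustrative sketch).
    fn upload_chunk_sketch(
        store: &ChunkStore,
        chunk: &DataBlob,
        digest: &[u8; 32],
    ) -> Result<(), Error> {
        // Mark the upload as in-progress so garbage collection keeps the
        // chunk alive. Returns false if the chunk was inserted in the
        // meantime, in which case the upload can be skipped entirely.
        if !store.touch_backend_upload_marker(digest)? {
            return Ok(());
        }

        // ... perform the actual backend (S3) object upload here ...

        // Insert into the local cache store and remove the marker in one
        // step, again guarded by the chunk store mutex.
        store.insert_chunk_and_remove_upload_marker(chunk, digest)?;
        Ok(())
    }

On the garbage collection side, sweep_chunk_marker_files() is meant to
be called per chunk while holding the chunk store file lock, removing
stale upload markers whose atime falls below the cutoff.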
pbs-datastore/src/chunk_store.rs | 156 +++++++++++++++++++++++++++++--
pbs-datastore/src/datastore.rs | 10 ++
src/api2/config/datastore.rs | 2 +
3 files changed, 160 insertions(+), 8 deletions(-)
diff --git a/pbs-datastore/src/chunk_store.rs b/pbs-datastore/src/chunk_store.rs
index 65d74d68e..2693a1c11 100644
--- a/pbs-datastore/src/chunk_store.rs
+++ b/pbs-datastore/src/chunk_store.rs
@@ -2,12 +2,12 @@ use std::os::unix::fs::MetadataExt;
use std::os::unix::io::AsRawFd;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
-use std::time::Duration;
+use std::time::{Duration, SystemTime};
use anyhow::{bail, format_err, Context, Error};
use tracing::{info, warn};
-use pbs_api_types::{DatastoreFSyncLevel, GarbageCollectionStatus};
+use pbs_api_types::{DatastoreBackendType, DatastoreFSyncLevel, GarbageCollectionStatus};
use proxmox_io::ReadExt;
use proxmox_s3_client::S3Client;
use proxmox_sys::fs::{create_dir, create_path, file_type_from_file_stat, CreateOptions};
@@ -22,6 +22,10 @@ use crate::file_formats::{
};
use crate::DataBlob;
+/// Filename extension for local datastore cache marker files indicating
+/// that the chunk is currently being uploaded to the backend
+const BACKEND_UPLOAD_MARKER_EXT: &str = "backend-upload";
+
/// File system based chunk store
pub struct ChunkStore {
name: String, // used for error reporting
@@ -30,6 +34,7 @@ pub struct ChunkStore {
mutex: Mutex<()>,
locker: Option<Arc<Mutex<ProcessLocker>>>,
sync_level: DatastoreFSyncLevel,
+ datastore_backend_type: DatastoreBackendType,
}
// TODO: what about sysctl setting vm.vfs_cache_pressure (0 - 100) ?
@@ -77,6 +82,7 @@ impl ChunkStore {
mutex: Mutex::new(()),
locker: None,
sync_level: Default::default(),
+ datastore_backend_type: DatastoreBackendType::default(),
}
}
@@ -97,6 +103,7 @@ impl ChunkStore {
uid: nix::unistd::Uid,
gid: nix::unistd::Gid,
sync_level: DatastoreFSyncLevel,
+ datastore_backend_type: DatastoreBackendType,
) -> Result<Self, Error>
where
P: Into<PathBuf>,
@@ -151,7 +158,7 @@ impl ChunkStore {
}
}
- Self::open(name, base, sync_level)
+ Self::open(name, base, sync_level, datastore_backend_type)
}
fn lockfile_path<P: Into<PathBuf>>(base: P) -> PathBuf {
@@ -185,6 +192,7 @@ impl ChunkStore {
name: &str,
base: P,
sync_level: DatastoreFSyncLevel,
+ datastore_backend_type: DatastoreBackendType,
) -> Result<Self, Error> {
let base: PathBuf = base.into();
@@ -201,6 +209,7 @@ impl ChunkStore {
locker: Some(locker),
mutex: Mutex::new(()),
sync_level,
+ datastore_backend_type,
})
}
@@ -557,10 +566,114 @@ impl ChunkStore {
Ok(())
}
+    /// Insert a new backend upload marker or update its atime if it already exists, signaling to
+    /// garbage collection that there is an in-progress upload for this chunk.
+    ///
+    /// Returns `true` if the marker was created or touched. Returns `false` if the chunk has been
+    /// inserted in the meantime; in that case no marker file is created and the upload must be skipped.
+ pub(crate) fn touch_backend_upload_marker(&self, digest: &[u8; 32]) -> Result<bool, Error> {
+ if self.datastore_backend_type == DatastoreBackendType::Filesystem {
+ bail!("cannot create backend upload marker, not a cache store");
+ }
+ let (marker_path, digest_str) = self.chunk_backed_upload_marker_path(digest);
+
+ let _lock = self.mutex.lock();
+
+ if self.cond_touch_chunk(digest, false)? {
+ return Ok(false);
+ }
+
+ if let Err(err) = std::fs::File::options()
+ .write(true)
+ .create_new(true)
+ .open(&marker_path)
+ {
+ if err.kind() == std::io::ErrorKind::AlreadyExists {
+ // do not rely on any `File` method implementations such as set_time,
+ // rather update atime using the same logic as for chunks.
+ self.cond_touch_path(&marker_path, true).with_context(|| {
+ format!("failed to update access time of backend upload marker for chunk {digest_str}")
+ })?;
+ } else {
+ return Err(err).with_context(|| {
+ format!("failed to create backend upload marker for chunk {digest_str}")
+ });
+ }
+ }
+ Ok(true)
+ }
+
+    /// Sweep the local chunk marker and the backend upload marker, gathering the access timestamp.
+    ///
+    /// The backend upload marker is removed if it has not been touched within
+    /// the cutoff time.
+    /// Note: the caller must hold the chunk store file lock.
+ pub(crate) unsafe fn sweep_chunk_marker_files(
+ &self,
+ digest: &[u8; 32],
+ min_atime: i64,
+ ) -> Result<i64, Error> {
+ let (chunk_path, _digest_str) = self.chunk_path(digest);
+ let atime = match std::fs::metadata(&chunk_path) {
+ Ok(stat) => stat
+ .accessed()?
+ .duration_since(SystemTime::UNIX_EPOCH)?
+ .as_secs() as i64,
+ Err(err) if err.kind() == std::io::ErrorKind::NotFound => {
+ let (marker_path, _digest_str) = self.chunk_backed_upload_marker_path(digest);
+ match std::fs::metadata(&marker_path) {
+ Ok(upload_marker_stat) => {
+ let atime = upload_marker_stat
+ .accessed()?
+ .duration_since(SystemTime::UNIX_EPOCH)?
+ .as_secs() as i64;
+ if atime < min_atime {
+ std::fs::remove_file(&marker_path)?
+ }
+ atime
+ }
+ Err(err) => {
+ if err.kind() != std::io::ErrorKind::NotFound {
+ bail!("failed to check backend upload marker: {err}");
+ } else {
+ 0
+ }
+ }
+ }
+ }
+ Err(err) => return Err(err.into()),
+ };
+ Ok(atime)
+ }
+
pub fn insert_chunk(&self, chunk: &DataBlob, digest: &[u8; 32]) -> Result<(bool, u64), Error> {
self.insert_chunk_impl(chunk, digest, |_, _| Ok(()))
}
+    /// Insert the chunk into the chunk store and remove the backend upload marker file for
+    /// datastores with a non-filesystem backend.
+ pub(crate) fn insert_chunk_and_remove_upload_marker(
+ &self,
+ chunk: &DataBlob,
+ digest: &[u8; 32],
+ ) -> Result<(bool, u64), Error> {
+ self.insert_chunk_impl(chunk, digest, |digest, pre_existing| {
+ if self.datastore_backend_type != DatastoreBackendType::Filesystem {
+ let (marker_path, digest_str) = self.chunk_backed_upload_marker_path(digest);
+ if let Err(err) = std::fs::remove_file(marker_path) {
+ if !(pre_existing && err.kind() == std::io::ErrorKind::NotFound) {
+ bail!(
+ "vanished upload marker file on store '{}' for {digest_str} - {err}",
+ self.name,
+ )
+ }
+ }
+ }
+
+ Ok(())
+ })
+ }
+
fn insert_chunk_impl(
&self,
chunk: &DataBlob,
@@ -692,6 +805,15 @@ impl ChunkStore {
Ok(())
}
+    /// Generate the file path for the backend upload marker of the given chunk digest.
+ fn chunk_backed_upload_marker_path(&self, digest: &[u8; 32]) -> (PathBuf, String) {
+ let (chunk_path, digest_str) = self.chunk_path(digest);
+ (
+ chunk_path.with_extension(BACKEND_UPLOAD_MARKER_EXT),
+ digest_str,
+ )
+ }
+
pub fn relative_path(&self, path: &Path) -> PathBuf {
// unwrap: only `None` in unit tests
assert!(self.locker.is_some());
@@ -777,14 +899,26 @@ fn test_chunk_store1() {
if let Err(_e) = std::fs::remove_dir_all(".testdir") { /* ignore */ }
- let chunk_store = ChunkStore::open("test", &path, DatastoreFSyncLevel::None);
+ let chunk_store = ChunkStore::open(
+ "test",
+ &path,
+ DatastoreFSyncLevel::None,
+ DatastoreBackendType::Filesystem,
+ );
assert!(chunk_store.is_err());
let user = nix::unistd::User::from_uid(nix::unistd::Uid::current())
.unwrap()
.unwrap();
- let chunk_store =
- ChunkStore::create("test", &path, user.uid, user.gid, DatastoreFSyncLevel::None).unwrap();
+ let chunk_store = ChunkStore::create(
+ "test",
+ &path,
+ user.uid,
+ user.gid,
+ DatastoreFSyncLevel::None,
+ DatastoreBackendType::Filesystem,
+ )
+ .unwrap();
let (chunk, digest) = crate::data_blob::DataChunkBuilder::new(&[0u8, 1u8])
.build()
@@ -796,8 +930,14 @@ fn test_chunk_store1() {
let (exists, _) = chunk_store.insert_chunk(&chunk, &digest).unwrap();
assert!(exists);
- let chunk_store =
- ChunkStore::create("test", &path, user.uid, user.gid, DatastoreFSyncLevel::None);
+ let chunk_store = ChunkStore::create(
+ "test",
+ &path,
+ user.uid,
+ user.gid,
+ DatastoreFSyncLevel::None,
+ DatastoreBackendType::Filesystem,
+ );
assert!(chunk_store.is_err());
if let Err(_e) = std::fs::remove_dir_all(".testdir") { /* ignore */ }
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index 45e079f1a..ed994eb0b 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -336,10 +336,14 @@ impl DataStore {
DatastoreTuning::API_SCHEMA
.parse_property_string(config.tuning.as_deref().unwrap_or(""))?,
)?;
+ let backend_config: DatastoreBackendConfig =
+ config.backend.as_deref().unwrap_or("").parse()?;
+ let backend_type = backend_config.ty.unwrap_or_default();
Arc::new(ChunkStore::open(
name,
config.absolute_path(),
tuning.sync_level.unwrap_or_default(),
+ backend_type,
)?)
};
@@ -424,10 +428,16 @@ impl DataStore {
DatastoreTuning::API_SCHEMA
.parse_property_string(config.tuning.as_deref().unwrap_or(""))?,
)?;
+ let backend_config: DatastoreBackendConfig = serde_json::from_value(
+ DatastoreBackendConfig::API_SCHEMA
+ .parse_property_string(config.backend.as_deref().unwrap_or(""))?,
+ )?;
+ let backend_type = backend_config.ty.unwrap_or_default();
let chunk_store = ChunkStore::open(
&name,
config.absolute_path(),
tuning.sync_level.unwrap_or_default(),
+ backend_type,
)?;
let inner = Arc::new(Self::with_store_and_config(
Arc::new(chunk_store),
diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
index 3b03c0466..541bd0a04 100644
--- a/src/api2/config/datastore.rs
+++ b/src/api2/config/datastore.rs
@@ -173,6 +173,7 @@ pub(crate) fn do_create_datastore(
&datastore.name,
&path,
tuning.sync_level.unwrap_or_default(),
+ backend_type,
)
})?
} else {
@@ -204,6 +205,7 @@ pub(crate) fn do_create_datastore(
backup_user.uid,
backup_user.gid,
tuning.sync_level.unwrap_or_default(),
+ backend_type,
)?
};
--
2.47.3
Thread overview: 11+ messages
2025-10-15 16:39 [pbs-devel] [PATCH proxmox{, -backup} v3 00/10] s3 store: fix chunk upload/insert and GC race condition for s3 backend Christian Ebner
2025-10-15 16:39 ` [pbs-devel] [PATCH proxmox v3 1/2] s3-client: add exponential backoff time for upload retries Christian Ebner
2025-10-15 16:40 ` [pbs-devel] [PATCH proxmox v3 2/2] s3-client: add helper method to force final unconditional upload on Christian Ebner
2025-10-15 16:40 ` [pbs-devel] [PATCH proxmox-backup v3 1/8] api/pull: avoid failing on concurrent conditional chunk uploads Christian Ebner
2025-10-15 16:40 ` [pbs-devel] [PATCH proxmox-backup v3 2/8] datastore: GC: drop overly verbose info message during s3 chunk sweep Christian Ebner
2025-10-15 16:40 ` [pbs-devel] [PATCH proxmox-backup v3 3/8] GC: refactor atime gathering for local chunk markers with s3 backend Christian Ebner
2025-10-15 16:40 ` [pbs-devel] [PATCH proxmox-backup v3 4/8] chunk store: refactor method for chunk insertion Christian Ebner
2025-10-15 16:40 ` [pbs-devel] [PATCH proxmox-backup v3 5/8] chunk store: add backend upload marker helpers for s3 backed stores Christian Ebner [this message]
2025-10-15 16:40 ` [pbs-devel] [PATCH proxmox-backup v3 6/8] api: chunk upload: fix race between chunk backend upload and insert Christian Ebner
2025-10-15 16:40 ` [pbs-devel] [PATCH proxmox-backup v3 7/8] api: chunk upload: fix race with garbage collection for no-cache on s3 Christian Ebner
2025-10-15 16:40 ` [pbs-devel] [PATCH proxmox-backup v3 8/8] pull: guard chunk upload and only insert into cache after upload Christian Ebner