From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [PATCH proxmox-backup 11/20] fix #7251: api: push: encrypt snapshots using configured encryption key
Date: Wed, 1 Apr 2026 09:55:12 +0200 [thread overview]
Message-ID: <20260401075521.176354-12-c.ebner@proxmox.com> (raw)
In-Reply-To: <20260401075521.176354-1-c.ebner@proxmox.com>
If an encryption key id is provided in the push parameters, the key
is loaded at the start of the push sync job and passed along via the
crypt config.
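For illustration, resolving the configured key into such a crypt config
roughly boils down to the following sketch. The ACL check and config
lookup are left out (they live in the check_privs_and_load_key_config()
helper added by the previous patch), and calling KeyConfig::decrypt()
without a usable passphrase callback is an assumption here:

    use std::sync::Arc;

    use anyhow::{bail, Error};
    use pbs_key_config::KeyConfig;
    use pbs_tools::crypt_config::CryptConfig;

    // Sketch only: decrypt the stored key material and build the crypt
    // config which is passed along in the push parameters.
    fn crypt_config_from_key_config(
        key_config: &KeyConfig,
    ) -> Result<Arc<CryptConfig>, Error> {
        let (key, _created, _fingerprint) =
            key_config.decrypt(&|| bail!("passphrase protected keys not supported"))?;
        Ok(Arc::new(CryptConfig::new(key)?))
    }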
Backup snapshots which are already encrypted or only partially
encrypted are skipped to avoid mixing of contents. Pre-existing
snapshots on the remote are, however, not checked to match the key.
Special care has to be taken when tracking already encountered chunks.
For regular push sync jobs, chunk upload is optimized to skip
re-uploading chunks from the previous snapshot (if any) as well as new,
but already encountered chunks of the current group sync. Since the
chunks now have to be re-processed anyway, do not load the chunks from
the previous snapshot into memory if they need re-encryption, and keep
track of the unencrypted -> encrypted digest mapping in a hashmap to
avoid re-processing. This might be optimized in the future by e.g.
moving the tracking to an LRU cache, which however requires a more
careful evaluation of memory consumption.
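Not part of this patch, but for the record a self-contained sketch of
what such LRU based tracking might look like, using the third-party lru
crate and an arbitrary capacity purely for illustration:

    use std::num::NonZeroUsize;

    use lru::LruCache;

    fn main() {
        // Bound the unencrypted -> encrypted digest mapping instead of
        // letting a HashMap grow with every re-encrypted chunk.
        let mut encrypted_mapping: LruCache<[u8; 32], [u8; 32]> =
            LruCache::new(NonZeroUsize::new(64 * 1024).unwrap());

        let plain_digest = [1u8; 32];
        let encrypted_digest = [2u8; 32];

        // Remember the mapping after re-encrypting a chunk ...
        encrypted_mapping.put(plain_digest, encrypted_digest);

        // ... and reuse it for chunks already encountered during the
        // current group sync.
        if let Some(digest) = encrypted_mapping.get(&plain_digest) {
            assert_eq!(*digest, encrypted_digest);
        }
    }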
Fixes: https://bugzilla.proxmox.com/show_bug.cgi?id=7251
Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
src/api2/push.rs | 14 ++--
src/server/push.rs | 167 ++++++++++++++++++++++++++++++++++-----------
src/server/sync.rs | 1 +
3 files changed, 137 insertions(+), 45 deletions(-)
diff --git a/src/api2/push.rs b/src/api2/push.rs
index e5edc13e0..79f220afd 100644
--- a/src/api2/push.rs
+++ b/src/api2/push.rs
@@ -3,10 +3,10 @@ use futures::{future::FutureExt, select};
use pbs_api_types::{
Authid, BackupNamespace, GroupFilter, RateLimitConfig, DATASTORE_SCHEMA,
- GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_BACKUP,
- PRIV_DATASTORE_READ, PRIV_REMOTE_DATASTORE_BACKUP, PRIV_REMOTE_DATASTORE_PRUNE,
- REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, SYNC_ENCRYPTED_ONLY_SCHEMA,
- SYNC_VERIFIED_ONLY_SCHEMA, TRANSFER_LAST_SCHEMA,
+ ENCRYPTION_KEY_ID_SCHEMA, GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA,
+ PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ, PRIV_REMOTE_DATASTORE_BACKUP,
+ PRIV_REMOTE_DATASTORE_PRUNE, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA,
+ SYNC_ENCRYPTED_ONLY_SCHEMA, SYNC_VERIFIED_ONLY_SCHEMA, TRANSFER_LAST_SCHEMA,
};
use proxmox_rest_server::WorkerTask;
use proxmox_router::{Permission, Router, RpcEnvironment};
@@ -108,6 +108,10 @@ fn check_push_privs(
schema: TRANSFER_LAST_SCHEMA,
optional: true,
},
+ "encryption-key": {
+ schema: ENCRYPTION_KEY_ID_SCHEMA,
+ optional: true,
+ },
},
},
access: {
@@ -133,6 +137,7 @@ async fn push(
verified_only: Option<bool>,
limit: RateLimitConfig,
transfer_last: Option<usize>,
+ encryption_key: Option<String>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
@@ -164,6 +169,7 @@ async fn push(
verified_only,
limit,
transfer_last,
+ encryption_key,
)
.await?;
diff --git a/src/server/push.rs b/src/server/push.rs
index 269a4c386..beacc0819 100644
--- a/src/server/push.rs
+++ b/src/server/push.rs
@@ -1,6 +1,6 @@
//! Sync datastore by pushing contents to remote server
-use std::collections::HashSet;
+use std::collections::{HashMap, HashSet};
use std::sync::{Arc, Mutex};
use anyhow::{bail, format_err, Context, Error};
@@ -11,22 +11,23 @@ use tracing::{info, warn};
use pbs_api_types::{
print_store_and_ns, ApiVersion, ApiVersionInfo, ArchiveType, Authid, BackupArchiveName,
- BackupDir, BackupGroup, BackupGroupDeleteStats, BackupNamespace, GroupFilter, GroupListItem,
- NamespaceListItem, Operation, RateLimitConfig, Remote, SnapshotListItem, CLIENT_LOG_BLOB_NAME,
- MANIFEST_BLOB_NAME, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ, PRIV_REMOTE_DATASTORE_BACKUP,
- PRIV_REMOTE_DATASTORE_MODIFY, PRIV_REMOTE_DATASTORE_PRUNE,
+ BackupDir, BackupGroup, BackupGroupDeleteStats, BackupNamespace, CryptMode, GroupFilter,
+ GroupListItem, NamespaceListItem, Operation, RateLimitConfig, Remote, SnapshotListItem,
+ CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ,
+ PRIV_REMOTE_DATASTORE_BACKUP, PRIV_REMOTE_DATASTORE_MODIFY, PRIV_REMOTE_DATASTORE_PRUNE,
};
use pbs_client::{
BackupRepository, BackupStats, BackupWriter, BackupWriterOptions, HttpClient, IndexType,
MergedChunkInfo, UploadOptions,
};
use pbs_config::CachedUserInfo;
-use pbs_datastore::data_blob::ChunkInfo;
+use pbs_datastore::data_blob::{ChunkInfo, DataChunkBuilder};
use pbs_datastore::dynamic_index::DynamicIndexReader;
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::read_chunk::AsyncReadChunk;
use pbs_datastore::{BackupManifest, DataStore, StoreProgress};
+use pbs_tools::crypt_config::CryptConfig;
use super::sync::{
check_namespace_depth_limit, exclude_not_verified_or_encrypted,
@@ -83,6 +84,9 @@ pub(crate) struct PushParameters {
verified_only: bool,
/// How many snapshots should be transferred at most (taking the newest N snapshots)
transfer_last: Option<usize>,
+ /// Encryption key to use for pushing unencrypted backup snapshots. Snapshots which
+ /// are already (partially) encrypted are skipped.
+ crypt_config: Option<Arc<CryptConfig>>,
}
impl PushParameters {
@@ -102,6 +106,7 @@ impl PushParameters {
verified_only: Option<bool>,
limit: RateLimitConfig,
transfer_last: Option<usize>,
+ encryption_key: Option<String>,
) -> Result<Self, Error> {
if let Some(max_depth) = max_depth {
ns.check_max_depth(max_depth)?;
@@ -154,6 +159,12 @@ impl PushParameters {
};
let group_filter = group_filter.unwrap_or_default();
+ let crypt_config = if let Some(key_id) = &encryption_key {
+ crate::server::sync::check_privs_and_load_key_config(key_id, &local_user)?
+ } else {
+ None
+ };
+
Ok(Self {
source,
target,
@@ -164,6 +175,7 @@ impl PushParameters {
encrypted_only,
verified_only,
transfer_last,
+ crypt_config,
})
}
@@ -794,6 +806,29 @@ pub(crate) async fn push_group(
Ok(stats)
}
+async fn load_previous_snapshot_known_chunks(
+ params: &PushParameters,
+ manifest: &BackupManifest,
+ backup_writer: &BackupWriter,
+ archive_name: &BackupArchiveName,
+ known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
+) {
+ if let Some(crypt_config) = ¶ms.crypt_config {
+ if let Ok(Some(fingerprint)) = manifest.fingerprint() {
+ if *fingerprint.bytes() == crypt_config.fingerprint() {
+ // The previous snapshot was encrypted with the same key. As the source
+ // chunks have to be re-encrypted anyway and are tracked by their
+ // unencrypted digest, do not load the encrypted digests into memory.
+ return;
+ }
+ }
+ }
+
+ // Add known chunks, ignore errors since the archive might not be present
+ match archive_name.archive_type() {
+ ArchiveType::DynamicIndex => {
+ let _res = backup_writer
+ .download_previous_dynamic_index(archive_name, manifest, known_chunks)
+ .await;
+ }
+ ArchiveType::FixedIndex => {
+ let _res = backup_writer
+ .download_previous_fixed_index(archive_name, manifest, known_chunks)
+ .await;
+ }
+ ArchiveType::Blob => (),
+ }
+}
+
/// Push snapshot to target
///
/// Creates a new snapshot on the target and pushes the content of the source snapshot to the
@@ -836,6 +871,19 @@ pub(crate) async fn push_snapshot(
return Ok(stats);
}
+ if params.crypt_config.is_some() {
+ // Check that the manifest contains only unencrypted files, refuse to proceed
+ // otherwise to not double encrypt or upload partially unencrypted contents.
+ if !source_manifest
+ .files()
+ .iter()
+ .all(|file| file.chunk_crypt_mode() == CryptMode::None)
+ {
+ warn!("Encountered (partially) encrypted snapshot, refusing to re-encrypt, skipping");
+ return Ok(stats);
+ }
+ }
+
// Writer instance locks the snapshot on the remote side
let backup_writer = BackupWriter::start(
¶ms.target.client,
@@ -843,7 +891,7 @@ pub(crate) async fn push_snapshot(
datastore: params.target.repo.store(),
ns: &target_ns,
backup: snapshot,
- crypt_config: None,
+ crypt_config: params.crypt_config.clone(),
debug: false,
benchmark: false,
no_cache: false,
@@ -860,19 +908,20 @@ pub(crate) async fn push_snapshot(
}
};
- // Dummy upload options: the actual compression and/or encryption already happened while
- // the chunks were generated during creation of the backup snapshot, therefore pre-existing
- // chunks (already compressed and/or encrypted) can be pushed to the target.
+ // Dummy upload options: the actual compression already happened while
+ // the chunks were generated during creation of the backup snapshot,
+ // so pre-existing chunks (already compressed) can be pushed to the
+ // target.
+ //
// Further, these steps are skipped in the backup writer upload stream.
//
// Therefore, these values do not need to fit the values given in the manifest.
// The original manifest is uploaded in the end anyways.
//
// Compression is set to true so that the uploaded manifest will be compressed.
- // Encrypt is set to assure that above files are not encrypted.
let upload_options = UploadOptions {
compress: true,
- encrypt: false,
+ encrypt: params.crypt_config.is_some(),
previous_manifest,
..UploadOptions::default()
};
@@ -886,6 +935,10 @@ pub(crate) async fn push_snapshot(
path.push(&entry.filename);
if path.try_exists()? {
let archive_name = BackupArchiveName::from_path(&entry.filename)?;
+ let crypt_mode = match ¶ms.crypt_config {
+ Some(_) => CryptMode::Encrypt,
+ None => entry.chunk_crypt_mode(),
+ };
match archive_name.archive_type() {
ArchiveType::Blob => {
let file = std::fs::File::open(&path)?;
@@ -896,7 +949,7 @@ pub(crate) async fn push_snapshot(
&archive_name,
backup_stats.size,
backup_stats.csum,
- entry.chunk_crypt_mode(),
+ crypt_mode,
)?;
stats.add(SyncStats {
chunk_count: backup_stats.chunk_count as usize,
@@ -907,14 +960,14 @@ pub(crate) async fn push_snapshot(
}
ArchiveType::DynamicIndex => {
if let Some(manifest) = upload_options.previous_manifest.as_ref() {
- // Add known chunks, ignore errors since archive might not be present
- let _res = backup_writer
- .download_previous_dynamic_index(
- &archive_name,
- manifest,
- known_chunks.clone(),
- )
- .await;
+ load_previous_snapshot_known_chunks(
+ params,
+ manifest,
+ &backup_writer,
+ &archive_name,
+ known_chunks.clone(),
+ )
+ .await;
}
let index = DynamicIndexReader::open(&path)?;
let chunk_reader = reader
@@ -927,13 +980,14 @@ pub(crate) async fn push_snapshot(
&backup_writer,
IndexType::Dynamic,
known_chunks.clone(),
+ params.crypt_config.clone(),
)
.await?;
target_manifest.add_file(
&archive_name,
upload_stats.size,
upload_stats.csum,
- entry.chunk_crypt_mode(),
+ crypt_mode,
)?;
stats.add(SyncStats {
chunk_count: upload_stats.chunk_count as usize,
@@ -944,14 +998,14 @@ pub(crate) async fn push_snapshot(
}
ArchiveType::FixedIndex => {
if let Some(manifest) = upload_options.previous_manifest.as_ref() {
- // Add known chunks, ignore errors since archive might not be present
- let _res = backup_writer
- .download_previous_fixed_index(
- &archive_name,
- manifest,
- known_chunks.clone(),
- )
- .await;
+ load_previous_snapshot_known_chunks(
+ params,
+ manifest,
+ &backup_writer,
+ &archive_name,
+ known_chunks.clone(),
+ )
+ .await;
}
let index = FixedIndexReader::open(&path)?;
let chunk_reader = reader
@@ -965,13 +1019,14 @@ pub(crate) async fn push_snapshot(
&backup_writer,
IndexType::Fixed(Some(size)),
known_chunks.clone(),
+ params.crypt_config.clone(),
)
.await?;
target_manifest.add_file(
&archive_name,
upload_stats.size,
upload_stats.csum,
- entry.chunk_crypt_mode(),
+ crypt_mode,
)?;
stats.add(SyncStats {
chunk_count: upload_stats.chunk_count as usize,
@@ -1005,13 +1060,21 @@ pub(crate) async fn push_snapshot(
// needs to update all relevant info for new manifest.
target_manifest.unprotected = source_manifest.unprotected;
target_manifest.signature = source_manifest.signature;
- let manifest_json = serde_json::to_value(target_manifest)?;
- let manifest_string = serde_json::to_string_pretty(&manifest_json)?;
+ let manifest_string = if params.crypt_config.is_some() {
+ target_manifest.to_string(params.crypt_config.as_ref().map(Arc::as_ref))?
+ } else {
+ let manifest_json = serde_json::to_value(target_manifest)?;
+ serde_json::to_string_pretty(&manifest_json)?
+ };
let backup_stats = backup_writer
.upload_blob_from_data(
manifest_string.into_bytes(),
MANIFEST_BLOB_NAME.as_ref(),
- upload_options,
+ UploadOptions {
+ compress: true,
+ encrypt: false,
+ ..UploadOptions::default()
+ },
)
.await?;
backup_writer.finish().await?;
@@ -1037,12 +1100,15 @@ async fn push_index(
backup_writer: &BackupWriter,
index_type: IndexType,
known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
+ crypt_config: Option<Arc<CryptConfig>>,
) -> Result<BackupStats, Error> {
let (upload_channel_tx, upload_channel_rx) = mpsc::channel(20);
let mut chunk_infos =
stream::iter(0..index.index_count()).map(move |pos| index.chunk_info(pos).unwrap());
+ let crypt_config_cloned = crypt_config.clone();
tokio::spawn(async move {
+ let mut encrypted_mapping = HashMap::new();
while let Some(chunk_info) = chunk_infos.next().await {
// Avoid reading known chunks, as they are not uploaded by the backup writer anyways
let needs_upload = {
@@ -1056,20 +1122,39 @@ async fn push_index(
chunk_reader
.read_raw_chunk(&chunk_info.digest)
.await
- .map(|chunk| {
- MergedChunkInfo::New(ChunkInfo {
+ .and_then(|chunk| {
+ let (chunk, digest, chunk_len) = match crypt_config_cloned.as_ref() {
+ Some(crypt_config) => {
+ let data = chunk.decode(None, Some(&chunk_info.digest))?;
+ let (chunk, digest) = DataChunkBuilder::new(&data)
+ .compress(true)
+ .crypt_config(crypt_config)
+ .build()?;
+ encrypted_mapping.insert(chunk_info.digest, digest);
+ (chunk, digest, data.len() as u64)
+ }
+ None => (chunk, chunk_info.digest, chunk_info.size()),
+ };
+
+ Ok(MergedChunkInfo::New(ChunkInfo {
chunk,
- digest: chunk_info.digest,
- chunk_len: chunk_info.size(),
+ digest,
+ chunk_len,
offset: chunk_info.range.start,
- })
+ }))
})
} else {
+ let digest =
+ if let Some(encrypted_digest) = encrypted_mapping.get(&chunk_info.digest) {
+ *encrypted_digest
+ } else {
+ chunk_info.digest
+ };
Ok(MergedChunkInfo::Known(vec![(
// Pass size instead of offset, will be replaced with offset by the backup
// writer
chunk_info.size(),
- chunk_info.digest,
+ digest,
)]))
};
let _ = upload_channel_tx.send(merged_chunk_info).await;
@@ -1080,7 +1165,7 @@ async fn push_index(
let upload_options = UploadOptions {
compress: true,
- encrypt: false,
+ encrypt: crypt_config.is_some(),
index_type,
..UploadOptions::default()
};
diff --git a/src/server/sync.rs b/src/server/sync.rs
index 2c1d5dc61..d52175a13 100644
--- a/src/server/sync.rs
+++ b/src/server/sync.rs
@@ -677,6 +677,7 @@ pub fn do_sync_job(
sync_job.verified_only,
sync_job.limit.clone(),
sync_job.transfer_last,
+ sync_job.encryption_key,
)
.await?;
push_store(push_params).await?
--
2.47.3
Thread overview: 32+ messages
2026-04-01 7:55 [PATCH proxmox{,-backup} 00/20] fix #7251: implement server side encryption support for push sync jobs Christian Ebner
2026-04-01 7:55 ` [PATCH proxmox 01/20] pbs-api-types: define encryption key type and schema Christian Ebner
2026-04-01 7:55 ` [PATCH proxmox 02/20] pbs-api-types: sync job: add optional encryption key to config Christian Ebner
2026-04-01 7:55 ` [PATCH proxmox-backup 03/20] pbs-key-config: introduce store_with() for KeyConfig Christian Ebner
2026-04-01 7:55 ` [PATCH proxmox-backup 04/20] pbs-config: implement encryption key config handling Christian Ebner
2026-04-01 23:27 ` Thomas Lamprecht
2026-04-02 7:09 ` Christian Ebner
2026-04-01 7:55 ` [PATCH proxmox-backup 05/20] pbs-config: acls: add 'encryption-keys' as valid 'system' subpath Christian Ebner
2026-04-01 7:55 ` [PATCH proxmox-backup 06/20] ui: expose 'encryption-keys' as acl subpath for 'system' Christian Ebner
2026-04-01 7:55 ` [PATCH proxmox-backup 07/20] api: config: add endpoints for encryption key manipulation Christian Ebner
2026-04-01 7:55 ` [PATCH proxmox-backup 08/20] api: config: allow encryption key manipulation for sync job Christian Ebner
2026-04-01 7:55 ` [PATCH proxmox-backup 09/20] sync: push: rewrite manifest instead of pushing pre-existing one Christian Ebner
2026-04-01 7:55 ` [PATCH proxmox-backup 10/20] sync: add helper to check encryption key acls and load key Christian Ebner
2026-04-01 7:55 ` Christian Ebner [this message]
2026-04-01 7:55 ` [PATCH proxmox-backup 12/20] ui: define and expose encryption key management menu item and windows Christian Ebner
2026-04-01 23:09 ` Thomas Lamprecht
2026-04-03 8:35 ` Dominik Csapak
2026-04-01 23:10 ` Thomas Lamprecht
2026-04-03 12:16 ` Dominik Csapak
2026-04-01 7:55 ` [PATCH proxmox-backup 13/20] ui: expose assigning encryption key to sync jobs Christian Ebner
2026-04-01 7:55 ` [PATCH proxmox-backup 14/20] sync: pull: load encryption key if given in job config Christian Ebner
2026-04-01 7:55 ` [PATCH proxmox-backup 15/20] sync: expand source chunk reader trait by crypt config Christian Ebner
2026-04-01 7:55 ` [PATCH proxmox-backup 16/20] sync: pull: introduce and use decrypt index writer if " Christian Ebner
2026-04-01 7:55 ` [PATCH proxmox-backup 17/20] sync: pull: extend encountered chunk by optional decrypted digest Christian Ebner
2026-04-01 7:55 ` [PATCH proxmox-backup 18/20] sync: pull: decrypt blob files on pull if encryption key is configured Christian Ebner
2026-04-01 7:55 ` [PATCH proxmox-backup 19/20] sync: pull: decrypt chunks and rewrite index file for matching key Christian Ebner
2026-04-01 7:55 ` [PATCH proxmox-backup 20/20] sync: pull: decrypt snapshots with matching encryption key fingerprint Christian Ebner
2026-04-02 0:25 ` [PATCH proxmox{,-backup} 00/20] fix #7251: implement server side encryption support for push sync jobs Thomas Lamprecht
2026-04-02 7:37 ` Christian Ebner
2026-04-03 8:39 ` Dominik Csapak
2026-04-03 8:50 ` Christian Ebner
2026-04-03 9:00 ` Dominik Csapak