From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from firstgate.proxmox.com (firstgate.proxmox.com [IPv6:2a01:7e0:0:424::9]) by lore.proxmox.com (Postfix) with ESMTPS id E76B51FF13A for ; Wed, 01 Apr 2026 09:55:49 +0200 (CEST) Received: from firstgate.proxmox.com (localhost [127.0.0.1]) by firstgate.proxmox.com (Proxmox) with ESMTP id C5BE310E4C; Wed, 1 Apr 2026 09:56:13 +0200 (CEST) From: Christian Ebner To: pbs-devel@lists.proxmox.com Subject: [PATCH proxmox-backup 11/20] fix #7251: api: push: encrypt snapshots using configured encryption key Date: Wed, 1 Apr 2026 09:55:12 +0200 Message-ID: <20260401075521.176354-12-c.ebner@proxmox.com> X-Mailer: git-send-email 2.47.3 In-Reply-To: <20260401075521.176354-1-c.ebner@proxmox.com> References: <20260401075521.176354-1-c.ebner@proxmox.com> MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Bm-Milter-Handled: 55990f41-d878-4baa-be0a-ee34c49e34d2 X-Bm-Transport-Timestamp: 1775030089479 X-SPAM-LEVEL: Spam detection results: 0 AWL 0.064 Adjusted score from AWL reputation of From: address BAYES_00 -1.9 Bayes spam probability is 0 to 1% DMARC_MISSING 0.1 Missing DMARC policy KAM_DMARC_STATUS 0.01 Test Rule for DKIM or SPF Failure with Strict Alignment SPF_HELO_NONE 0.001 SPF: HELO does not publish an SPF Record SPF_PASS -0.001 SPF: sender matches SPF record Message-ID-Hash: GRKGUDLS64N5RY3UY37IFDXEPRDS4V4P X-Message-ID-Hash: GRKGUDLS64N5RY3UY37IFDXEPRDS4V4P X-MailFrom: c.ebner@proxmox.com X-Mailman-Rule-Misses: dmarc-mitigation; no-senders; approved; loop; banned-address; emergency; member-moderation; nonmember-moderation; administrivia; implicit-dest; max-recipients; max-size; news-moderation; no-subject; digests; suspicious-header X-Mailman-Version: 3.3.10 Precedence: list List-Id: Proxmox Backup Server development discussion List-Help: List-Owner: List-Post: List-Subscribe: List-Unsubscribe: If an encryption key id is provided in the push parameters, the key is loaded at the start of the push sync job and 
passed along via the crypt config. Backup snapshots which are already encrypted or partially encrypted are skipped to avoid mixing of contents. Pre-existing snapshots on the remote are however not checked to match the key. Special care has to be taken when tracking the already encountered chunks. For regular push sync jobs chunk upload is optimized to skip re-upload of chunks from the previous snapshot (if any) and new, but already encountered chunks for the current group sync. Since the chunks now have to be re-processed anyway, do not load the chunks from the previous snapshot into memory if they need re-encryption and keep track of the unencrypted -> encrypted digest mapping in a hashmap to avoid re-processing. This might be optimized in the future by e.g. moving the tracking to an LRU cache, which however requires more careful evaluation of memory consumption. Fixes: https://bugzilla.proxmox.com/show_bug.cgi?id=7251 Signed-off-by: Christian Ebner --- src/api2/push.rs | 14 ++-- src/server/push.rs | 167 ++++++++++++++++++++++++++++++++++----------- src/server/sync.rs | 1 + 3 files changed, 137 insertions(+), 45 deletions(-) diff --git a/src/api2/push.rs b/src/api2/push.rs index e5edc13e0..79f220afd 100644 --- a/src/api2/push.rs +++ b/src/api2/push.rs @@ -3,10 +3,10 @@ use futures::{future::FutureExt, select}; use pbs_api_types::{ Authid, BackupNamespace, GroupFilter, RateLimitConfig, DATASTORE_SCHEMA, - GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_BACKUP, - PRIV_DATASTORE_READ, PRIV_REMOTE_DATASTORE_BACKUP, PRIV_REMOTE_DATASTORE_PRUNE, - REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, SYNC_ENCRYPTED_ONLY_SCHEMA, - SYNC_VERIFIED_ONLY_SCHEMA, TRANSFER_LAST_SCHEMA, + ENCRYPTION_KEY_ID_SCHEMA, GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, + PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ, PRIV_REMOTE_DATASTORE_BACKUP, + PRIV_REMOTE_DATASTORE_PRUNE, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, + 
SYNC_ENCRYPTED_ONLY_SCHEMA, SYNC_VERIFIED_ONLY_SCHEMA, TRANSFER_LAST_SCHEMA, }; use proxmox_rest_server::WorkerTask; use proxmox_router::{Permission, Router, RpcEnvironment}; @@ -108,6 +108,10 @@ fn check_push_privs( schema: TRANSFER_LAST_SCHEMA, optional: true, }, + "encryption-key": { + schema: ENCRYPTION_KEY_ID_SCHEMA, + optional: true, + }, }, }, access: { @@ -133,6 +137,7 @@ async fn push( verified_only: Option, limit: RateLimitConfig, transfer_last: Option, + encryption_key: Option, rpcenv: &mut dyn RpcEnvironment, ) -> Result { let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; @@ -164,6 +169,7 @@ async fn push( verified_only, limit, transfer_last, + encryption_key, ) .await?; diff --git a/src/server/push.rs b/src/server/push.rs index 269a4c386..beacc0819 100644 --- a/src/server/push.rs +++ b/src/server/push.rs @@ -1,6 +1,6 @@ //! Sync datastore by pushing contents to remote server -use std::collections::HashSet; +use std::collections::{HashMap, HashSet}; use std::sync::{Arc, Mutex}; use anyhow::{bail, format_err, Context, Error}; @@ -11,22 +11,23 @@ use tracing::{info, warn}; use pbs_api_types::{ print_store_and_ns, ApiVersion, ApiVersionInfo, ArchiveType, Authid, BackupArchiveName, - BackupDir, BackupGroup, BackupGroupDeleteStats, BackupNamespace, GroupFilter, GroupListItem, - NamespaceListItem, Operation, RateLimitConfig, Remote, SnapshotListItem, CLIENT_LOG_BLOB_NAME, - MANIFEST_BLOB_NAME, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ, PRIV_REMOTE_DATASTORE_BACKUP, - PRIV_REMOTE_DATASTORE_MODIFY, PRIV_REMOTE_DATASTORE_PRUNE, + BackupDir, BackupGroup, BackupGroupDeleteStats, BackupNamespace, CryptMode, GroupFilter, + GroupListItem, NamespaceListItem, Operation, RateLimitConfig, Remote, SnapshotListItem, + CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ, + PRIV_REMOTE_DATASTORE_BACKUP, PRIV_REMOTE_DATASTORE_MODIFY, PRIV_REMOTE_DATASTORE_PRUNE, }; use pbs_client::{ BackupRepository, BackupStats, BackupWriter, 
BackupWriterOptions, HttpClient, IndexType, MergedChunkInfo, UploadOptions, }; use pbs_config::CachedUserInfo; -use pbs_datastore::data_blob::ChunkInfo; +use pbs_datastore::data_blob::{ChunkInfo, DataChunkBuilder}; use pbs_datastore::dynamic_index::DynamicIndexReader; use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::index::IndexFile; use pbs_datastore::read_chunk::AsyncReadChunk; use pbs_datastore::{BackupManifest, DataStore, StoreProgress}; +use pbs_tools::crypt_config::CryptConfig; use super::sync::{ check_namespace_depth_limit, exclude_not_verified_or_encrypted, @@ -83,6 +84,9 @@ pub(crate) struct PushParameters { verified_only: bool, /// How many snapshots should be transferred at most (taking the newest N snapshots) transfer_last: Option, + /// Encryption key to use for pushing unencrypted backup snapshots. Does not affect + /// already encrypted snapshots. + crypt_config: Option>, } impl PushParameters { @@ -102,6 +106,7 @@ impl PushParameters { verified_only: Option, limit: RateLimitConfig, transfer_last: Option, + encryption_key: Option, ) -> Result { if let Some(max_depth) = max_depth { ns.check_max_depth(max_depth)?; @@ -154,6 +159,12 @@ impl PushParameters { }; let group_filter = group_filter.unwrap_or_default(); + let crypt_config = if let Some(key_id) = &encryption_key { + crate::server::sync::check_privs_and_load_key_config(key_id, &local_user)? 
+ } else { + None + }; + Ok(Self { source, target, @@ -164,6 +175,7 @@ impl PushParameters { encrypted_only, verified_only, transfer_last, + crypt_config, }) } @@ -794,6 +806,29 @@ pub(crate) async fn push_group( Ok(stats) } +async fn load_previous_snapshot_known_chunks( + params: &PushParameters, + manifest: &BackupManifest, + backup_writer: &BackupWriter, + archive_name: &BackupArchiveName, + known_chunks: Arc>>, +) { + if let Some(crypt_config) = ¶ms.crypt_config { + if let Ok(Some(fingerprint)) = manifest.fingerprint() { + if *fingerprint.bytes() == crypt_config.fingerprint() { + // Add known chunks only if the fingerprint is not the + // same and therefore needs no re-encryption. + return; + } + } + } + + // Add known chunks, ignore errors since archive might not be present + let _res = backup_writer + .download_previous_fixed_index(archive_name, manifest, known_chunks) + .await; +} + /// Push snapshot to target /// /// Creates a new snapshot on the target and pushes the content of the source snapshot to the @@ -836,6 +871,19 @@ pub(crate) async fn push_snapshot( return Ok(stats); } + if params.crypt_config.is_some() { + // Check if manifest contains only non encrypted files, refuse progress otherwise to + // not double encrypt or upload partially unencrypted contents. 
+ if !source_manifest + .files() + .iter() + .all(|file| file.chunk_crypt_mode() == CryptMode::None) + { + warn!("Encountered partially encrypted snapshot, refuse to re-encrypt and skip"); + return Ok(stats); + } + } + // Writer instance locks the snapshot on the remote side let backup_writer = BackupWriter::start( ¶ms.target.client, @@ -843,7 +891,7 @@ pub(crate) async fn push_snapshot( datastore: params.target.repo.store(), ns: &target_ns, backup: snapshot, - crypt_config: None, + crypt_config: params.crypt_config.clone(), debug: false, benchmark: false, no_cache: false, @@ -860,19 +908,20 @@ pub(crate) async fn push_snapshot( } }; - // Dummy upload options: the actual compression and/or encryption already happened while - // the chunks were generated during creation of the backup snapshot, therefore pre-existing - // chunks (already compressed and/or encrypted) can be pushed to the target. + // Dummy upload options: The actual compression already happened while + // the chunks were generated during creation of the backup snapshot, + // therefore pre-existing chunks (already compressed) can be pushed to + // the target. + // // Further, these steps are skipped in the backup writer upload stream. // // Therefore, these values do not need to fit the values given in the manifest. // The original manifest is uploaded in the end anyways. // // Compression is set to true so that the uploaded manifest will be compressed. - // Encrypt is set to assure that above files are not encrypted. let upload_options = UploadOptions { compress: true, - encrypt: false, + encrypt: params.crypt_config.is_some(), previous_manifest, ..UploadOptions::default() }; @@ -886,6 +935,10 @@ pub(crate) async fn push_snapshot( path.push(&entry.filename); if path.try_exists()? 
{ let archive_name = BackupArchiveName::from_path(&entry.filename)?; + let crypt_mode = match ¶ms.crypt_config { + Some(_) => CryptMode::Encrypt, + None => entry.chunk_crypt_mode(), + }; match archive_name.archive_type() { ArchiveType::Blob => { let file = std::fs::File::open(&path)?; @@ -896,7 +949,7 @@ pub(crate) async fn push_snapshot( &archive_name, backup_stats.size, backup_stats.csum, - entry.chunk_crypt_mode(), + crypt_mode, )?; stats.add(SyncStats { chunk_count: backup_stats.chunk_count as usize, @@ -907,14 +960,14 @@ pub(crate) async fn push_snapshot( } ArchiveType::DynamicIndex => { if let Some(manifest) = upload_options.previous_manifest.as_ref() { - // Add known chunks, ignore errors since archive might not be present - let _res = backup_writer - .download_previous_dynamic_index( - &archive_name, - manifest, - known_chunks.clone(), - ) - .await; + load_previous_snapshot_known_chunks( + params, + manifest, + &backup_writer, + &archive_name, + known_chunks.clone(), + ) + .await; } let index = DynamicIndexReader::open(&path)?; let chunk_reader = reader @@ -927,13 +980,14 @@ pub(crate) async fn push_snapshot( &backup_writer, IndexType::Dynamic, known_chunks.clone(), + params.crypt_config.clone(), ) .await?; target_manifest.add_file( &archive_name, upload_stats.size, upload_stats.csum, - entry.chunk_crypt_mode(), + crypt_mode, )?; stats.add(SyncStats { chunk_count: upload_stats.chunk_count as usize, @@ -944,14 +998,14 @@ pub(crate) async fn push_snapshot( } ArchiveType::FixedIndex => { if let Some(manifest) = upload_options.previous_manifest.as_ref() { - // Add known chunks, ignore errors since archive might not be present - let _res = backup_writer - .download_previous_fixed_index( - &archive_name, - manifest, - known_chunks.clone(), - ) - .await; + load_previous_snapshot_known_chunks( + params, + manifest, + &backup_writer, + &archive_name, + known_chunks.clone(), + ) + .await; } let index = FixedIndexReader::open(&path)?; let chunk_reader = reader @@ 
-965,13 +1019,14 @@ pub(crate) async fn push_snapshot( &backup_writer, IndexType::Fixed(Some(size)), known_chunks.clone(), + params.crypt_config.clone(), ) .await?; target_manifest.add_file( &archive_name, upload_stats.size, upload_stats.csum, - entry.chunk_crypt_mode(), + crypt_mode, )?; stats.add(SyncStats { chunk_count: upload_stats.chunk_count as usize, @@ -1005,13 +1060,21 @@ pub(crate) async fn push_snapshot( // needs to update all relevant info for new manifest. target_manifest.unprotected = source_manifest.unprotected; target_manifest.signature = source_manifest.signature; - let manifest_json = serde_json::to_value(target_manifest)?; - let manifest_string = serde_json::to_string_pretty(&manifest_json)?; + let manifest_string = if params.crypt_config.is_some() { + target_manifest.to_string(params.crypt_config.as_ref().map(Arc::as_ref))? + } else { + let manifest_json = serde_json::to_value(target_manifest)?; + serde_json::to_string_pretty(&manifest_json)? + }; let backup_stats = backup_writer .upload_blob_from_data( manifest_string.into_bytes(), MANIFEST_BLOB_NAME.as_ref(), - upload_options, + UploadOptions { + compress: true, + encrypt: false, + ..UploadOptions::default() + }, ) .await?; backup_writer.finish().await?; @@ -1037,12 +1100,15 @@ async fn push_index( backup_writer: &BackupWriter, index_type: IndexType, known_chunks: Arc>>, + crypt_config: Option>, ) -> Result { let (upload_channel_tx, upload_channel_rx) = mpsc::channel(20); let mut chunk_infos = stream::iter(0..index.index_count()).map(move |pos| index.chunk_info(pos).unwrap()); + let crypt_config_cloned = crypt_config.clone(); tokio::spawn(async move { + let mut encrypted_mapping = HashMap::new(); while let Some(chunk_info) = chunk_infos.next().await { // Avoid reading known chunks, as they are not uploaded by the backup writer anyways let needs_upload = { @@ -1056,20 +1122,39 @@ async fn push_index( chunk_reader .read_raw_chunk(&chunk_info.digest) .await - .map(|chunk| { - 
MergedChunkInfo::New(ChunkInfo { + .and_then(|chunk| { + let (chunk, digest, chunk_len) = match crypt_config_cloned.as_ref() { + Some(crypt_config) => { + let data = chunk.decode(None, Some(&chunk_info.digest))?; + let (chunk, digest) = DataChunkBuilder::new(&data) + .compress(true) + .crypt_config(crypt_config) + .build()?; + encrypted_mapping.insert(chunk_info.digest, digest); + (chunk, digest, data.len() as u64) + } + None => (chunk, chunk_info.digest, chunk_info.size()), + }; + + Ok(MergedChunkInfo::New(ChunkInfo { chunk, - digest: chunk_info.digest, - chunk_len: chunk_info.size(), + digest, + chunk_len, offset: chunk_info.range.start, - }) + })) }) } else { + let digest = + if let Some(encrypted_digest) = encrypted_mapping.get(&chunk_info.digest) { + *encrypted_digest + } else { + chunk_info.digest + }; Ok(MergedChunkInfo::Known(vec![( // Pass size instead of offset, will be replaced with offset by the backup // writer chunk_info.size(), - chunk_info.digest, + digest, )])) }; let _ = upload_channel_tx.send(merged_chunk_info).await; @@ -1080,7 +1165,7 @@ async fn push_index( let upload_options = UploadOptions { compress: true, - encrypt: false, + encrypt: crypt_config.is_some(), index_type, ..UploadOptions::default() }; diff --git a/src/server/sync.rs b/src/server/sync.rs index 2c1d5dc61..d52175a13 100644 --- a/src/server/sync.rs +++ b/src/server/sync.rs @@ -677,6 +677,7 @@ pub fn do_sync_job( sync_job.verified_only, sync_job.limit.clone(), sync_job.transfer_last, + sync_job.encryption_key, ) .await?; push_store(push_params).await? -- 2.47.3