From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [PATCH proxmox-backup v2 18/27] fix #7251: api: push: encrypt snapshots using configured encryption key
Date: Fri, 10 Apr 2026 18:54:45 +0200
Message-ID: <20260410165454.1578501-19-c.ebner@proxmox.com>
X-Mailer: git-send-email 2.47.3
In-Reply-To: <20260410165454.1578501-1-c.ebner@proxmox.com>
References: <20260410165454.1578501-1-c.ebner@proxmox.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
List-Id: Proxmox Backup Server development discussion

If an encryption key id is provided in the push parameters, the key is
loaded at the start of the push sync job and passed along via the crypt
config.

Snapshots which are already fully encrypted are pushed as-is without
re-encryption; partially encrypted snapshots are skipped to avoid mixing
of contents. Pre-existing snapshots on the remote are, however, not
checked to match the key.

Special care has to be taken when tracking the already encountered
chunks. For regular push sync jobs, chunk upload is optimized to skip
re-uploading chunks from the previous snapshot (if any) as well as new,
but already encountered chunks for the current group sync. Since the
chunks now have to be re-processed anyway, do not load the chunks from
the previous snapshot into memory if they need re-encryption, and keep
track of the unencrypted -> encrypted digest mapping in a hashmap to
avoid re-processing. This might be optimized in the future by e.g.
moving the tracking to an LRU cache, which however requires a more
careful evaluation of memory consumption.

Fixes: https://bugzilla.proxmox.com/show_bug.cgi?id=7251
Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
 src/server/push.rs | 112 ++++++++++++++++++++++++++++++++++-----------
 1 file changed, 86 insertions(+), 26 deletions(-)
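For reference, the check added in push_snapshot() below boils down to a
three-way classification of the source manifest's file entries. The
following minimal, self-contained sketch shows the intended semantics;
`SnapshotCrypt` and `classify` are illustrative names only, using a
local stand-in for pbs_api_types::CryptMode, and are not part of this
patch:

    // Local stand-in for pbs_api_types::CryptMode, reduced to what the
    // classification needs.
    enum CryptMode {
        None,
        SignOnly,
        Encrypt,
    }

    /// Three-way outcome of inspecting the chunk crypt modes of all
    /// files referenced by the source manifest.
    enum SnapshotCrypt {
        AllUnencrypted, // encrypt with the configured key and push
        AllEncrypted,   // push as-is, without re-encryption
        Mixed,          // refuse and skip the snapshot
    }

    fn classify(modes: &[CryptMode]) -> SnapshotCrypt {
        // Count files whose chunks carry no encryption at all.
        let unencrypted = modes
            .iter()
            .filter(|mode| matches!(mode, CryptMode::None))
            .count();
        match unencrypted {
            0 => SnapshotCrypt::AllEncrypted,
            n if n == modes.len() => SnapshotCrypt::AllUnencrypted,
            _ => SnapshotCrypt::Mixed,
        }
    }

Note that anything other than CryptMode::None (including SignOnly) is
treated as encrypted by this check, matching the patch's test of
file.chunk_crypt_mode() == CryptMode::None.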
diff --git a/src/server/push.rs b/src/server/push.rs
index 9b7a4adcb..f433ca50d 100644
--- a/src/server/push.rs
+++ b/src/server/push.rs
@@ -1,6 +1,6 @@
 //! Sync datastore by pushing contents to remote server
 
-use std::collections::HashSet;
+use std::collections::{HashMap, HashSet};
 use std::path::Path;
 use std::sync::{Arc, Mutex};
@@ -12,17 +12,17 @@ use tracing::{info, warn};
 
 use pbs_api_types::{
     print_store_and_ns, ApiVersion, ApiVersionInfo, ArchiveType, Authid, BackupArchiveName,
-    BackupDir, BackupGroup, BackupGroupDeleteStats, BackupNamespace, GroupFilter, GroupListItem,
-    NamespaceListItem, Operation, RateLimitConfig, Remote, SnapshotListItem, CLIENT_LOG_BLOB_NAME,
-    MANIFEST_BLOB_NAME, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ, PRIV_REMOTE_DATASTORE_BACKUP,
-    PRIV_REMOTE_DATASTORE_MODIFY, PRIV_REMOTE_DATASTORE_PRUNE,
+    BackupDir, BackupGroup, BackupGroupDeleteStats, BackupNamespace, CryptMode, GroupFilter,
+    GroupListItem, NamespaceListItem, Operation, RateLimitConfig, Remote, SnapshotListItem,
+    CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ,
+    PRIV_REMOTE_DATASTORE_BACKUP, PRIV_REMOTE_DATASTORE_MODIFY, PRIV_REMOTE_DATASTORE_PRUNE,
 };
 use pbs_client::{
     BackupRepository, BackupStats, BackupWriter, BackupWriterOptions, HttpClient, IndexType,
     MergedChunkInfo, UploadOptions,
 };
 use pbs_config::CachedUserInfo;
-use pbs_datastore::data_blob::ChunkInfo;
+use pbs_datastore::data_blob::{ChunkInfo, DataChunkBuilder};
 use pbs_datastore::dynamic_index::DynamicIndexReader;
 use pbs_datastore::fixed_index::FixedIndexReader;
 use pbs_datastore::index::IndexFile;
@@ -886,6 +886,27 @@ pub(crate) async fn push_snapshot(
     }
 
     let mut encrypt_using_key = None;
+    if params.crypt_config.is_some() {
+        let mut contains_unencrypted_file = false;
+        // Check if the snapshot is fully encrypted or not encrypted at all: refuse to
+        // proceed otherwise, to avoid uploading partially unencrypted contents or mixing keys.
+        if source_manifest.files().iter().all(|file| {
+            if file.chunk_crypt_mode() == CryptMode::None {
+                contains_unencrypted_file = true;
+                true
+            } else {
+                false
+            }
+        }) {
+            encrypt_using_key = params.crypt_config.clone();
+            info!("Encrypt and push unencrypted snapshot '{snapshot}'");
+        } else if contains_unencrypted_file {
+            warn!("Encountered partially encrypted snapshot, refuse to re-encrypt and skip");
+            return Ok(stats);
+        } else {
+            info!("Pushing already encrypted snapshot '{snapshot}' without re-encryption");
+        }
+    }
 
     // Writer instance locks the snapshot on the remote side
     let backup_writer = BackupWriter::start(
@@ -911,19 +932,20 @@
         }
     };
 
-    // Dummy upload options: the actual compression and/or encryption already happened while
-    // the chunks were generated during creation of the backup snapshot, therefore pre-existing
-    // chunks (already compressed and/or encrypted) can be pushed to the target.
+    // Dummy upload options: The actual compression already happened while
+    // the chunks were generated during creation of the backup snapshot,
+    // therefore pre-existing chunks (already compressed) can be pushed to
+    // the target.
+    //
     // Further, these steps are skipped in the backup writer upload stream.
     //
     // Therefore, these values do not need to fit the values given in the manifest.
     // The original manifest is uploaded in the end anyways.
     //
     // Compression is set to true so that the uploaded manifest will be compressed.
-    // Encrypt is set to assure that above files are not encrypted.
     let upload_options = UploadOptions {
         compress: true,
-        encrypt: false,
+        encrypt: encrypt_using_key.is_some(),
         previous_manifest,
         ..UploadOptions::default()
     };
@@ -937,6 +959,10 @@
             path.push(&entry.filename);
             if path.try_exists()? {
                 let archive_name = BackupArchiveName::from_path(&entry.filename)?;
+                let crypt_mode = match &encrypt_using_key {
+                    Some(_) => CryptMode::Encrypt,
+                    None => entry.chunk_crypt_mode(),
+                };
 
                 load_previous_snapshot_known_chunks(
                     params,
@@ -967,7 +993,7 @@
                         &archive_name,
                         backup_stats.size,
                         backup_stats.csum,
-                        entry.chunk_crypt_mode(),
+                        crypt_mode,
                     )?;
                     stats.add(SyncStats {
                         chunk_count: backup_stats.chunk_count as usize,
@@ -988,13 +1014,14 @@
                         &backup_writer,
                         IndexType::Dynamic,
                         known_chunks.clone(),
+                        encrypt_using_key.clone(),
                     )
                     .await?;
                     target_manifest.add_file(
                         &archive_name,
                         upload_stats.size,
                         upload_stats.csum,
-                        entry.chunk_crypt_mode(),
+                        crypt_mode,
                     )?;
                     stats.add(SyncStats {
                         chunk_count: upload_stats.chunk_count as usize,
@@ -1016,13 +1043,14 @@
                         &backup_writer,
                         IndexType::Fixed(Some(size)),
                         known_chunks.clone(),
+                        encrypt_using_key.clone(),
                     )
                     .await?;
                     target_manifest.add_file(
                         &archive_name,
                         upload_stats.size,
                         upload_stats.csum,
-                        entry.chunk_crypt_mode(),
+                        crypt_mode,
                     )?;
                     stats.add(SyncStats {
                         chunk_count: upload_stats.chunk_count as usize,
@@ -1064,15 +1092,25 @@
 
     // Rewrite manifest for pushed snapshot, recreating manifest from source on target,
     // needs to update all relevant info for new manifest.
-    target_manifest.unprotected = source_manifest.unprotected;
-    target_manifest.signature = source_manifest.signature;
-    let manifest_json = serde_json::to_value(target_manifest)?;
-    let manifest_string = serde_json::to_string_pretty(&manifest_json)?;
+    target_manifest.unprotected = source_manifest.unprotected.clone();
+    target_manifest.signature = source_manifest.signature.clone();
+    let manifest_string = if encrypt_using_key.is_some() {
+        let fp = source_manifest.change_detection_fingerprint()?;
+        target_manifest.set_change_detection_fingerprint(&fp)?;
+        target_manifest.to_string(encrypt_using_key.as_ref().map(Arc::as_ref))?
+    } else {
+        let manifest_json = serde_json::to_value(target_manifest)?;
+        serde_json::to_string_pretty(&manifest_json)?
+    };
     let backup_stats = backup_writer
         .upload_blob_from_data(
             manifest_string.into_bytes(),
             MANIFEST_BLOB_NAME.as_ref(),
-            upload_options,
+            UploadOptions {
+                compress: true,
+                encrypt: false,
+                ..UploadOptions::default()
+            },
         )
         .await?;
     backup_writer.finish().await?;
@@ -1112,12 +1150,15 @@ async fn push_index(
     backup_writer: &BackupWriter,
     index_type: IndexType,
     known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
+    crypt_config: Option<Arc<CryptConfig>>,
 ) -> Result<BackupStats, Error> {
     let (upload_channel_tx, upload_channel_rx) = mpsc::channel(20);
     let mut chunk_infos =
         stream::iter(0..index.index_count()).map(move |pos| index.chunk_info(pos).unwrap());
 
+    let crypt_config_cloned = crypt_config.clone();
     tokio::spawn(async move {
+        let mut encrypted_mapping = HashMap::new();
         while let Some(chunk_info) = chunk_infos.next().await {
             // Avoid reading known chunks, as they are not uploaded by the backup writer anyways
             let needs_upload = {
@@ -1131,20 +1172,39 @@
                 chunk_reader
                     .read_raw_chunk(&chunk_info.digest)
                     .await
-                    .map(|chunk| {
-                        MergedChunkInfo::New(ChunkInfo {
+                    .and_then(|chunk| {
+                        let (chunk, digest, chunk_len) = match crypt_config_cloned.as_ref() {
+                            Some(crypt_config) => {
+                                let data = chunk.decode(None, Some(&chunk_info.digest))?;
+                                let (chunk, digest) = DataChunkBuilder::new(&data)
+                                    .compress(true)
+                                    .crypt_config(crypt_config)
+                                    .build()?;
+                                encrypted_mapping.insert(chunk_info.digest, digest);
+                                (chunk, digest, data.len() as u64)
+                            }
+                            None => (chunk, chunk_info.digest, chunk_info.size()),
+                        };
+
+                        Ok(MergedChunkInfo::New(ChunkInfo {
                             chunk,
-                            digest: chunk_info.digest,
-                            chunk_len: chunk_info.size(),
+                            digest,
+                            chunk_len,
                             offset: chunk_info.range.start,
-                        })
+                        }))
                     })
             } else {
+                let digest =
+                    if let Some(encrypted_digest) = encrypted_mapping.get(&chunk_info.digest) {
+                        *encrypted_digest
+                    } else {
+                        chunk_info.digest
+                    };
                 Ok(MergedChunkInfo::Known(vec![(
                     // Pass size instead of offset, will be replaced with offset by the backup
                     // writer
                     chunk_info.size(),
-                    chunk_info.digest,
+                    digest,
                 )]))
             };
             let _ = upload_channel_tx.send(merged_chunk_info).await;
@@ -1155,7 +1215,7 @@
 
     let upload_options = UploadOptions {
         compress: true,
-        encrypt: false,
+        encrypt: crypt_config.is_some(),
         index_type,
         ..UploadOptions::default()
     };
-- 
2.47.3
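A note on the unencrypted -> encrypted digest tracking in push_index()
above: re-encryption changes a chunk's digest, so the known-chunk
optimization has to translate source digests before declaring a chunk
known. A minimal, self-contained sketch of that bookkeeping follows;
`ReencryptCache` and `get_or_reencrypt` are illustrative names and not
part of the patch:

    use std::collections::HashMap;

    // Stand-in for the 32 byte chunk digests used by the datastore.
    type Digest = [u8; 32];

    /// Maps the digest of a plaintext source chunk to the digest of its
    /// re-encrypted counterpart, so every chunk is decoded, re-encrypted
    /// and uploaded at most once per group sync.
    struct ReencryptCache {
        mapping: HashMap<Digest, Digest>,
    }

    impl ReencryptCache {
        fn new() -> Self {
            Self {
                mapping: HashMap::new(),
            }
        }

        /// Return the cached encrypted digest for `source`, or compute it
        /// with `reencrypt` and remember the result.
        fn get_or_reencrypt(
            &mut self,
            source: Digest,
            reencrypt: impl FnOnce() -> Digest,
        ) -> Digest {
            *self.mapping.entry(source).or_insert_with(reencrypt)
        }
    }

A plain HashMap grows with the number of distinct source chunks per
group sync; the LRU cache mentioned in the commit message would bound
that memory at the cost of re-encrypting chunks whose entries were
evicted.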