From mboxrd@z Thu Jan  1 00:00:00 1970
From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Date: Tue, 22 Jul 2025 12:10:55 +0200
Message-ID: <20250722101106.526438-40-c.ebner@proxmox.com>
X-Mailer: git-send-email 2.47.2
In-Reply-To: <20250722101106.526438-1-c.ebner@proxmox.com>
References: <20250722101106.526438-1-c.ebner@proxmox.com>
MIME-Version: 1.0
Subject: [pbs-devel] [PATCH proxmox-backup v11 35/46] api: backup: add no-cache flag to bypass local datastore cache
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit

Adds the `no-cache` flag so the client can request to bypass the local
datastore cache for chunk uploads. This is mainly intended for debugging
and benchmarking, but can be used in cases where the caching is known to
be ineffective (no possible deduplication).
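
For illustration only (not part of the patch itself; repository and
archive names are placeholders), the client-side changes below allow
requesting the bypass on the command line, e.g.:

    proxmox-backup-client backup root.pxar:/ --repository host:store1 --no-cache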

Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
Reviewed-by: Lukas Wagner
Reviewed-by: Hannes Laimer
---
changes since version 10:
- no changes

 examples/upload-speed.rs               | 1 +
 pbs-client/src/backup_writer.rs        | 3 +++
 proxmox-backup-client/src/benchmark.rs | 1 +
 proxmox-backup-client/src/main.rs      | 8 ++++++++
 src/api2/backup/environment.rs         | 3 +++
 src/api2/backup/mod.rs                 | 3 +++
 src/api2/backup/upload_chunk.rs        | 9 +++++++++
 src/server/push.rs                     | 1 +
 8 files changed, 29 insertions(+)

diff --git a/examples/upload-speed.rs b/examples/upload-speed.rs
index bbabb37df..ed181330d 100644
--- a/examples/upload-speed.rs
+++ b/examples/upload-speed.rs
@@ -26,6 +26,7 @@ async fn upload_speed() -> Result {
             crypt_config: None,
             debug: false,
             benchmark: true,
+            no_cache: false,
         },
     )
     .await?;
diff --git a/pbs-client/src/backup_writer.rs b/pbs-client/src/backup_writer.rs
index 853b1cb4f..abe7c79e2 100644
--- a/pbs-client/src/backup_writer.rs
+++ b/pbs-client/src/backup_writer.rs
@@ -77,6 +77,8 @@ pub struct BackupWriterOptions<'a> {
     pub debug: bool,
     /// Start benchmark
     pub benchmark: bool,
+    /// Skip datastore cache
+    pub no_cache: bool,
 }
 
 impl BackupWriter {
@@ -100,6 +102,7 @@ impl BackupWriter {
             "store": writer_options.datastore,
             "debug": writer_options.debug,
             "benchmark": writer_options.benchmark,
+            "no-cache": writer_options.no_cache,
         });
 
         if !writer_options.ns.is_root() {
diff --git a/proxmox-backup-client/src/benchmark.rs b/proxmox-backup-client/src/benchmark.rs
index 6b11e216d..463c2e61e 100644
--- a/proxmox-backup-client/src/benchmark.rs
+++ b/proxmox-backup-client/src/benchmark.rs
@@ -237,6 +237,7 @@ async fn test_upload_speed(
             crypt_config: crypt_config.clone(),
             debug: false,
             benchmark: true,
+            no_cache: true,
         },
     )
     .await?;
diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs
index 44c076627..3f6c5adb9 100644
--- a/proxmox-backup-client/src/main.rs
+++ b/proxmox-backup-client/src/main.rs
@@ -742,6 +742,12 @@ fn spawn_catalog_upload(
                 optional: true,
                 default: false,
             },
+            "no-cache": {
+                type: Boolean,
+                description: "Bypass local datastore cache for network storages.",
+                optional: true,
+                default: false,
+            },
         }
     }
 )]
@@ -754,6 +760,7 @@ async fn create_backup(
     change_detection_mode: Option,
     dry_run: bool,
     skip_e2big_xattr: bool,
+    no_cache: bool,
     limit: ClientRateLimitConfig,
     _info: &ApiMethod,
     _rpcenv: &mut dyn RpcEnvironment,
@@ -961,6 +968,7 @@ async fn create_backup(
             crypt_config: crypt_config.clone(),
             debug: true,
             benchmark: false,
+            no_cache,
         },
     )
     .await?;
diff --git a/src/api2/backup/environment.rs b/src/api2/backup/environment.rs
index 6a265bcc5..d5e6869cd 100644
--- a/src/api2/backup/environment.rs
+++ b/src/api2/backup/environment.rs
@@ -112,6 +112,7 @@ pub struct BackupEnvironment {
     result_attributes: Value,
     auth_id: Authid,
     pub debug: bool,
+    pub no_cache: bool,
     pub formatter: &'static dyn OutputFormatter,
     pub worker: Arc,
     pub datastore: Arc,
@@ -128,6 +129,7 @@ impl BackupEnvironment {
         worker: Arc,
         datastore: Arc,
         backup_dir: BackupDir,
+        no_cache: bool,
     ) -> Result {
         let state = SharedBackupState {
             finished: false,
@@ -148,6 +150,7 @@ impl BackupEnvironment {
             worker,
             datastore,
             debug: tracing::enabled!(tracing::Level::DEBUG),
+            no_cache,
             formatter: JSON_FORMATTER,
             backup_dir,
             last_backup: None,
diff --git a/src/api2/backup/mod.rs b/src/api2/backup/mod.rs
index 026f1f106..ae61ff697 100644
--- a/src/api2/backup/mod.rs
+++ b/src/api2/backup/mod.rs
@@ -53,6 +53,7 @@ pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
         ("backup-time", false, &BACKUP_TIME_SCHEMA),
         ("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
         ("benchmark", true, &BooleanSchema::new("Job is a benchmark (do not keep data).").schema()),
+        ("no-cache", true, &BooleanSchema::new("Disable local datastore cache for network storages").schema()),
     ]),
 )
 ).access(
@@ -79,6 +80,7 @@ fn upgrade_to_backup_protocol(
     async move {
         let debug = param["debug"].as_bool().unwrap_or(false);
         let benchmark = param["benchmark"].as_bool().unwrap_or(false);
+        let no_cache = param["no-cache"].as_bool().unwrap_or(false);
 
         let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
@@ -214,6 +216,7 @@ fn upgrade_to_backup_protocol(
             worker.clone(),
             datastore,
             backup_dir,
+            no_cache,
         )?;
 
         env.debug = debug;
diff --git a/src/api2/backup/upload_chunk.rs b/src/api2/backup/upload_chunk.rs
index 4514e3b9b..35378377f 100644
--- a/src/api2/backup/upload_chunk.rs
+++ b/src/api2/backup/upload_chunk.rs
@@ -262,6 +262,15 @@ async fn upload_to_backend(
         );
     }
 
+    if env.no_cache {
+        let object_key = pbs_datastore::s3::object_key_from_digest(&digest)?;
+        let is_duplicate = s3_client
+            .upload_no_replace_with_retry(object_key, data)
+            .await
+            .context("failed to upload chunk to s3 backend")?;
+        return Ok((digest, size, encoded_size, is_duplicate));
+    }
+
     // Avoid re-upload to S3 if the chunk is either present in the LRU cache or the chunk
     // file exists on filesystem. The latter means that the chunk has been present in the
     // past an was not cleaned up by garbage collection, so contained in the S3 object store.
diff --git a/src/server/push.rs b/src/server/push.rs
index c78063662..4a25d51cf 100644
--- a/src/server/push.rs
+++ b/src/server/push.rs
@@ -831,6 +831,7 @@ pub(crate) async fn push_snapshot(
             crypt_config: None,
             debug: false,
             benchmark: false,
+            no_cache: false,
         },
     )
     .await?;
-- 
2.47.2


_______________________________________________
pbs-devel mailing list
pbs-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel