From mboxrd@z Thu Jan 1 00:00:00 1970
From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Date: Thu, 12 Sep 2024 16:32:57 +0200
Message-Id: <20240912143322.548839-9-c.ebner@proxmox.com>
X-Mailer: git-send-email 2.39.2
In-Reply-To: <20240912143322.548839-1-c.ebner@proxmox.com>
References: <20240912143322.548839-1-c.ebner@proxmox.com>
MIME-Version: 1.0
Subject: [pbs-devel] [PATCH v3 proxmox-backup 08/33] client: backup writer: allow push uploading index and chunks
List-Id: Proxmox Backup Server development discussion <pbs-devel.lists.proxmox.com>
Reply-To: Proxmox Backup Server development discussion <pbs-devel@lists.proxmox.com>
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit

Add a method `upload_index_chunk_info` to upload an existing index and
the corresponding chunk stream.

Unlike `upload_stream`, which takes an input stream of raw bytes, this
method takes a stream of `ChunkInfo` objects provided by the local
chunk reader of the sync job's source.

Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
changes since version 2:
- no changes
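
For reviewers, below is a rough sketch of the intended call site on the
push (sync job) side. This is illustration only and not part of the
patch: the helper name `push_dynamic_archive`, the `writer` and
`chunk_info_stream` arguments and the exact import paths are
assumptions, as is the `Default` construction of `UploadOptions`.

    use std::collections::HashSet;
    use std::sync::{Arc, Mutex};

    use anyhow::Error;
    use futures::stream::Stream;

    // NOTE: import paths assumed for illustration only.
    use pbs_client::{BackupStats, BackupWriter, ChunkInfo, UploadOptions};

    // Hypothetical helper: push one dynamically chunked archive to the
    // target. `chunk_info_stream` would come from the local chunk reader
    // of the sync job's source, `writer` from the connection to the target.
    async fn push_dynamic_archive(
        writer: Arc<BackupWriter>,
        archive_name: &str,
        chunk_info_stream: impl Stream<Item = Result<ChunkInfo, Error>>,
    ) -> Result<BackupStats, Error> {
        let options = UploadOptions {
            encrypt: false,
            // None selects the dynamic index, Some(size) the fixed one.
            fixed_size: None,
            ..UploadOptions::default()
        };

        // Digests the target already has; matching chunks are only
        // re-registered in the index instead of re-uploading their data.
        let known_chunks = Arc::new(Mutex::new(HashSet::new()));

        writer
            .upload_index_chunk_info(archive_name, chunk_info_stream, options, known_chunks)
            .await
    }
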
 pbs-client/src/backup_writer.rs | 106 ++++++++++++++++++++++++++++++++
 1 file changed, 106 insertions(+)

diff --git a/pbs-client/src/backup_writer.rs b/pbs-client/src/backup_writer.rs
index b71157164..dbb7db0e8 100644
--- a/pbs-client/src/backup_writer.rs
+++ b/pbs-client/src/backup_writer.rs
@@ -288,6 +288,112 @@ impl BackupWriter {
             .await
     }
 
+    /// Upload chunks and index
+    pub async fn upload_index_chunk_info(
+        &self,
+        archive_name: &str,
+        stream: impl Stream<Item = Result<ChunkInfo, Error>>,
+        options: UploadOptions,
+        known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
+    ) -> Result<BackupStats, Error> {
+        let mut param = json!({ "archive-name": archive_name });
+        let prefix = if let Some(size) = options.fixed_size {
+            param["size"] = size.into();
+            "fixed"
+        } else {
+            "dynamic"
+        };
+
+        if options.encrypt && self.crypt_config.is_none() {
+            bail!("requested encryption without a crypt config");
+        }
+
+        let wid = self
+            .h2
+            .post(&format!("{prefix}_index"), Some(param))
+            .await?
+            .as_u64()
+            .unwrap();
+
+        let total_chunks = Arc::new(AtomicUsize::new(0));
+        let known_chunk_count = Arc::new(AtomicUsize::new(0));
+
+        let stream_len = Arc::new(AtomicUsize::new(0));
+        let compressed_stream_len = Arc::new(AtomicU64::new(0));
+        let reused_len = Arc::new(AtomicUsize::new(0));
+
+        let counters = UploadStatsCounters {
+            injected_chunk_count: Arc::new(AtomicUsize::new(0)),
+            known_chunk_count: known_chunk_count.clone(),
+            total_chunks: total_chunks.clone(),
+            compressed_stream_len: compressed_stream_len.clone(),
+            injected_len: Arc::new(AtomicUsize::new(0)),
+            reused_len: reused_len.clone(),
+            stream_len: stream_len.clone(),
+        };
+
+        let is_fixed_chunk_size = prefix == "fixed";
+
+        let index_csum = Arc::new(Mutex::new(Some(Sha256::new())));
+        let index_csum_2 = index_csum.clone();
+
+        let stream = stream
+            .and_then(move |chunk_info| {
+                total_chunks.fetch_add(1, Ordering::SeqCst);
+                reused_len.fetch_add(chunk_info.chunk_len as usize, Ordering::SeqCst);
+                let offset = stream_len.fetch_add(chunk_info.chunk_len as usize, Ordering::SeqCst);
+
+                let end_offset = offset as u64 + chunk_info.chunk_len;
+                let mut guard = index_csum.lock().unwrap();
+                let csum = guard.as_mut().unwrap();
+                if !is_fixed_chunk_size {
+                    csum.update(&end_offset.to_le_bytes());
+                }
+                csum.update(&chunk_info.digest);
+
+                let mut known_chunks = known_chunks.lock().unwrap();
+                if known_chunks.contains(&chunk_info.digest) {
+                    known_chunk_count.fetch_add(1, Ordering::SeqCst);
+                    future::ok(MergedChunkInfo::Known(vec![(
+                        chunk_info.offset,
+                        chunk_info.digest,
+                    )]))
+                } else {
+                    known_chunks.insert(chunk_info.digest);
+                    future::ok(MergedChunkInfo::New(chunk_info))
+                }
+            })
+            .merge_known_chunks();
+
+        let upload_stats = Self::upload_merged_chunk_stream(
+            self.h2.clone(),
+            wid,
+            prefix,
+            stream,
+            index_csum_2,
+            counters,
+        )
+        .await?;
+
+        let param = json!({
+            "wid": wid ,
+            "chunk-count": upload_stats.chunk_count,
+            "size": upload_stats.size,
+            "csum": hex::encode(upload_stats.csum),
+        });
+        let _value = self
+            .h2
+            .post(&format!("{prefix}_close"), Some(param))
+            .await?;
+
+        Ok(BackupStats {
+            size: upload_stats.size as u64,
+            csum: upload_stats.csum,
+            duration: upload_stats.duration,
+            chunk_count: upload_stats.chunk_count as u64,
+        })
+    }
+
     pub async fn upload_stream(
         &self,
         archive_name: &str,
-- 
2.39.2

_______________________________________________
pbs-devel mailing list
pbs-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel