From: Dominik Csapak <d.csapak@proxmox.com>
To: pbs-devel@lists.proxmox.com
Date: Wed, 24 Mar 2021 17:17:18 +0100
Message-Id: <20210324161719.9344-1-d.csapak@proxmox.com>
Subject: [pbs-devel] [PATCH proxmox-backup v3 1/2] client/backup_writer: introduce UploadStats struct

instead of using a big anonymous tuple. This way the returned values
are properly named.
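For illustration only (not part of the patch), a minimal self-contained sketch
of the idea; the fake_upload() helper and its numbers are made up, but the
field names match the struct introduced below. With a struct, each value is
named at the call site instead of being one slot in a six-element tuple:

    struct UploadStats {
        chunk_count: usize,
        chunk_reused: usize,
        size: usize,
        size_reused: usize,
    }

    // Hypothetical stand-in for the real upload function; values are made up.
    fn fake_upload() -> UploadStats {
        UploadStats { chunk_count: 8, chunk_reused: 3, size: 4096, size_reused: 1536 }
    }

    fn main() {
        let stats = fake_upload();
        // Named fields document themselves, unlike positional tuple slots.
        println!(
            "{} chunks ({} reused), uploaded {} of {} bytes",
            stats.chunk_count,
            stats.chunk_reused,
            stats.size - stats.size_reused,
            stats.size
        );
    }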
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
new in v3 (split from single patch)

 src/client/backup_writer.rs | 95 ++++++++++++++++++++-----------------
 1 file changed, 51 insertions(+), 44 deletions(-)

diff --git a/src/client/backup_writer.rs b/src/client/backup_writer.rs
index 696124ed..9b1cdd67 100644
--- a/src/client/backup_writer.rs
+++ b/src/client/backup_writer.rs
@@ -47,6 +47,15 @@ pub struct UploadOptions {
     pub fixed_size: Option,
 }
 
+struct UploadStats {
+    chunk_count: usize,
+    chunk_reused: usize,
+    size: usize,
+    size_reused: usize,
+    duration: std::time::Duration,
+    csum: [u8; 32],
+}
+
 type UploadQueueSender = mpsc::Sender<(MergedChunkInfo, Option)>;
 type UploadResultReceiver = oneshot::Receiver>;
 
@@ -302,25 +311,24 @@ impl BackupWriter {
             .as_u64()
             .unwrap();
 
-        let (chunk_count, chunk_reused, size, size_reused, duration, csum) =
-            Self::upload_chunk_info_stream(
-                self.h2.clone(),
-                wid,
-                stream,
-                &prefix,
-                known_chunks.clone(),
-                if options.encrypt {
-                    self.crypt_config.clone()
-                } else {
-                    None
-                },
-                options.compress,
-                self.verbose,
-            )
-            .await?;
+        let upload_stats = Self::upload_chunk_info_stream(
+            self.h2.clone(),
+            wid,
+            stream,
+            &prefix,
+            known_chunks.clone(),
+            if options.encrypt {
+                self.crypt_config.clone()
+            } else {
+                None
+            },
+            options.compress,
+            self.verbose,
+        )
+        .await?;
 
-        let uploaded = size - size_reused;
-        let vsize_h: HumanByte = size.into();
+        let uploaded = upload_stats.size - upload_stats.size_reused;
+        let vsize_h: HumanByte = upload_stats.size.into();
         let archive = if self.verbose {
             archive_name.to_string()
         } else {
@@ -328,55 +336,55 @@
         };
 
         if archive_name != CATALOG_NAME {
             let speed: HumanByte =
-                ((uploaded * 1_000_000) / (duration.as_micros() as usize)).into();
+                ((uploaded * 1_000_000) / (upload_stats.duration.as_micros() as usize)).into();
             let uploaded: HumanByte = uploaded.into();
             println!(
                 "{}: had to upload {} of {} in {:.2}s, average speed {}/s).",
                 archive,
                 uploaded,
                 vsize_h,
-                duration.as_secs_f64(),
+                upload_stats.duration.as_secs_f64(),
                 speed
             );
         } else {
             println!("Uploaded backup catalog ({})", vsize_h);
         }
 
-        if size_reused > 0 && size > 1024 * 1024 {
-            let reused_percent = size_reused as f64 * 100. / size as f64;
-            let reused: HumanByte = size_reused.into();
+        if upload_stats.size_reused > 0 && upload_stats.size > 1024 * 1024 {
+            let reused_percent = upload_stats.size_reused as f64 * 100. / upload_stats.size as f64;
+            let reused: HumanByte = upload_stats.size_reused.into();
             println!(
                 "{}: backup was done incrementally, reused {} ({:.1}%)",
                 archive, reused, reused_percent
             );
         }
-        if self.verbose && chunk_count > 0 {
+        if self.verbose && upload_stats.chunk_count > 0 {
             println!(
                 "{}: Reused {} from {} chunks.",
-                archive, chunk_reused, chunk_count
+                archive, upload_stats.chunk_reused, upload_stats.chunk_count
             );
             println!(
                 "{}: Average chunk size was {}.",
                 archive,
-                HumanByte::from(size / chunk_count)
+                HumanByte::from(upload_stats.size / upload_stats.chunk_count)
             );
             println!(
                 "{}: Average time per request: {} microseconds.",
                 archive,
-                (duration.as_micros()) / (chunk_count as u128)
+                (upload_stats.duration.as_micros()) / (upload_stats.chunk_count as u128)
             );
         }
 
         let param = json!({
             "wid": wid ,
-            "chunk-count": chunk_count,
-            "size": size,
-            "csum": proxmox::tools::digest_to_hex(&csum),
+            "chunk-count": upload_stats.chunk_count,
+            "size": upload_stats.size,
+            "csum": proxmox::tools::digest_to_hex(&upload_stats.csum),
         });
         let _value = self.h2.post(&close_path, Some(param)).await?;
         Ok(BackupStats {
-            size: size as u64,
-            csum,
+            size: upload_stats.size as u64,
+            csum: upload_stats.csum,
         })
     }
 
@@ -617,8 +625,7 @@ impl BackupWriter {
         crypt_config: Option>,
         compress: bool,
         verbose: bool,
-    ) -> impl Future>
-    {
+    ) -> impl Future> {
         let total_chunks = Arc::new(AtomicUsize::new(0));
         let total_chunks2 = total_chunks.clone();
         let known_chunk_count = Arc::new(AtomicUsize::new(0));
@@ -743,22 +750,22 @@ impl BackupWriter {
             .then(move |result| async move { upload_result.await?.and(result) }.boxed())
             .and_then(move |_| {
                 let duration = start_time.elapsed();
-                let total_chunks = total_chunks2.load(Ordering::SeqCst);
-                let known_chunk_count = known_chunk_count2.load(Ordering::SeqCst);
-                let stream_len = stream_len2.load(Ordering::SeqCst);
-                let reused_len = reused_len2.load(Ordering::SeqCst);
+                let chunk_count = total_chunks2.load(Ordering::SeqCst);
+                let chunk_reused = known_chunk_count2.load(Ordering::SeqCst);
+                let size = stream_len2.load(Ordering::SeqCst);
+                let size_reused = reused_len2.load(Ordering::SeqCst);
                 let mut guard = index_csum_2.lock().unwrap();
                 let csum = guard.take().unwrap().finish();
-                futures::future::ok((
-                    total_chunks,
-                    known_chunk_count,
-                    stream_len,
-                    reused_len,
+                futures::future::ok(UploadStats {
+                    chunk_count,
+                    chunk_reused,
+                    size,
+                    size_reused,
                     duration,
                     csum,
-                ))
+                })
             })
     }
-- 
2.20.1