public inbox for pbs-devel@lists.proxmox.com
* [pbs-devel] [PATCH proxmox-backup v3 1/2] client/backup_writer: introduce UploadStats struct
@ 2021-03-24 16:17 Dominik Csapak
  2021-03-24 16:17 ` [pbs-devel] [PATCH proxmox-backup v3 2/2] client/backup_writer: clarify backup and upload size Dominik Csapak
  2021-03-24 17:27 ` [pbs-devel] applied-series: [PATCH proxmox-backup v3 1/2] client/backup_writer: introduce UploadStats struct Thomas Lamprecht
  0 siblings, 2 replies; 3+ messages in thread
From: Dominik Csapak @ 2021-03-24 16:17 UTC (permalink / raw)
  To: pbs-devel

instead of using a big anonymous tuple. This way the returned values
are properly named.

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
new in v3 (split from single patch)
 src/client/backup_writer.rs | 95 ++++++++++++++++++++-----------------
 1 file changed, 51 insertions(+), 44 deletions(-)
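
[ not part of the patch: for readers skimming the diff below, a minimal
  standalone sketch of the pattern being applied here -- returning a named
  struct instead of a positional tuple. Only the UploadStats field names are
  taken from the patch; the helper functions and example values are made up. ]

use std::time::Duration;

// Before: a positional tuple -- callers must remember what each slot means.
fn stats_as_tuple() -> (usize, usize, usize, usize, Duration, [u8; 32]) {
    (10, 4, 4096, 1024, Duration::from_secs(1), [0u8; 32])
}

// After: the same values behind descriptive names.
struct UploadStats {
    chunk_count: usize,
    chunk_reused: usize,
    size: usize,
    size_reused: usize,
    duration: Duration,
    csum: [u8; 32],
}

fn stats_as_struct() -> UploadStats {
    let (chunk_count, chunk_reused, size, size_reused, duration, csum) = stats_as_tuple();
    UploadStats { chunk_count, chunk_reused, size, size_reused, duration, csum }
}

fn main() {
    let stats = stats_as_struct();
    // Call sites now read stats.size_reused instead of tuple.3.
    println!("reused {} of {} bytes", stats.size_reused, stats.size);
}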

diff --git a/src/client/backup_writer.rs b/src/client/backup_writer.rs
index 696124ed..9b1cdd67 100644
--- a/src/client/backup_writer.rs
+++ b/src/client/backup_writer.rs
@@ -47,6 +47,15 @@ pub struct UploadOptions {
     pub fixed_size: Option<u64>,
 }
 
+struct UploadStats {
+    chunk_count: usize,
+    chunk_reused: usize,
+    size: usize,
+    size_reused: usize,
+    duration: std::time::Duration,
+    csum: [u8; 32],
+}
+
 type UploadQueueSender = mpsc::Sender<(MergedChunkInfo, Option<h2::client::ResponseFuture>)>;
 type UploadResultReceiver = oneshot::Receiver<Result<(), Error>>;
 
@@ -302,25 +311,24 @@ impl BackupWriter {
             .as_u64()
             .unwrap();
 
-        let (chunk_count, chunk_reused, size, size_reused, duration, csum) =
-            Self::upload_chunk_info_stream(
-                self.h2.clone(),
-                wid,
-                stream,
-                &prefix,
-                known_chunks.clone(),
-                if options.encrypt {
-                    self.crypt_config.clone()
-                } else {
-                    None
-                },
-                options.compress,
-                self.verbose,
-            )
-            .await?;
+        let upload_stats = Self::upload_chunk_info_stream(
+            self.h2.clone(),
+            wid,
+            stream,
+            &prefix,
+            known_chunks.clone(),
+            if options.encrypt {
+                self.crypt_config.clone()
+            } else {
+                None
+            },
+            options.compress,
+            self.verbose,
+        )
+        .await?;
 
-        let uploaded = size - size_reused;
-        let vsize_h: HumanByte = size.into();
+        let uploaded = upload_stats.size - upload_stats.size_reused;
+        let vsize_h: HumanByte = upload_stats.size.into();
         let archive = if self.verbose {
             archive_name.to_string()
         } else {
@@ -328,55 +336,55 @@ impl BackupWriter {
         };
         if archive_name != CATALOG_NAME {
             let speed: HumanByte =
-                ((uploaded * 1_000_000) / (duration.as_micros() as usize)).into();
+                ((uploaded * 1_000_000) / (upload_stats.duration.as_micros() as usize)).into();
             let uploaded: HumanByte = uploaded.into();
             println!(
                 "{}: had to upload {} of {} in {:.2}s, average speed {}/s).",
                 archive,
                 uploaded,
                 vsize_h,
-                duration.as_secs_f64(),
+                upload_stats.duration.as_secs_f64(),
                 speed
             );
         } else {
             println!("Uploaded backup catalog ({})", vsize_h);
         }
 
-        if size_reused > 0 && size > 1024 * 1024 {
-            let reused_percent = size_reused as f64 * 100. / size as f64;
-            let reused: HumanByte = size_reused.into();
+        if upload_stats.size_reused > 0 && upload_stats.size > 1024 * 1024 {
+            let reused_percent = upload_stats.size_reused as f64 * 100. / upload_stats.size as f64;
+            let reused: HumanByte = upload_stats.size_reused.into();
             println!(
                 "{}: backup was done incrementally, reused {} ({:.1}%)",
                 archive, reused, reused_percent
             );
         }
-        if self.verbose && chunk_count > 0 {
+        if self.verbose && upload_stats.chunk_count > 0 {
             println!(
                 "{}: Reused {} from {} chunks.",
-                archive, chunk_reused, chunk_count
+                archive, upload_stats.chunk_reused, upload_stats.chunk_count
             );
             println!(
                 "{}: Average chunk size was {}.",
                 archive,
-                HumanByte::from(size / chunk_count)
+                HumanByte::from(upload_stats.size / upload_stats.chunk_count)
             );
             println!(
                 "{}: Average time per request: {} microseconds.",
                 archive,
-                (duration.as_micros()) / (chunk_count as u128)
+                (upload_stats.duration.as_micros()) / (upload_stats.chunk_count as u128)
             );
         }
 
         let param = json!({
             "wid": wid ,
-            "chunk-count": chunk_count,
-            "size": size,
-            "csum": proxmox::tools::digest_to_hex(&csum),
+            "chunk-count": upload_stats.chunk_count,
+            "size": upload_stats.size,
+            "csum": proxmox::tools::digest_to_hex(&upload_stats.csum),
         });
         let _value = self.h2.post(&close_path, Some(param)).await?;
         Ok(BackupStats {
-            size: size as u64,
-            csum,
+            size: upload_stats.size as u64,
+            csum: upload_stats.csum,
         })
     }
 
@@ -617,8 +625,7 @@ impl BackupWriter {
         crypt_config: Option<Arc<CryptConfig>>,
         compress: bool,
         verbose: bool,
-    ) -> impl Future<Output = Result<(usize, usize, usize, usize, std::time::Duration, [u8; 32]), Error>>
-    {
+    ) -> impl Future<Output = Result<UploadStats, Error>> {
         let total_chunks = Arc::new(AtomicUsize::new(0));
         let total_chunks2 = total_chunks.clone();
         let known_chunk_count = Arc::new(AtomicUsize::new(0));
@@ -743,22 +750,22 @@ impl BackupWriter {
             .then(move |result| async move { upload_result.await?.and(result) }.boxed())
             .and_then(move |_| {
                 let duration = start_time.elapsed();
-                let total_chunks = total_chunks2.load(Ordering::SeqCst);
-                let known_chunk_count = known_chunk_count2.load(Ordering::SeqCst);
-                let stream_len = stream_len2.load(Ordering::SeqCst);
-                let reused_len = reused_len2.load(Ordering::SeqCst);
+                let chunk_count = total_chunks2.load(Ordering::SeqCst);
+                let chunk_reused = known_chunk_count2.load(Ordering::SeqCst);
+                let size = stream_len2.load(Ordering::SeqCst);
+                let size_reused = reused_len2.load(Ordering::SeqCst);
 
                 let mut guard = index_csum_2.lock().unwrap();
                 let csum = guard.take().unwrap().finish();
 
-                futures::future::ok((
-                    total_chunks,
-                    known_chunk_count,
-                    stream_len,
-                    reused_len,
+                futures::future::ok(UploadStats {
+                    chunk_count,
+                    chunk_reused,
+                    size,
+                    size_reused,
                     duration,
                     csum,
-                ))
+                })
             })
     }
 
-- 
2.20.1

* [pbs-devel] [PATCH proxmox-backup v3 2/2] client/backup_writer: clarify backup and upload size
  2021-03-24 16:17 [pbs-devel] [PATCH proxmox-backup v3 1/2] client/backup_writer: introduce UploadStats struct Dominik Csapak
@ 2021-03-24 16:17 ` Dominik Csapak
  2021-03-24 17:27 ` [pbs-devel] applied-series: [PATCH proxmox-backup v3 1/2] client/backup_writer: introduce UploadStats struct Thomas Lamprecht
  1 sibling, 0 replies; 3+ messages in thread
From: Dominik Csapak @ 2021-03-24 16:17 UTC (permalink / raw)
  To: pbs-devel

The text 'had to upload [KMG]iB' implies that this is the size we
actually had to send to the server, while in reality it is the
raw data size before compression.

Count the size of the compressed chunks and print it separately.
Split the average speed onto its own line so the output lines do not get too long.

Rename 'uploaded' to 'size_dirty' and 'vsize_h' to 'size'.

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
changes in v3:
* split from single patch

 src/client/backup_writer.rs | 31 ++++++++++++++++++++-----------
 1 file changed, 20 insertions(+), 11 deletions(-)
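
[ not part of the patch: the average-speed figure kept below is simply the
  dirty (non-reused) byte count divided by the elapsed time. A standalone
  sketch of that arithmetic with made-up example numbers: ]

use std::time::Duration;

fn main() {
    // Example values, not measured data.
    let size_dirty: usize = 300 * 1024 * 1024;   // new (non-reused) bytes
    let duration = Duration::from_secs_f64(2.5); // elapsed upload time

    // Same integer arithmetic as in the patch:
    // bytes * 1_000_000 / microseconds == bytes per second.
    let speed = (size_dirty * 1_000_000) / (duration.as_micros() as usize);
    println!(
        "average backup speed: {} bytes/s ({:.2} MiB/s)",
        speed,
        speed as f64 / (1024.0 * 1024.0)
    );
}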

diff --git a/src/client/backup_writer.rs b/src/client/backup_writer.rs
index 9b1cdd67..1e54d39d 100644
--- a/src/client/backup_writer.rs
+++ b/src/client/backup_writer.rs
@@ -1,6 +1,6 @@
 use std::collections::HashSet;
 use std::os::unix::fs::OpenOptionsExt;
-use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
 use std::sync::{Arc, Mutex};
 
 use anyhow::{bail, format_err, Error};
@@ -52,6 +52,7 @@ struct UploadStats {
     chunk_reused: usize,
     size: usize,
     size_reused: usize,
+    size_compressed: usize,
     duration: std::time::Duration,
     csum: [u8; 32],
 }
@@ -327,8 +328,8 @@ impl BackupWriter {
         )
         .await?;
 
-        let uploaded = upload_stats.size - upload_stats.size_reused;
-        let vsize_h: HumanByte = upload_stats.size.into();
+        let size_dirty = upload_stats.size - upload_stats.size_reused;
+        let size: HumanByte = upload_stats.size.into();
         let archive = if self.verbose {
             archive_name.to_string()
         } else {
@@ -336,18 +337,20 @@ impl BackupWriter {
         };
         if archive_name != CATALOG_NAME {
             let speed: HumanByte =
-                ((uploaded * 1_000_000) / (upload_stats.duration.as_micros() as usize)).into();
-            let uploaded: HumanByte = uploaded.into();
+                ((size_dirty * 1_000_000) / (upload_stats.duration.as_micros() as usize)).into();
+            let size_dirty: HumanByte = size_dirty.into();
+            let size_compressed: HumanByte = upload_stats.size_compressed.into();
             println!(
-                "{}: had to upload {} of {} in {:.2}s, average speed {}/s).",
+                "{}: had to backup {} of {} (compressed {}) in {:.2}s",
                 archive,
-                uploaded,
-                vsize_h,
-                upload_stats.duration.as_secs_f64(),
-                speed
+                size_dirty,
+                size,
+                size_compressed,
+                upload_stats.duration.as_secs_f64()
             );
+            println!("{}: average backup speed: {}/s", archive, speed);
         } else {
-            println!("Uploaded backup catalog ({})", vsize_h);
+            println!("Uploaded backup catalog ({})", size);
         }
 
         if upload_stats.size_reused > 0 && upload_stats.size > 1024 * 1024 {
@@ -633,6 +636,8 @@ impl BackupWriter {
 
         let stream_len = Arc::new(AtomicUsize::new(0));
         let stream_len2 = stream_len.clone();
+        let compressed_stream_len = Arc::new(AtomicU64::new(0));
+        let compressed_stream_len2 = compressed_stream_len.clone();
         let reused_len = Arc::new(AtomicUsize::new(0));
         let reused_len2 = reused_len.clone();
 
@@ -680,8 +685,10 @@ impl BackupWriter {
                     reused_len.fetch_add(chunk_len, Ordering::SeqCst);
                     future::ok(MergedChunkInfo::Known(vec![(offset, *digest)]))
                 } else {
+                    let compressed_stream_len2 = compressed_stream_len.clone();
                     known_chunks.insert(*digest);
                     future::ready(chunk_builder.build().map(move |(chunk, digest)| {
+                        compressed_stream_len2.fetch_add(chunk.raw_size(), Ordering::SeqCst);
                         MergedChunkInfo::New(ChunkInfo {
                             chunk,
                             digest,
@@ -754,6 +761,7 @@ impl BackupWriter {
                 let chunk_reused = known_chunk_count2.load(Ordering::SeqCst);
                 let size = stream_len2.load(Ordering::SeqCst);
                 let size_reused = reused_len2.load(Ordering::SeqCst);
+                let size_compressed = compressed_stream_len2.load(Ordering::SeqCst) as usize;
 
                 let mut guard = index_csum_2.lock().unwrap();
                 let csum = guard.take().unwrap().finish();
@@ -763,6 +771,7 @@ impl BackupWriter {
                     chunk_reused,
                     size,
                     size_reused,
+                    size_compressed,
                     duration,
                     csum,
                 })
-- 
2.20.1

* [pbs-devel] applied-series: [PATCH proxmox-backup v3 1/2] client/backup_writer: introduce UploadStats struct
  2021-03-24 16:17 [pbs-devel] [PATCH proxmox-backup v3 1/2] client/backup_writer: introduce UploadStats struct Dominik Csapak
  2021-03-24 16:17 ` [pbs-devel] [PATCH proxmox-backup v3 2/2] client/backup_writer: clarify backup and upload size Dominik Csapak
@ 2021-03-24 17:27 ` Thomas Lamprecht
  1 sibling, 0 replies; 3+ messages in thread
From: Thomas Lamprecht @ 2021-03-24 17:27 UTC (permalink / raw)
  To: Proxmox Backup Server development discussion, Dominik Csapak

On 24.03.21 17:17, Dominik Csapak wrote:
> instead of using a big anonymous tuple. This way the returned values
> are properly named.
> 
> Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
> ---
> new in v3 (split from single patch)
>  src/client/backup_writer.rs | 95 ++++++++++++++++++++-----------------
>  1 file changed, 51 insertions(+), 44 deletions(-)
> 
>

applied both patches, thanks!