From: Dominik Csapak <d.csapak@proxmox.com>
To: pbs-devel@lists.proxmox.com
Date: Tue, 13 Apr 2021 16:35:36 +0200
Message-Id: <20210413143536.19004-1-d.csapak@proxmox.com>
X-Mailer: git-send-email 2.20.1
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Subject: [pbs-devel] [PATCH proxmox-backup] backup/verify: improve speed by
 sorting chunks by inode

instead of reading the chunks from disk in the order given by the index
file, stat them first, sort them by inode number, and read them in that
order.

this can have a very positive impact on read speed on spinning disks,
even with the overhead of the additional stat calls.

memory footprint should be tolerable: for 1_000_000 chunks we need
~16MiB of memory (a Vec of 64bit position + 64bit inode per chunk);
assuming 4MiB chunks, such an index would reference 4TiB of data.
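
as a quick sanity check of that estimate (a minimal sketch, assuming the
list is a Vec of (64bit position, 64bit inode) tuples on a 64-bit target):

    fn main() {
        // each entry stores the chunk's position in the index plus its inode
        let entry_size = std::mem::size_of::<(usize, u64)>(); // 16 bytes
        let chunks = 1_000_000;
        let bytes = chunks * entry_size;
        // 16_000_000 bytes / 1024^2 ~= 15.3 MiB
        println!("{:.1} MiB", bytes as f64 / (1024.0 * 1024.0));
    }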

two small benchmarks (single spinner, ext4) here showed an improvement
from ~430 seconds to ~330 seconds (~23% faster) for a 32GiB fixed index,
and from ~160 seconds to ~120 seconds (~25% faster) for a 10GiB dynamic
index.

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
---
it would be great if other people could also benchmark this patch on
different setups (in addition to mine), to confirm or disprove my results.
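
for quick experiments outside of the verify worker, here is a minimal
standalone sketch of the same stat-and-sort-by-inode pattern (std only;
the file list is hypothetical and this is not the datastore API):

    use std::fs;
    use std::io::Read;
    use std::os::unix::fs::MetadataExt;

    // stat every file up front, sort by inode, then read in that order --
    // on a spinning disk this turns scattered seeks into a mostly
    // forward sweep over the platter
    fn read_sorted_by_inode(paths: &[&str]) -> std::io::Result<u64> {
        let mut list: Vec<(usize, u64)> = Vec::with_capacity(paths.len());
        for (pos, path) in paths.iter().enumerate() {
            list.push((pos, fs::metadata(path)?.ino()));
        }
        list.sort_unstable_by_key(|&(_, ino)| ino);

        let mut total = 0u64;
        let mut buf = Vec::new();
        for (pos, _) in list {
            buf.clear();
            total += fs::File::open(paths[pos])?.read_to_end(&mut buf)? as u64;
        }
        Ok(total)
    }

    fn main() -> std::io::Result<()> {
        let bytes = read_sorted_by_inode(&["/etc/hostname", "/etc/os-release"])?;
        println!("read {} bytes", bytes);
        Ok(())
    }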

 src/backup/datastore.rs |  5 +++++
 src/backup/verify.rs    | 32 +++++++++++++++++++++++++++++---
 2 files changed, 34 insertions(+), 3 deletions(-)

diff --git a/src/backup/datastore.rs b/src/backup/datastore.rs
index 28dda7e7..8162c269 100644
--- a/src/backup/datastore.rs
+++ b/src/backup/datastore.rs
@@ -686,6 +686,11 @@ impl DataStore {
     }
 
 
+    pub fn stat_chunk(&self, digest: &[u8; 32]) -> Result<std::fs::Metadata, Error> {
+        let (chunk_path, _digest_str) = self.chunk_store.chunk_path(digest);
+        std::fs::metadata(chunk_path).map_err(Error::from)
+    }
+
     pub fn load_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
 
         let (chunk_path, digest_str) = self.chunk_store.chunk_path(digest);
diff --git a/src/backup/verify.rs b/src/backup/verify.rs
index ac4a6c29..9173bd9d 100644
--- a/src/backup/verify.rs
+++ b/src/backup/verify.rs
@@ -159,13 +159,16 @@ fn verify_index_chunks(
         }
     );
 
-    for pos in 0..index.index_count() {
+    let index_count = index.index_count();
+    let mut chunk_list = Vec::with_capacity(index_count);
 
+    use std::os::unix::fs::MetadataExt;
+
+    for pos in 0..index_count {
         verify_worker.worker.check_abort()?;
         crate::tools::fail_on_shutdown()?;
 
         let info = index.chunk_info(pos).unwrap();
-        let size = info.size();
 
         if verify_worker.verified_chunks.lock().unwrap().contains(&info.digest) {
             continue; // already verified
@@ -178,15 +181,38 @@ fn verify_index_chunks(
             continue;
         }
 
+        match verify_worker.datastore.stat_chunk(&info.digest) {
+            Err(err) => {
+                verify_worker.corrupt_chunks.lock().unwrap().insert(info.digest);
+                task_log!(verify_worker.worker, "can't verify chunk, stat failed - {}", err);
+                errors.fetch_add(1, Ordering::SeqCst);
+                rename_corrupted_chunk(verify_worker.datastore.clone(), &info.digest, &verify_worker.worker);
+            }
+            Ok(metadata) => {
+                chunk_list.push((pos, metadata.ino()));
+            }
+        }
+    }
+
+    chunk_list.sort_unstable_by(|(_, ino_a), (_, ino_b)| {
+        ino_a.cmp(&ino_b)
+    });
+
+    for (pos, _) in chunk_list {
+        verify_worker.worker.check_abort()?;
+        crate::tools::fail_on_shutdown()?;
+
+        let info = index.chunk_info(pos).unwrap();
+
         match verify_worker.datastore.load_chunk(&info.digest) {
             Err(err) => {
                 verify_worker.corrupt_chunks.lock().unwrap().insert(info.digest);
                 task_log!(verify_worker.worker, "can't verify chunk, load failed - {}", err);
                 errors.fetch_add(1, Ordering::SeqCst);
                 rename_corrupted_chunk(verify_worker.datastore.clone(), &info.digest, &verify_worker.worker);
-                continue;
             }
             Ok(chunk) => {
+                let size = info.size();
                 read_bytes += chunk.raw_size();
                 decoder_pool.send((chunk, info.digest, size))?;
                 decoded_bytes += size;
-- 
2.20.1