Date: Tue, 12 Mar 2024 10:46:13 +0100
From: Fabian Grünbichler
To: Proxmox Backup Server development discussion <pbs-devel@lists.proxmox.com>
Subject: Re: [pbs-devel] [RFC v2 proxmox-backup 26/36] client: chunk stream: add chunk injection queues
Message-Id: <1710235177.wzah8r9rl2.astroid@yuna.none>
In-Reply-To: <20240305092703.126906-27-c.ebner@proxmox.com>
References: <20240305092703.126906-1-c.ebner@proxmox.com> <20240305092703.126906-27-c.ebner@proxmox.com>

On March 5, 2024 10:26 am, Christian Ebner wrote:
> Adds a queue to the chunk stream to request forced boundaries at a
> given offset within the stream and inject reused chunks after this
> boundary.
>
> The chunks are then passed along to the uploader stream using the
> injection queue, which inserts them during upload.
>
> Signed-off-by: Christian Ebner

I think this patch would benefit from a few more Option<..> wrappings (to
make it clear where injection can actually happen), and possibly also from
combining some of the parameters into structs (to reduce their number, and
to group the ones that are only set/needed for injection/caching/..).

I haven't tested the proposed changes below, but AFAICT they should work..
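to illustrate with the simplest case: with the patch as-is, every call site
that never injects anything has to allocate a dummy queue (see pxar-bin, the
restore daemon and the catar test below). with an Option wrapping, those
callers would just pass None. untested sketch, trailing arguments only:

// as proposed in this patch: a dummy queue for callers without injection
create_archive(
    // ...
    None,
    None,
    options,
    Arc::new(Mutex::new(VecDeque::new())),
)
.await?;

// with an Option wrapping instead
create_archive(
    // ...
    None,
    None,
    options,
    None,
)
.await?;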
> ---
> changes since version 1:
> - refactor bail on non-existing payload target archive name
>
>  examples/test_chunk_speed2.rs                 | 10 ++-
>  pbs-client/src/backup_writer.rs               | 89 +++++++++++--------
>  pbs-client/src/chunk_stream.rs                | 42 ++++++++-
>  pbs-client/src/pxar/create.rs                 |  6 +-
>  pbs-client/src/pxar_backup_stream.rs          |  8 +-
>  proxmox-backup-client/src/main.rs             | 28 ++++--
>  .../src/proxmox_restore_daemon/api.rs         |  3 +
>  pxar-bin/src/main.rs                          |  5 +-
>  tests/catar.rs                                |  3 +
>  9 files changed, 147 insertions(+), 47 deletions(-)
>
> diff --git a/examples/test_chunk_speed2.rs b/examples/test_chunk_speed2.rs
> index 3f69b436..b20a5b59 100644
> --- a/examples/test_chunk_speed2.rs
> +++ b/examples/test_chunk_speed2.rs
> @@ -1,3 +1,6 @@
> +use std::collections::VecDeque;
> +use std::sync::{Arc, Mutex};
> +
>  use anyhow::Error;
>  use futures::*;
>
> @@ -26,7 +29,12 @@ async fn run() -> Result<(), Error> {
>          .map_err(Error::from);
>
>      //let chunk_stream = FixedChunkStream::new(stream, 4*1024*1024);
> -    let mut chunk_stream = ChunkStream::new(stream, None);
> +    let mut chunk_stream = ChunkStream::new(
> +        stream,
> +        None,
> +        Arc::new(Mutex::new(VecDeque::new())),
> +        Arc::new(Mutex::new(VecDeque::new())),
> +    );
>
>      let start_time = std::time::Instant::now();
>
> diff --git a/pbs-client/src/backup_writer.rs b/pbs-client/src/backup_writer.rs
> index 8a03d8ea..e66b93df 100644
> --- a/pbs-client/src/backup_writer.rs
> +++ b/pbs-client/src/backup_writer.rs
> @@ -1,4 +1,4 @@
> -use std::collections::HashSet;
> +use std::collections::{HashSet, VecDeque};
>  use std::future::Future;
>  use std::os::unix::fs::OpenOptionsExt;
>  use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
> @@ -23,6 +23,7 @@ use pbs_tools::crypt_config::CryptConfig;
>
>  use proxmox_human_byte::HumanByte;
>
> +use super::inject_reused_chunks::{InjectChunks, InjectReusedChunks, InjectedChunksInfo};
>  use super::merge_known_chunks::{MergeKnownChunks, MergedChunkInfo};
>
>  use super::{H2Client, HttpClient};
> @@ -265,6 +266,7 @@ impl BackupWriter {
>          archive_name: &str,
>          stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
>          options: UploadOptions,
> +        injection_queue: Option<Arc<Mutex<VecDeque<InjectChunks>>>>,

this one is already properly optional :)

>      ) -> Result<BackupStats, Error> {
>          let known_chunks = Arc::new(Mutex::new(HashSet::new()));
>
> @@ -341,6 +343,7 @@ impl BackupWriter {
>                  None
>              },
>              options.compress,
> +            injection_queue,
>          )
>          .await?;
>
> @@ -637,6 +640,7 @@ impl BackupWriter {
>          known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
>          crypt_config: Option<Arc<CryptConfig>>,
>          compress: bool,
> +        injection_queue: Option<Arc<Mutex<VecDeque<InjectChunks>>>>,
>      ) -> impl Future<Output = Result<UploadStats, Error>> {
>          let total_chunks = Arc::new(AtomicUsize::new(0));
>          let total_chunks2 = total_chunks.clone();
> @@ -663,48 +667,63 @@ impl BackupWriter {
>          let index_csum_2 = index_csum.clone();
>
>          stream
> -            .and_then(move |data| {
> -                let chunk_len = data.len();
> +            .inject_reused_chunks(
> +                injection_queue.unwrap_or_default(),
> +                stream_len,
> +                reused_len.clone(),
> +                index_csum.clone(),
> +            )
> +            .and_then(move |chunk_info| match chunk_info {
> +                InjectedChunksInfo::Known(chunks) => {
> +                    total_chunks.fetch_add(chunks.len(), Ordering::SeqCst);
> +                    future::ok(MergedChunkInfo::Known(chunks))
> +                }
> +                InjectedChunksInfo::Raw((offset, data)) => {
> +                    let chunk_len = data.len();
>
> -                total_chunks.fetch_add(1, Ordering::SeqCst);
> -                let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64;

this housekeeping is now split between here and inject_reused_chunks, which
makes it a bit hard to follow..
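(side note for anyone reading along: InjectedChunksInfo is introduced by the
inject_reused_chunks patch earlier in this series, so its definition is not
visible in this diff. going by the match arms above it should look roughly
like this - the exact payload types are my reconstruction, not verbatim from
the series:)

// reconstructed from the usage above
pub enum InjectedChunksInfo {
    // reused chunks to re-index as-is, as (end offset, digest) pairs
    Known(Vec<(u64, [u8; 32])>),
    // a freshly chunked part of the input stream, with its start offset
    Raw((u64, bytes::BytesMut)),
}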
> +                    total_chunks.fetch_add(1, Ordering::SeqCst);
>
> -                let mut chunk_builder = DataChunkBuilder::new(data.as_ref()).compress(compress);
> +                    let mut chunk_builder = DataChunkBuilder::new(data.as_ref()).compress(compress);
>
> -                if let Some(ref crypt_config) = crypt_config {
> -                    chunk_builder = chunk_builder.crypt_config(crypt_config);
> -                }
> +                    if let Some(ref crypt_config) = crypt_config {
> +                        chunk_builder = chunk_builder.crypt_config(crypt_config);
> +                    }
>
> -                let mut known_chunks = known_chunks.lock().unwrap();
> -                let digest = chunk_builder.digest();
> +                    let mut known_chunks = known_chunks.lock().unwrap();
>
> -                let mut guard = index_csum.lock().unwrap();
> -                let csum = guard.as_mut().unwrap();
> +                    let digest = chunk_builder.digest();
>
> -                let chunk_end = offset + chunk_len as u64;
> +                    let mut guard = index_csum.lock().unwrap();
> +                    let csum = guard.as_mut().unwrap();
>
> -                if !is_fixed_chunk_size {
> -                    csum.update(&chunk_end.to_le_bytes());
> -                }
> -                csum.update(digest);
> +                    let chunk_end = offset + chunk_len as u64;
>
> -                let chunk_is_known = known_chunks.contains(digest);
> -                if chunk_is_known {
> -                    known_chunk_count.fetch_add(1, Ordering::SeqCst);
> -                    reused_len.fetch_add(chunk_len, Ordering::SeqCst);
> -                    future::ok(MergedChunkInfo::Known(vec![(offset, *digest)]))
> -                } else {
> -                    let compressed_stream_len2 = compressed_stream_len.clone();
> -                    known_chunks.insert(*digest);
> -                    future::ready(chunk_builder.build().map(move |(chunk, digest)| {
> -                        compressed_stream_len2.fetch_add(chunk.raw_size(), Ordering::SeqCst);
> -                        MergedChunkInfo::New(ChunkInfo {
> -                            chunk,
> -                            digest,
> -                            chunk_len: chunk_len as u64,
> -                            offset,
> -                        })
> -                    }))
> +                    if !is_fixed_chunk_size {
> +                        csum.update(&chunk_end.to_le_bytes());
> +                    }
> +                    csum.update(digest);
> +
> +                    let chunk_is_known = known_chunks.contains(digest);
> +                    if chunk_is_known {
> +                        known_chunk_count.fetch_add(1, Ordering::SeqCst);
> +                        reused_len.fetch_add(chunk_len, Ordering::SeqCst);
> +
> +                        future::ok(MergedChunkInfo::Known(vec![(offset, *digest)]))
> +                    } else {
> +                        let compressed_stream_len2 = compressed_stream_len.clone();
> +                        known_chunks.insert(*digest);
> +
> +                        future::ready(chunk_builder.build().map(move |(chunk, digest)| {
> +                            compressed_stream_len2.fetch_add(chunk.raw_size(), Ordering::SeqCst);
> +
> +                            MergedChunkInfo::New(ChunkInfo {
> +                                chunk,
> +                                digest,
> +                                chunk_len: chunk_len as u64,
> +                                offset,
> +                            })
> +                        }))
> +                    }
>                  }
>              })
>              .merge_known_chunks()
> diff --git a/pbs-client/src/chunk_stream.rs b/pbs-client/src/chunk_stream.rs
> index 895f6eae..891d6928 100644
> --- a/pbs-client/src/chunk_stream.rs
> +++ b/pbs-client/src/chunk_stream.rs
> @@ -1,4 +1,6 @@
> +use std::collections::VecDeque;
>  use std::pin::Pin;
> +use std::sync::{Arc, Mutex};
>  use std::task::{Context, Poll};
>
>  use anyhow::Error;
> @@ -8,21 +10,34 @@ use futures::stream::{Stream, TryStream};
>
>  use pbs_datastore::Chunker;
>
> +use crate::inject_reused_chunks::InjectChunks;
> +
>  /// Split input stream into dynamic sized chunks
>  pub struct ChunkStream<S: Unpin> {
>      input: S,
>      chunker: Chunker,
>      buffer: BytesMut,
>      scan_pos: usize,
> +    consumed: u64,
> +    boundaries: Arc<Mutex<VecDeque<InjectChunks>>>,
> +    injections: Arc<Mutex<VecDeque<InjectChunks>>>,

okay, so boundaries and injections are either both meaningful, or neither
is - we only set them for the payload stream. they should be an Option ;)

technically consumed atm could also go inside that option, and we could
make the whole thing a struct?
struct InjectionData {
    boundaries: Arc<Mutex<VecDeque<InjectChunks>>>,
    injections: Arc<Mutex<VecDeque<InjectChunks>>>,
    consumed: u64,
}

and then pass in an Option of that?

>  }
>
>  impl<S: Unpin> ChunkStream<S> {
> -    pub fn new(input: S, chunk_size: Option<usize>) -> Self {
> +    pub fn new(
> +        input: S,
> +        chunk_size: Option<usize>,
> +        boundaries: Arc<Mutex<VecDeque<InjectChunks>>>,
> +        injections: Arc<Mutex<VecDeque<InjectChunks>>>,
> +    ) -> Self {
>          Self {
>              input,
>              chunker: Chunker::new(chunk_size.unwrap_or(4 * 1024 * 1024)),
>              buffer: BytesMut::new(),
>              scan_pos: 0,
> +            consumed: 0,
> +            boundaries,
> +            injections,
>          }
>      }
>  }
> @@ -40,6 +55,29 @@ where
>      fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
>          let this = self.get_mut();
>          loop {
> +            {

this scope for lock purposes can then be an `if let Some(..)`, either with
the struct or a tuple..

> +                // Make sure to release this lock as soon as possible
> +                let mut boundaries = this.boundaries.lock().unwrap();
> +                if let Some(inject) = boundaries.pop_front() {
> +                    let max = this.consumed + this.buffer.len() as u64;
> +                    if inject.boundary <= max {
> +                        let chunk_size = (inject.boundary - this.consumed) as usize;
> +                        let result = this.buffer.split_to(chunk_size);
> +                        this.consumed += chunk_size as u64;
> +                        this.scan_pos = 0;
> +
> +                        // Add the size of the injected chunks to consumed, so chunk stream offsets
> +                        // are in sync with the rest of the archive.
> +                        this.consumed += inject.size as u64;
> +
> +                        this.injections.lock().unwrap().push_back(inject);
> +
> +                        return Poll::Ready(Some(Ok(result)));
> +                    }
> +                    boundaries.push_front(inject);
> +                }
> +            }
> +
>              if this.scan_pos < this.buffer.len() {
>                  let boundary = this.chunker.scan(&this.buffer[this.scan_pos..]);
>
> @@ -50,7 +88,9 @@ where
>                  // continue poll
>              } else if chunk_size <= this.buffer.len() {
>                  let result = this.buffer.split_to(chunk_size);
> +                this.consumed += chunk_size as u64;
>                  this.scan_pos = 0;
> +
>                  return Poll::Ready(Some(Ok(result)));
>              } else {
>                  panic!("got unexpected chunk boundary from chunker");
> diff --git a/pbs-client/src/pxar/create.rs b/pbs-client/src/pxar/create.rs
> index 59aa4450..9ae84d37 100644
> --- a/pbs-client/src/pxar/create.rs
> +++ b/pbs-client/src/pxar/create.rs
> @@ -1,4 +1,4 @@
> -use std::collections::{HashMap, HashSet};
> +use std::collections::{HashMap, HashSet, VecDeque};
>  use std::ffi::{CStr, CString, OsStr};
>  use std::fmt;
>  use std::io::{self, Read};
> @@ -26,6 +26,7 @@ use proxmox_sys::fs::{self, acl, xattr};
>
>  use pbs_datastore::catalog::BackupCatalogWriter;
>
> +use crate::inject_reused_chunks::InjectChunks;
>  use crate::pxar::metadata::errno_is_unsupported;
>  use crate::pxar::tools::assert_single_path_component;
>  use crate::pxar::Flags;
> @@ -131,6 +132,7 @@ struct Archiver {
>      hardlinks: HashMap<HardLinkInfo, (PathBuf, LinkOffset)>,
>      file_copy_buffer: Vec<u8>,
>      skip_e2big_xattr: bool,
> +    forced_boundaries: Arc<Mutex<VecDeque<InjectChunks>>>,
>  }
>
>  type Encoder<'a, T> = pxar::encoder::aio::Encoder<'a, T>;
>
> @@ -143,6 +145,7 @@ pub async fn create_archive(
>      catalog: Option<Arc<Mutex<dyn BackupCatalogWriter + Send>>>,
>      mut payload_writer: Option<T>,
>      options: PxarCreateOptions,
> +    forced_boundaries: Arc<Mutex<VecDeque<InjectChunks>>>,

could be combined with the payload_writer and the caching parameters added
later on? and then the whole thing can be optional?
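i.e., something like this (untested sketch, PayloadInput is a placeholder
name - the caching state added later in the series would go in there as
well):

/// everything that only exists when writing a split payload archive, so
/// that create_archive can take a single Option instead of a payload
/// writer plus injection-only parameters
struct PayloadInput<T> {
    writer: T,
    forced_boundaries: Arc<Mutex<VecDeque<InjectChunks>>>,
}

callers that don't use a split payload stream (pxar-bin, the restore daemon,
the catar test) then simply pass None instead of constructing dummy queues.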
>  ) -> Result<(), Error>
>  where
>      T: SeqWrite + Send,
> @@ -201,6 +204,7 @@ where
>          hardlinks: HashMap::new(),
>          file_copy_buffer: vec::undefined(4 * 1024 * 1024),
>          skip_e2big_xattr: options.skip_e2big_xattr,
> +        forced_boundaries,
>      };
>
>      archiver
> diff --git a/pbs-client/src/pxar_backup_stream.rs b/pbs-client/src/pxar_backup_stream.rs
> index 9a600cc1..1a51b0c2 100644
> --- a/pbs-client/src/pxar_backup_stream.rs
> +++ b/pbs-client/src/pxar_backup_stream.rs
> @@ -1,3 +1,4 @@
> +use std::collections::VecDeque;
>  use std::io::Write;
>  //use std::os::unix::io::FromRawFd;
>  use std::path::Path;
> @@ -17,6 +18,8 @@ use proxmox_io::StdChannelWriter;
>
>  use pbs_datastore::catalog::CatalogWriter;
>
> +use crate::inject_reused_chunks::InjectChunks;
> +
>  /// Stream implementation to encode and upload .pxar archives.
>  ///
>  /// The hyper client needs an async Stream for file upload, so we
> @@ -40,6 +43,7 @@ impl PxarBackupStream {
>          dir: Dir,
>          catalog: Arc<Mutex<CatalogWriter<W>>>,
>          options: crate::pxar::PxarCreateOptions,
> +        boundaries: Arc<Mutex<VecDeque<InjectChunks>>>,
>          separate_payload_stream: bool,
>      ) -> Result<(Self, Option<Self>), Error> {
>          let buffer_size = 256 * 1024;
> @@ -79,6 +83,7 @@ impl PxarBackupStream {
>                  Some(catalog),
>                  payload_writer,
>                  options,
> +                boundaries,
>              )
>              .await
>              {
> @@ -110,11 +115,12 @@ impl PxarBackupStream {
>          dirname: &Path,
>          catalog: Arc<Mutex<CatalogWriter<W>>>,
>          options: crate::pxar::PxarCreateOptions,
> +        boundaries: Arc<Mutex<VecDeque<InjectChunks>>>,
>          separate_payload_stream: bool,

make boundaries optional (and maybe give it a more "readable" name ;)),
replace separate_payload_stream with its Some-ness?

>      ) -> Result<(Self, Option<Self>), Error> {
>          let dir = nix::dir::Dir::open(dirname, OFlag::O_DIRECTORY, Mode::empty())?;
>
> -        Self::new(dir, catalog, options, separate_payload_stream)
> +        Self::new(dir, catalog, options, boundaries, separate_payload_stream)
>      }
>  }
>
> diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs
> index e609aa16..f077ddf6 100644
> --- a/proxmox-backup-client/src/main.rs
> +++ b/proxmox-backup-client/src/main.rs
> @@ -1,4 +1,4 @@
> -use std::collections::HashSet;
> +use std::collections::{HashSet, VecDeque};
>  use std::io::{self, Read, Seek, SeekFrom, Write};
>  use std::path::{Path, PathBuf};
>  use std::pin::Pin;
> @@ -197,14 +197,19 @@ async fn backup_directory<P: AsRef<Path>>(
>          bail!("cannot backup directory with fixed chunk size!");
>      }
>
> +    let payload_boundaries = Arc::new(Mutex::new(VecDeque::new()));

make this an Option, set based on payload_target

>      let (pxar_stream, payload_stream) = PxarBackupStream::open(
>          dir_path.as_ref(),
>          catalog,
>          pxar_create_options,
> +        payload_boundaries.clone(),
>          payload_target.is_some(),
>      )?;
>
> -    let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);
> +    let dummy_injections = Arc::new(Mutex::new(VecDeque::new()));
> +    let dummy_boundaries = Arc::new(Mutex::new(VecDeque::new()));
> +    let mut chunk_stream =
> +        ChunkStream::new(pxar_stream, chunk_size, dummy_boundaries, dummy_injections);

replace these with None

>      let (tx, rx) = mpsc::channel(10); // allow to buffer 10 chunks
>
>      let stream = ReceiverStream::new(rx).map_err(Error::from);
> @@ -216,15 +221,18 @@ async fn backup_directory<P: AsRef<Path>>(
>          }
>      });
>
> -    let stats = client.upload_stream(archive_name, stream, upload_options.clone());
> +    let stats = client.upload_stream(archive_name, stream, upload_options.clone(), None);
>
>      if let Some(payload_stream) = payload_stream {
>          let payload_target = payload_target
>              .ok_or_else(|| format_err!("got payload stream, but no target archive name"))?;
>
> +        let payload_injections = Arc::new(Mutex::new(VecDeque::new()));
>          let mut payload_chunk_stream = ChunkStream::new(
>              payload_stream,
>              chunk_size,
> +            payload_boundaries,
> +            payload_injections.clone(),
>          );
>          let (payload_tx, payload_rx) = mpsc::channel(10); // allow to buffer 10 chunks
>          let stream = ReceiverStream::new(payload_rx).map_err(Error::from);
> @@ -240,6 +248,7 @@ async fn backup_directory<P: AsRef<Path>>(
>              &payload_target,
>              stream,
>              upload_options,
> +            Some(payload_injections),
>          );
>
>          match futures::join!(stats, payload_stats) {
> @@ -276,7 +285,7 @@ async fn backup_image<P: AsRef<Path>>(
>      }
>
>      let stats = client
> -        .upload_stream(archive_name, stream, upload_options)
> +        .upload_stream(archive_name, stream, upload_options, None)
>          .await?;
>
>      Ok(stats)
> @@ -567,7 +576,14 @@ fn spawn_catalog_upload(
>      let (catalog_tx, catalog_rx) = std::sync::mpsc::sync_channel(10); // allow to buffer 10 writes
>      let catalog_stream = proxmox_async::blocking::StdChannelStream(catalog_rx);
>      let catalog_chunk_size = 512 * 1024;
> -    let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));
> +    let boundaries = Arc::new(Mutex::new(VecDeque::new()));
> +    let injections = Arc::new(Mutex::new(VecDeque::new()));
> +    let catalog_chunk_stream = ChunkStream::new(
> +        catalog_stream,
> +        Some(catalog_chunk_size),
> +        boundaries,
> +        injections.clone(),
> +    );

replace these with None (they are also dummies AFAICT?)

>
>      let catalog_writer = Arc::new(Mutex::new(CatalogWriter::new(TokioWriterAdapter::new(
>          StdChannelWriter::new(catalog_tx),
> @@ -583,7 +599,7 @@ fn spawn_catalog_upload(
>
>      tokio::spawn(async move {
>          let catalog_upload_result = client
> -            .upload_stream(CATALOG_NAME, catalog_chunk_stream, upload_options)
> +            .upload_stream(CATALOG_NAME, catalog_chunk_stream, upload_options, None)
>              .await;
>
>          if let Err(ref err) = catalog_upload_result {
> diff --git a/proxmox-restore-daemon/src/proxmox_restore_daemon/api.rs b/proxmox-restore-daemon/src/proxmox_restore_daemon/api.rs
> index bd8ddb20..d912734c 100644
> --- a/proxmox-restore-daemon/src/proxmox_restore_daemon/api.rs
> +++ b/proxmox-restore-daemon/src/proxmox_restore_daemon/api.rs
> @@ -1,8 +1,10 @@
>  ///! File-restore API running inside the restore VM
> +use std::collections::VecDeque;
>  use std::ffi::OsStr;
>  use std::fs;
>  use std::os::unix::ffi::OsStrExt;
>  use std::path::{Path, PathBuf};
> +use std::sync::{Arc, Mutex};
>
>  use anyhow::{bail, Error};
>  use futures::FutureExt;
> @@ -364,6 +366,7 @@ fn extract(
>          None,
>          None,
>          options,
> +        Arc::new(Mutex::new(VecDeque::new())),
>      )
>      .await
>  }
> diff --git a/pxar-bin/src/main.rs b/pxar-bin/src/main.rs
> index e3b0faac..74ee04f7 100644
> --- a/pxar-bin/src/main.rs
> +++ b/pxar-bin/src/main.rs
> @@ -1,10 +1,10 @@
> -use std::collections::HashSet;
> +use std::collections::{HashSet, VecDeque};
>  use std::ffi::OsStr;
>  use std::fs::OpenOptions;
>  use std::os::unix::fs::OpenOptionsExt;
>  use std::path::{Path, PathBuf};
>  use std::sync::atomic::{AtomicBool, Ordering};
> -use std::sync::Arc;
> +use std::sync::{Arc, Mutex};
>
>  use anyhow::{bail, format_err, Error};
>  use futures::future::FutureExt;
> @@ -385,6 +385,7 @@ async fn create_archive(
>          None,
>          None,
>          options,
> +        Arc::new(Mutex::new(VecDeque::new())),

this could then just be None (or merged with the payload writer, as
suggested above)

>      )
>      .await?;
>
> diff --git a/tests/catar.rs b/tests/catar.rs
> index 04af4ffd..6edd747d 100644
> --- a/tests/catar.rs
> +++ b/tests/catar.rs
> @@ -1,4 +1,6 @@
> +use std::collections::VecDeque;
>  use std::process::Command;
> +use std::sync::{Arc, Mutex};
>
>  use anyhow::Error;
>
> @@ -41,6 +43,7 @@ fn run_test(dir_name: &str) -> Result<(), Error> {
>          None,
>          None,
>          options,
> +        Arc::new(Mutex::new(VecDeque::new())),

same here

>      ))?;
>
>      Command::new("cmp")
> --
> 2.39.2
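putting the client side of these suggestions together, backup_directory
could then look roughly like this (untested, assuming ChunkStream::new takes
an Option<InjectionData> as sketched above):

// only allocate the boundary queue if a payload stream will actually exist
let payload_boundaries = payload_target
    .is_some()
    .then(|| Arc::new(Mutex::new(VecDeque::new())));

let (pxar_stream, payload_stream) = PxarBackupStream::open(
    dir_path.as_ref(),
    catalog,
    pxar_create_options,
    payload_boundaries.clone(),
)?;

// the metadata stream never sees injected chunks
let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size, None);
// ...
let stats = client.upload_stream(archive_name, stream, upload_options.clone(), None);

if let Some(payload_stream) = payload_stream {
    let payload_target = payload_target
        .ok_or_else(|| format_err!("got payload stream, but no target archive name"))?;
    // Some by construction: the queue was created because payload_target was set
    let boundaries = payload_boundaries.expect("payload stream implies boundary queue");
    let injections = Arc::new(Mutex::new(VecDeque::new()));
    let mut payload_chunk_stream = ChunkStream::new(
        payload_stream,
        chunk_size,
        Some(InjectionData {
            boundaries,
            injections: injections.clone(),
            consumed: 0,
        }),
    );
    // ...
    let payload_stats =
        client.upload_stream(&payload_target, stream, upload_options, Some(injections));
}

with PxarBackupStream::open deriving the old separate_payload_stream flag
from payload_boundaries.is_some().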