From: Christian Ebner <c.ebner@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [pbs-devel] [PATCH v5 pxar 06/62] decoder/accessor: add optional payload input stream
Date: Tue,  7 May 2024 17:51:48 +0200
Message-ID: <20240507155244.793819-7-c.ebner@proxmox.com>
In-Reply-To: <20240507155244.793819-1-c.ebner@proxmox.com>

Implement an optional redirection to read the payload for regular files
from a different input stream.

This allows decoding split stream archives.
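
For illustration, a minimal sketch of how a caller passes the new optional
payload input through the async decoder (the file names are placeholders only;
passing `None` keeps the previous single-stream behaviour):

    use pxar::decoder::aio::Decoder;

    #[tokio::main]
    async fn main() {
        let metadata = tokio::fs::File::open("archive.mpxar")
            .await
            .expect("failed to open metadata archive");
        let payload = tokio::fs::File::open("archive.ppxar")
            .await
            .expect("failed to open payload file");

        // Regular file payloads are now read from `payload`, everything
        // else from `metadata`.
        let mut decoder = Decoder::from_tokio(metadata, Some(payload))
            .await
            .expect("failed to open pxar archive contents");

        while let Some(entry) = decoder.next().await {
            println!("{:?}", entry.expect("failed to parse entry").path());
        }
    }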

Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
---
changes since version 4:
- no changes
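
As a companion illustration, the accessor takes the payload input together
with its size. The sketch below uses in-memory buffers like the existing tests
do; `open_split`, `meta_buf` and `payload_buf` are made-up names, not part of
this patch:

    use pxar::accessor::sync::Accessor;

    fn open_split(meta_buf: &[u8], payload_buf: &[u8]) -> std::io::Result<()> {
        // `None` instead of the tuple keeps the old single-archive behaviour.
        let accessor = Accessor::new(
            meta_buf,
            meta_buf.len() as u64,
            Some((payload_buf, payload_buf.len() as u64)),
        )?;

        // Directory metadata is decoded from `meta_buf`, while file contents
        // opened through this accessor are served from `payload_buf`.
        let _root = accessor.open_root()?;
        Ok(())
    }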

 examples/apxar.rs    |  2 +-
 src/accessor/aio.rs  | 10 +++--
 src/accessor/mod.rs  | 78 ++++++++++++++++++++++++++++++------
 src/accessor/sync.rs |  8 ++--
 src/decoder/aio.rs   | 14 ++++---
 src/decoder/mod.rs   | 94 ++++++++++++++++++++++++++++++++++++++------
 src/decoder/sync.rs  | 15 ++++---
 src/lib.rs           |  3 ++
 tests/compat.rs      |  3 +-
 tests/simple/main.rs |  5 ++-
 10 files changed, 188 insertions(+), 44 deletions(-)
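
For reference, the offset handling added in src/accessor/mod.rs and
src/decoder/mod.rs below boils down to the following arithmetic. This is a
sketch, not code from the patch; the helper names are invented and
`HEADER_SIZE` stands for `size_of::<pxar::format::Header>()`, the header
preceding each payload in the payload stream:

    use std::mem::size_of;
    use std::ops::Range;

    const HEADER_SIZE: u64 = size_of::<pxar::format::Header>() as u64;

    /// Content range of a PXAR_PAYLOAD_REF entry inside the payload stream:
    /// the referenced offset points at the PXAR_PAYLOAD header, the raw file
    /// contents start right after it.
    fn payload_content_range(payload_offset: u64, size: u64) -> Range<u64> {
        let start = payload_offset + HEADER_SIZE;
        start..start + size
    }

    /// Bytes the sequential decoder skips in the payload input to reach the
    /// next referenced payload, given how much it has consumed so far; the
    /// actual decoder bails out instead of panicking when the offset would
    /// go backwards.
    fn payload_skip(payload_consumed: u64, payload_offset: u64) -> u64 {
        payload_offset
            .checked_sub(payload_consumed)
            .expect("offset smaller than already consumed payload")
    }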

diff --git a/examples/apxar.rs b/examples/apxar.rs
index 0c62242..d5eb04e 100644
--- a/examples/apxar.rs
+++ b/examples/apxar.rs
@@ -9,7 +9,7 @@ async fn main() {
         .await
         .expect("failed to open file");
 
-    let mut reader = Decoder::from_tokio(file)
+    let mut reader = Decoder::from_tokio(file, None)
         .await
         .expect("failed to open pxar archive contents");
 
diff --git a/src/accessor/aio.rs b/src/accessor/aio.rs
index 98d7755..06167b4 100644
--- a/src/accessor/aio.rs
+++ b/src/accessor/aio.rs
@@ -39,7 +39,7 @@ impl<T: FileExt> Accessor<FileReader<T>> {
     /// by a blocking file.
     #[inline]
     pub async fn from_file_and_size(input: T, size: u64) -> io::Result<Self> {
-        Accessor::new(FileReader::new(input), size).await
+        Accessor::new(FileReader::new(input), size, None).await
     }
 }
 
@@ -75,7 +75,7 @@ where
         input: T,
         size: u64,
     ) -> io::Result<Accessor<FileRefReader<T>>> {
-        Accessor::new(FileRefReader::new(input), size).await
+        Accessor::new(FileRefReader::new(input), size, None).await
     }
 }
 
@@ -85,9 +85,11 @@ impl<T: ReadAt> Accessor<T> {
     ///
     /// Note that the `input`'s `SeqRead` implementation must always return `Poll::Ready` and is
     /// not allowed to use the `Waker`, as this will cause a `panic!`.
-    pub async fn new(input: T, size: u64) -> io::Result<Self> {
+    /// Optionally take the file payloads from the provided input stream rather than the regular
+    /// pxar stream.
+    pub async fn new(input: T, size: u64, payload_input: Option<(T, u64)>) -> io::Result<Self> {
         Ok(Self {
-            inner: accessor::AccessorImpl::new(input, size).await?,
+            inner: accessor::AccessorImpl::new(input, size, payload_input).await?,
         })
     }
 
diff --git a/src/accessor/mod.rs b/src/accessor/mod.rs
index 6a2de73..46afbe3 100644
--- a/src/accessor/mod.rs
+++ b/src/accessor/mod.rs
@@ -182,10 +182,11 @@ pub(crate) struct AccessorImpl<T> {
     input: T,
     size: u64,
     caches: Arc<Caches>,
+    payload_input: Option<(T, Range<u64>)>,
 }
 
 impl<T: ReadAt> AccessorImpl<T> {
-    pub async fn new(input: T, size: u64) -> io::Result<Self> {
+    pub async fn new(input: T, size: u64, payload_input: Option<(T, u64)>) -> io::Result<Self> {
         if size < (size_of::<GoodbyeItem>() as u64) {
             io_bail!("too small to contain a pxar archive");
         }
@@ -194,6 +195,7 @@ impl<T: ReadAt> AccessorImpl<T> {
             input,
             size,
             caches: Arc::new(Caches::default()),
+            payload_input: payload_input.map(|(input, size)| (input, 0..size)),
         })
     }
 
@@ -207,6 +209,9 @@ impl<T: ReadAt> AccessorImpl<T> {
             self.size,
             "/".into(),
             Arc::clone(&self.caches),
+            self.payload_input
+                .as_ref()
+                .map(|(input, range)| (input as &dyn ReadAt, range.clone())),
         )
         .await
     }
@@ -227,8 +232,15 @@ async fn get_decoder<T: ReadAt>(
     input: T,
     entry_range: Range<u64>,
     path: PathBuf,
+    payload_input: Option<(T, Range<u64>)>,
 ) -> io::Result<DecoderImpl<SeqReadAtAdapter<T>>> {
-    DecoderImpl::new_full(SeqReadAtAdapter::new(input, entry_range), path, true).await
+    DecoderImpl::new_full(
+        SeqReadAtAdapter::new(input, entry_range.clone()),
+        path,
+        true,
+        payload_input.map(|(input, range)| SeqReadAtAdapter::new(input, range)),
+    )
+    .await
 }
 
 // NOTE: This performs the Decoder::read_next_item() behavior! Keep in mind when changing!
@@ -236,6 +248,7 @@ async fn get_decoder_at_filename<T: ReadAt>(
     input: T,
     entry_range: Range<u64>,
     path: PathBuf,
+    payload_input: Option<(T, Range<u64>)>,
 ) -> io::Result<(DecoderImpl<SeqReadAtAdapter<T>>, u64)> {
     // Read the header, it should be a FILENAME, then skip over it and its length:
     let header: format::Header = read_entry_at(&input, entry_range.start).await?;
@@ -251,7 +264,7 @@ async fn get_decoder_at_filename<T: ReadAt>(
     }
 
     Ok((
-        get_decoder(input, entry_offset..entry_range.end, path).await?,
+        get_decoder(input, entry_offset..entry_range.end, path, payload_input).await?,
         entry_offset,
     ))
 }
@@ -263,6 +276,7 @@ impl<T: Clone + ReadAt> AccessorImpl<T> {
             self.size,
             "/".into(),
             Arc::clone(&self.caches),
+            self.payload_input.clone(),
         )
         .await
     }
@@ -274,6 +288,7 @@ impl<T: Clone + ReadAt> AccessorImpl<T> {
             offset,
             "/".into(),
             Arc::clone(&self.caches),
+            self.payload_input.clone(),
         )
         .await
     }
@@ -287,23 +302,30 @@ impl<T: Clone + ReadAt> AccessorImpl<T> {
             self.input.clone(),
             entry_range_info.entry_range.clone(),
             PathBuf::new(),
+            self.payload_input.clone(),
         )
         .await?;
         let entry = decoder
             .next()
             .await
             .ok_or_else(|| io_format_err!("unexpected EOF while decoding file entry"))??;
+
         Ok(FileEntryImpl {
             input: self.input.clone(),
             entry,
             entry_range_info: entry_range_info.clone(),
             caches: Arc::clone(&self.caches),
+            payload_input: self.payload_input.clone(),
         })
     }
 
     /// Allow opening arbitrary contents from a specific range.
     pub unsafe fn open_contents_at_range(&self, range: Range<u64>) -> FileContentsImpl<T> {
-        FileContentsImpl::new(self.input.clone(), range)
+        if let Some((payload_input, _)) = &self.payload_input {
+            FileContentsImpl::new(payload_input.clone(), range)
+        } else {
+            FileContentsImpl::new(self.input.clone(), range)
+        }
     }
 
     /// Following a hardlink breaks a couple of conventions we otherwise have, particularly we will
@@ -326,9 +348,13 @@ impl<T: Clone + ReadAt> AccessorImpl<T> {
 
         let link_offset = entry_file_offset - link_offset;
 
-        let (mut decoder, entry_offset) =
-            get_decoder_at_filename(self.input.clone(), link_offset..self.size, PathBuf::new())
-                .await?;
+        let (mut decoder, entry_offset) = get_decoder_at_filename(
+            self.input.clone(),
+            link_offset..self.size,
+            PathBuf::new(),
+            self.payload_input.clone(),
+        )
+        .await?;
 
         let entry = decoder
             .next()
@@ -342,6 +368,7 @@ impl<T: Clone + ReadAt> AccessorImpl<T> {
             EntryKind::File {
                 offset: Some(offset),
                 size,
+                ..
             } => {
                 let meta_size = offset - link_offset;
                 let entry_end = link_offset + meta_size + size;
@@ -353,6 +380,7 @@ impl<T: Clone + ReadAt> AccessorImpl<T> {
                         entry_range: entry_offset..entry_end,
                     },
                     caches: Arc::clone(&self.caches),
+                    payload_input: self.payload_input.clone(),
                 })
             }
             _ => io_bail!("hardlink does not point to a regular file"),
@@ -369,6 +397,7 @@ pub(crate) struct DirectoryImpl<T> {
     table: Arc<[GoodbyeItem]>,
     path: PathBuf,
     caches: Arc<Caches>,
+    payload_input: Option<(T, Range<u64>)>,
 }
 
 impl<T: Clone + ReadAt> DirectoryImpl<T> {
@@ -378,6 +407,7 @@ impl<T: Clone + ReadAt> DirectoryImpl<T> {
         end_offset: u64,
         path: PathBuf,
         caches: Arc<Caches>,
+        payload_input: Option<(T, Range<u64>)>,
     ) -> io::Result<DirectoryImpl<T>> {
         let tail = Self::read_tail_entry(&input, end_offset).await?;
 
@@ -407,6 +437,7 @@ impl<T: Clone + ReadAt> DirectoryImpl<T> {
             table: table.as_ref().map_or_else(|| Arc::new([]), Arc::clone),
             path,
             caches,
+            payload_input,
         };
 
         // sanity check:
@@ -502,6 +533,7 @@ impl<T: Clone + ReadAt> DirectoryImpl<T> {
                 None => self.path.clone(),
                 Some(file) => self.path.join(file),
             },
+            self.payload_input.clone(),
         )
         .await
     }
@@ -533,6 +565,7 @@ impl<T: Clone + ReadAt> DirectoryImpl<T> {
                 entry_range: self.entry_range(),
             },
             caches: Arc::clone(&self.caches),
+            payload_input: self.payload_input.clone(),
         })
     }
 
@@ -575,6 +608,10 @@ impl<T: Clone + ReadAt> DirectoryImpl<T> {
             cur = next;
         }
 
+        if let Some(cur) = cur.as_mut() {
+            cur.payload_input = self.payload_input.clone();
+        }
+
         Ok(cur)
     }
 
@@ -599,7 +636,9 @@ impl<T: Clone + ReadAt> DirectoryImpl<T> {
 
             let cursor = self.get_cursor(index).await?;
             if cursor.file_name == path {
-                return Ok(Some(cursor.decode_entry().await?));
+                let mut entry = cursor.decode_entry().await?;
+                entry.payload_input = self.payload_input.clone();
+                return Ok(Some(entry));
             }
 
             dup += 1;
@@ -685,6 +724,7 @@ pub(crate) struct FileEntryImpl<T: Clone + ReadAt> {
     entry: Entry,
     entry_range_info: EntryRangeInfo,
     caches: Arc<Caches>,
+    payload_input: Option<(T, Range<u64>)>,
 }
 
 impl<T: Clone + ReadAt> FileEntryImpl<T> {
@@ -698,6 +738,7 @@ impl<T: Clone + ReadAt> FileEntryImpl<T> {
             self.entry_range_info.entry_range.end,
             self.entry.path.clone(),
             Arc::clone(&self.caches),
+            self.payload_input.clone(),
         )
         .await
     }
@@ -711,15 +752,29 @@ impl<T: Clone + ReadAt> FileEntryImpl<T> {
             EntryKind::File {
                 size,
                 offset: Some(offset),
+                payload_offset: None,
             } => Ok(Some(offset..(offset + size))),
+            // The payload offset takes precedence over the regular offset, if present
+            EntryKind::File {
+                size,
+                offset: Some(_offset),
+                payload_offset: Some(payload_offset),
+            } => {
+                let start_offset = payload_offset + size_of::<format::Header>() as u64;
+                Ok(Some(start_offset..start_offset + size))
+            }
             _ => Ok(None),
         }
     }
 
     pub async fn contents(&self) -> io::Result<FileContentsImpl<T>> {
-        match self.content_range()? {
-            Some(range) => Ok(FileContentsImpl::new(self.input.clone(), range)),
-            None => io_bail!("not a file"),
+        let range = self
+            .content_range()?
+            .ok_or_else(|| io_format_err!("not a file"))?;
+        if let Some((ref payload_input, _)) = self.payload_input {
+            Ok(FileContentsImpl::new(payload_input.clone(), range))
+        } else {
+            Ok(FileContentsImpl::new(self.input.clone(), range))
         }
     }
 
@@ -808,6 +863,7 @@ impl<'a, T: Clone + ReadAt> DirEntryImpl<'a, T> {
             entry,
             entry_range_info: self.entry_range_info.clone(),
             caches: Arc::clone(&self.caches),
+            payload_input: self.dir.payload_input.clone(),
         })
     }
 
diff --git a/src/accessor/sync.rs b/src/accessor/sync.rs
index a777152..cd8dff0 100644
--- a/src/accessor/sync.rs
+++ b/src/accessor/sync.rs
@@ -31,7 +31,7 @@ impl<T: FileExt> Accessor<FileReader<T>> {
     /// Decode a `pxar` archive from a standard file implementing `FileExt`.
     #[inline]
     pub fn from_file_and_size(input: T, size: u64) -> io::Result<Self> {
-        Accessor::new(FileReader::new(input), size)
+        Accessor::new(FileReader::new(input), size, None)
     }
 }
 
@@ -64,7 +64,7 @@ where
 {
     /// Open an `Arc` or `Rc` of `File`.
     pub fn from_file_ref_and_size(input: T, size: u64) -> io::Result<Accessor<FileRefReader<T>>> {
-        Accessor::new(FileRefReader::new(input), size)
+        Accessor::new(FileRefReader::new(input), size, None)
     }
 }
 
@@ -74,9 +74,9 @@ impl<T: ReadAt> Accessor<T> {
     ///
     /// Note that the `input`'s `SeqRead` implementation must always return `Poll::Ready` and is
     /// not allowed to use the `Waker`, as this will cause a `panic!`.
-    pub fn new(input: T, size: u64) -> io::Result<Self> {
+    pub fn new(input: T, size: u64, payload_input: Option<(T, u64)>) -> io::Result<Self> {
         Ok(Self {
-            inner: poll_result_once(accessor::AccessorImpl::new(input, size))?,
+            inner: poll_result_once(accessor::AccessorImpl::new(input, size, payload_input))?,
         })
     }
 
diff --git a/src/decoder/aio.rs b/src/decoder/aio.rs
index 4de8c6f..bb032cf 100644
--- a/src/decoder/aio.rs
+++ b/src/decoder/aio.rs
@@ -20,8 +20,12 @@ pub struct Decoder<T> {
 impl<T: tokio::io::AsyncRead> Decoder<TokioReader<T>> {
     /// Decode a `pxar` archive from a `tokio::io::AsyncRead` input.
     #[inline]
-    pub async fn from_tokio(input: T) -> io::Result<Self> {
-        Decoder::new(TokioReader::new(input)).await
+    pub async fn from_tokio(input: T, payload_input: Option<T>) -> io::Result<Self> {
+        Decoder::new(
+            TokioReader::new(input),
+            payload_input.map(|payload_input| TokioReader::new(payload_input)),
+        )
+        .await
     }
 }
 
@@ -30,15 +34,15 @@ impl Decoder<TokioReader<tokio::fs::File>> {
     /// Decode a `pxar` archive from a `tokio::io::AsyncRead` input.
     #[inline]
     pub async fn open<P: AsRef<Path>>(path: P) -> io::Result<Self> {
-        Decoder::from_tokio(tokio::fs::File::open(path.as_ref()).await?).await
+        Decoder::from_tokio(tokio::fs::File::open(path.as_ref()).await?, None).await
     }
 }
 
 impl<T: SeqRead> Decoder<T> {
     /// Create an async decoder from an input implementing our internal read interface.
-    pub async fn new(input: T) -> io::Result<Self> {
+    pub async fn new(input: T, payload_input: Option<T>) -> io::Result<Self> {
         Ok(Self {
-            inner: decoder::DecoderImpl::new(input).await?,
+            inner: decoder::DecoderImpl::new(input, payload_input).await?,
         })
     }
 
diff --git a/src/decoder/mod.rs b/src/decoder/mod.rs
index d19ffd1..07b6c61 100644
--- a/src/decoder/mod.rs
+++ b/src/decoder/mod.rs
@@ -157,6 +157,10 @@ pub(crate) struct DecoderImpl<T> {
     state: State,
     with_goodbye_tables: bool,
 
+    // Payload of regular files might be provided by a different reader
+    payload_input: Option<T>,
+    payload_consumed: u64,
+
     /// The random access code uses decoders for sub-ranges which may not end in a `PAYLOAD` for
     /// entries like FIFOs or sockets, so there we explicitly allow an item to terminate with EOF.
     eof_after_entry: bool,
@@ -167,6 +171,7 @@ enum State {
     Default,
     InPayload {
         offset: u64,
+        size: u64,
     },
 
     /// file entries with no data (fifo, socket)
@@ -195,8 +200,8 @@ pub(crate) enum ItemResult {
 }
 
 impl<I: SeqRead> DecoderImpl<I> {
-    pub async fn new(input: I) -> io::Result<Self> {
-        Self::new_full(input, "/".into(), false).await
+    pub async fn new(input: I, payload_input: Option<I>) -> io::Result<Self> {
+        Self::new_full(input, "/".into(), false, payload_input).await
     }
 
     pub(crate) fn input(&self) -> &I {
@@ -207,6 +212,7 @@ impl<I: SeqRead> DecoderImpl<I> {
         input: I,
         path: PathBuf,
         eof_after_entry: bool,
+        payload_input: Option<I>,
     ) -> io::Result<Self> {
         let this = DecoderImpl {
             input,
@@ -219,6 +225,8 @@ impl<I: SeqRead> DecoderImpl<I> {
             path_lengths: Vec::new(),
             state: State::Begin,
             with_goodbye_tables: false,
+            payload_input,
+            payload_consumed: 0,
             eof_after_entry,
         };
 
@@ -242,9 +250,14 @@ impl<I: SeqRead> DecoderImpl<I> {
                     // hierarchy and parse the next PXAR_FILENAME or the PXAR_GOODBYE:
                     self.read_next_item().await?;
                 }
-                State::InPayload { offset } => {
-                    // We need to skip the current payload first.
-                    self.skip_entry(offset).await?;
+                State::InPayload { offset, .. } => {
+                    if self.payload_input.is_some() {
+                        // Update consumed payload as given by the offset referenced by the content reader
+                        self.payload_consumed += offset;
+                    } else {
+                        // Skip remaining payload of current entry in regular stream
+                        self.skip_entry(offset).await?;
+                    }
                     self.read_next_item().await?;
                 }
                 State::InGoodbyeTable => {
@@ -300,19 +313,23 @@ impl<I: SeqRead> DecoderImpl<I> {
     }
 
     pub fn content_size(&self) -> Option<u64> {
-        if let State::InPayload { .. } = self.state {
-            Some(self.current_header.content_size())
+        if let State::InPayload { size, .. } = self.state {
+            if self.payload_input.is_some() {
+                Some(size)
+            } else {
+                Some(self.current_header.content_size())
+            }
         } else {
             None
         }
     }
 
     pub fn content_reader(&mut self) -> Option<Contents<I>> {
-        if let State::InPayload { offset } = &mut self.state {
+        if let State::InPayload { offset, size } = &mut self.state {
             Some(Contents::new(
-                &mut self.input,
+                self.payload_input.as_mut().unwrap_or(&mut self.input),
                 offset,
-                self.current_header.content_size(),
+                *size,
             ))
         } else {
             None
@@ -531,8 +548,63 @@ impl<I: SeqRead> DecoderImpl<I> {
                 self.entry.kind = EntryKind::File {
                     size: self.current_header.content_size(),
                     offset,
+                    payload_offset: None,
+                };
+                self.state = State::InPayload {
+                    offset: 0,
+                    size: self.current_header.content_size(),
+                };
+                return Ok(ItemResult::Entry);
+            }
+            format::PXAR_PAYLOAD_REF => {
+                let offset = seq_read_position(&mut self.input).await.transpose()?;
+                let payload_ref = self.read_payload_ref().await?;
+
+                if let Some(payload_input) = self.payload_input.as_mut() {
+                    if seq_read_position(payload_input)
+                        .await
+                        .transpose()?
+                        .is_none()
+                    {
+                        if self.payload_consumed > payload_ref.offset {
+                            io_bail!(
+                                "unexpected offset {}, smaller than already consumed payload {}",
+                                payload_ref.offset,
+                                self.payload_consumed,
+                            );
+                        }
+                        let to_skip = payload_ref.offset - self.payload_consumed;
+                        Self::skip(payload_input, to_skip as usize).await?;
+                        self.payload_consumed += to_skip;
+                    }
+
+                    let header: Header = seq_read_entry(payload_input).await?;
+                    if header.htype != format::PXAR_PAYLOAD {
+                        io_bail!(
+                            "unexpected header in payload input: expected {} , got {header}",
+                            format::PXAR_PAYLOAD,
+                        );
+                    }
+                    self.payload_consumed += size_of::<Header>() as u64;
+
+                    if header.content_size() != payload_ref.size {
+                        io_bail!(
+                            "encountered payload size mismatch: got {}, expected {}",
+                            payload_ref.size,
+                            header.content_size(),
+                        );
+                    }
+                }
+
+                self.entry.kind = EntryKind::File {
+                    size: payload_ref.size,
+                    offset,
+                    payload_offset: Some(payload_ref.offset),
+                };
+                self.state = State::InPayload {
+                    offset: 0,
+                    size: payload_ref.size,
                 };
-                self.state = State::InPayload { offset: 0 };
                 return Ok(ItemResult::Entry);
             }
             format::PXAR_FILENAME | format::PXAR_GOODBYE => {
diff --git a/src/decoder/sync.rs b/src/decoder/sync.rs
index 5597a03..caa8bcd 100644
--- a/src/decoder/sync.rs
+++ b/src/decoder/sync.rs
@@ -25,8 +25,11 @@ pub struct Decoder<T> {
 impl<T: io::Read> Decoder<StandardReader<T>> {
     /// Decode a `pxar` archive from a regular `std::io::Read` input.
     #[inline]
-    pub fn from_std(input: T) -> io::Result<Self> {
-        Decoder::new(StandardReader::new(input))
+    pub fn from_std(input: T, payload_input: Option<T>) -> io::Result<Self> {
+        Decoder::new(
+            StandardReader::new(input),
+            payload_input.map(|payload_input| StandardReader::new(payload_input)),
+        )
     }
 
     /// Get a direct reference to the reader contained inside the contained [`StandardReader`].
@@ -38,7 +41,7 @@ impl<T: io::Read> Decoder<StandardReader<T>> {
 impl Decoder<StandardReader<std::fs::File>> {
     /// Convenience shortcut for `File::open` followed by `Accessor::from_file`.
     pub fn open<P: AsRef<Path>>(path: P) -> io::Result<Self> {
-        Self::from_std(std::fs::File::open(path.as_ref())?)
+        Self::from_std(std::fs::File::open(path.as_ref())?, None)
     }
 }
 
@@ -47,9 +50,11 @@ impl<T: SeqRead> Decoder<T> {
     ///
     /// Note that the `input`'s `SeqRead` implementation must always return `Poll::Ready` and is
     /// not allowed to use the `Waker`, as this will cause a `panic!`.
-    pub fn new(input: T) -> io::Result<Self> {
+    /// The optional payload input must be used to restore regular file payloads for payload references
+    /// encountered within the archive.
+    pub fn new(input: T, payload_input: Option<T>) -> io::Result<Self> {
         Ok(Self {
-            inner: poll_result_once(decoder::DecoderImpl::new(input))?,
+            inner: poll_result_once(decoder::DecoderImpl::new(input, payload_input))?,
         })
     }
 
diff --git a/src/lib.rs b/src/lib.rs
index 210c4b1..ef81a85 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -364,6 +364,9 @@ pub enum EntryKind {
 
         /// The file's byte offset inside the archive, if available.
         offset: Option<u64>,
+
+        /// The file's byte offset inside the payload stream, if available.
+        payload_offset: Option<u64>,
     },
 
     /// Directory entry. When iterating through an archive, the contents follow next.
diff --git a/tests/compat.rs b/tests/compat.rs
index 3b43e38..a1514ba 100644
--- a/tests/compat.rs
+++ b/tests/compat.rs
@@ -94,7 +94,8 @@ fn create_archive() -> io::Result<Vec<u8>> {
 fn test_archive() {
     let archive = create_archive().expect("failed to create test archive");
     let mut input = &archive[..];
-    let mut decoder = decoder::Decoder::from_std(&mut input).expect("failed to create decoder");
+    let mut decoder =
+        decoder::Decoder::from_std(&mut input, None).expect("failed to create decoder");
 
     let item = decoder
         .next()
diff --git a/tests/simple/main.rs b/tests/simple/main.rs
index e55457f..6ee93d1 100644
--- a/tests/simple/main.rs
+++ b/tests/simple/main.rs
@@ -61,13 +61,14 @@ fn test1() {
     // std::fs::write("myarchive.pxar", &file).expect("failed to write out test archive");
 
     let mut input = &file[..];
-    let mut decoder = decoder::Decoder::from_std(&mut input).expect("failed to create decoder");
+    let mut decoder =
+        decoder::Decoder::from_std(&mut input, None).expect("failed to create decoder");
     let decoded_fs =
         fs::Entry::decode_from(&mut decoder).expect("failed to decode previously encoded archive");
 
     assert_eq!(test_fs, decoded_fs);
 
-    let accessor = accessor::Accessor::new(&file[..], file.len() as u64)
+    let accessor = accessor::Accessor::new(&file[..], file.len() as u64, None)
         .expect("failed to create random access reader for encoded archive");
 
     check_bunzip2(&accessor);
-- 
2.39.2



Thread overview: 64+ messages
2024-05-07 15:51 [pbs-devel] [PATCH v5 pxar proxmox-backup 00/62] fix #3174: improve file-level backup Christian Ebner
2024-05-07 15:51 ` [pbs-devel] [PATCH v5 pxar 01/62] format/examples: add header type `PXAR_PAYLOAD_REF` Christian Ebner
2024-05-07 15:51 ` [pbs-devel] [PATCH v5 pxar 02/62] decoder: add method to read payload references Christian Ebner
2024-05-07 15:51 ` [pbs-devel] [PATCH v5 pxar 03/62] decoder: factor out skip part from skip_entry Christian Ebner
2024-05-07 15:51 ` [pbs-devel] [PATCH v5 pxar 04/62] encoder: add optional output writer for file payloads Christian Ebner
2024-05-07 15:51 ` [pbs-devel] [PATCH v5 pxar 05/62] encoder: move to stack based state tracking Christian Ebner
2024-05-07 15:51 ` [pbs-devel] [PATCH v5 pxar 06/62] decoder/accessor: add optional payload input stream Christian Ebner [this message]
2024-05-07 15:51 ` [pbs-devel] [PATCH v5 pxar 07/62] decoder: set payload input range when decoding via accessor Christian Ebner
2024-05-07 15:51 ` [pbs-devel] [PATCH v5 pxar 08/62] encoder: add payload reference capability Christian Ebner
2024-05-07 15:51 ` [pbs-devel] [PATCH v5 pxar 09/62] encoder: add payload position capability Christian Ebner
2024-05-07 15:51 ` [pbs-devel] [PATCH v5 pxar 10/62] encoder: add payload advance capability Christian Ebner
2024-05-07 15:51 ` [pbs-devel] [PATCH v5 pxar 11/62] encoder/format: finish payload stream with marker Christian Ebner
2024-05-07 15:51 ` [pbs-devel] [PATCH v5 pxar 12/62] format: add payload stream start marker Christian Ebner
2024-05-07 15:51 ` [pbs-devel] [PATCH v5 pxar 13/62] format/encoder/decoder: new pxar entry type `Version` Christian Ebner
2024-05-07 15:51 ` [pbs-devel] [PATCH v5 pxar 14/62] format/encoder/decoder: new pxar entry type `Prelude` Christian Ebner
2024-05-07 15:51 ` [pbs-devel] [PATCH v5 proxmox-backup 15/62] client: pxar: switch to stack based encoder state Christian Ebner
2024-05-07 15:51 ` [pbs-devel] [PATCH v5 proxmox-backup 16/62] client: backup: factor out extension from backup target Christian Ebner
2024-05-07 15:51 ` [pbs-devel] [PATCH v5 proxmox-backup 17/62] client: pxar: combine writers into struct Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 18/62] client: pxar: add optional pxar payload writer instance Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 19/62] client: pxar: optionally split metadata and payload streams Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 20/62] client: helper: add helpers for creating reader instances Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 21/62] client: helper: add method for split archive name mapping Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 22/62] client: restore: read payload from dedicated index Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 23/62] tools: cover extension for split pxar archives Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 24/62] restore: " Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 25/62] client: mount: make split pxar archives mountable Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 26/62] api: datastore: refactor getting local chunk reader Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 27/62] api: datastore: attach optional payload " Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 28/62] catalog: shell: make split pxar archives accessible Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 29/62] www: cover metadata extension for pxar archives Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 30/62] file restore: factor out getting pxar reader Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 31/62] file restore: cover split metadata and payload archives Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 32/62] file restore: show more error context when extraction fails Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 33/62] pxar: add optional payload input for achive restore Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 34/62] pxar: add more context to extraction error Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 35/62] client: pxar: include payload offset in entry listing Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 36/62] pxar: show padding in debug output on archive list Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 37/62] datastore: dynamic index: add method to get digest Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 38/62] client: pxar: helper for lookup of reusable dynamic entries Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 39/62] upload stream: implement reused chunk injector Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 40/62] client: chunk stream: add struct to hold injection state Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 41/62] client: streams: add channels for dynamic entry injection Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 42/62] specs: add backup detection mode specification Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 43/62] client: implement prepare reference method Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 44/62] client: pxar: add method for metadata comparison Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 45/62] pxar: caching: add look-ahead cache types Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 46/62] fix #3174: client: pxar: enable caching and meta comparison Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 47/62] client: backup writer: add injected chunk count to stats Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 48/62] pxar: create: keep track of reused chunks and files Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 49/62] pxar: create: show chunk injection stats debug output Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 50/62] client: pxar: add helper to handle optional preludes Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 51/62] client: pxar: opt encode cli exclude patterns as Prelude Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 52/62] docs: file formats: describe split pxar archive file layout Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 53/62] docs: add section describing change detection mode Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 54/62] test-suite: add detection mode change benchmark Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 55/62] test-suite: add bin to deb, add shell completions Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 56/62] datastore: chunker: add Chunker trait Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 57/62] datastore: chunker: implement chunker for payload stream Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 58/62] client: chunk stream: switch payload stream chunker Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 59/62] client: pxar: allow to restore prelude to optional path Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 60/62] client: pxar: add archive creation with reference test Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 61/62] client: tools: add helper to raise nofile rlimit Christian Ebner
2024-05-07 15:52 ` [pbs-devel] [PATCH v5 proxmox-backup 62/62] client: pxar: set cache limit based on " Christian Ebner
2024-05-14 10:52 ` [pbs-devel] [PATCH v5 pxar proxmox-backup 00/62] fix #3174: improve file-level backup Christian Ebner
