From: Thomas Lamprecht <t.lamprecht@proxmox.com>
To: Proxmox Backup Server development discussion
<pbs-devel@lists.proxmox.com>,
Dominik Csapak <d.csapak@proxmox.com>
Subject: Re: [pbs-devel] [PATCH proxmox-backup v2 5/5] server/rest: compress static files
Date: Fri, 2 Apr 2021 14:32:41 +0200
Message-ID: <59ee51d7-4d1d-18be-ec41-ae299253b8e8@proxmox.com>
In-Reply-To: <20210401141123.12964-6-d.csapak@proxmox.com>
On 01.04.21 16:11, Dominik Csapak wrote:
> compress them on the fly
>
> Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
> ---
> src/server/rest.rs | 93 ++++++++++++++++++++++++++++++++--------------
> 1 file changed, 66 insertions(+), 27 deletions(-)
>
> diff --git a/src/server/rest.rs b/src/server/rest.rs
> index 357d1b81..6b1bb0cb 100644
> --- a/src/server/rest.rs
> +++ b/src/server/rest.rs
> @@ -40,6 +40,7 @@ use crate::auth_helpers::*;
> use crate::config::cached_user_info::CachedUserInfo;
> use crate::tools;
> use crate::tools::compression::{CompressionMethod, DeflateEncoder, Level};
> +use crate::tools::AsyncReaderStream;
> use crate::tools::FileLogger;
>
> extern "C" {
> @@ -432,16 +433,18 @@ pub async fn handle_api_request<Env: RpcEnvironment, S: 'static + BuildHasher +
>
> let resp = match compression {
> Some(CompressionMethod::Deflate) => {
> - resp.headers_mut()
> - .insert(header::CONTENT_ENCODING, CompressionMethod::Deflate.content_encoding());
> - resp.map(|body|
> + resp.headers_mut().insert(
> + header::CONTENT_ENCODING,
> + CompressionMethod::Deflate.content_encoding(),
> + );
> + resp.map(|body| {
> Body::wrap_stream(DeflateEncoder::with_quality(
> body.map_err(|err| {
> proxmox::io_format_err!("error during compression: {}", err)
> }),
> Level::Fastest,
> - )),
> - )
> + ))
> + })
> }
> Some(_other) => {
> // fixme: implement other compression algorithms
> @@ -546,9 +549,11 @@ fn extension_to_content_type(filename: &Path) -> (&'static str, bool) {
> ("application/octet-stream", false)
> }
>
> -async fn simple_static_file_download(filename: PathBuf) -> Result<Response<Body>, Error> {
> - let (content_type, _nocomp) = extension_to_content_type(&filename);
> -
> +async fn simple_static_file_download(
> + filename: PathBuf,
> + content_type: &'static str,
> + compression: Option<CompressionMethod>,
> +) -> Result<Response<Body>, Error> {
> use tokio::io::AsyncReadExt;
>
> let mut file = File::open(filename)
> @@ -556,46 +561,79 @@ async fn simple_static_file_download(filename: PathBuf) -> Result<Response<Body>
> .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
>
> let mut data: Vec<u8> = Vec::new();
> - file.read_to_end(&mut data)
> - .await
> - .map_err(|err| http_err!(BAD_REQUEST, "File read failed: {}", err))?;
>
> - let mut response = Response::new(data.into());
> + let mut response = match compression {
> + Some(CompressionMethod::Deflate) => {
> + let mut enc = DeflateEncoder::with_quality(data, Level::Fastest);
> + enc.compress_vec(&mut file, 32 * 1024).await?;
As discussed off-list, that value should really go into a named constant, e.g. `const CHUNK_RESPONSE_LIMIT` or the like.
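Roughly something like this (just a sketch, exact name/placement up to you, and assuming compress_vec takes the size limit as a plain usize):

    const CHUNK_RESPONSE_LIMIT: usize = 32 * 1024;

    // in simple_static_file_download(), instead of the magic number:
    enc.compress_vec(&mut file, CHUNK_RESPONSE_LIMIT).await?;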
> + let mut response = Response::new(enc.into_inner().into());
> + response.headers_mut().insert(
> + header::CONTENT_ENCODING,
> + CompressionMethod::Deflate.content_encoding(),
> + );
> + response
> + }
> + Some(_) | None => {
> + file.read_to_end(&mut data)
> + .await
> + .map_err(|err| http_err!(BAD_REQUEST, "File read failed: {}", err))?;
> + Response::new(data.into())
> + }
> + };
> +
> response.headers_mut().insert(
> header::CONTENT_TYPE,
> header::HeaderValue::from_static(content_type),
> );
> +
> Ok(response)
> }
>
> -async fn chuncked_static_file_download(filename: PathBuf) -> Result<Response<Body>, Error> {
> - let (content_type, _nocomp) = extension_to_content_type(&filename);
> +async fn chuncked_static_file_download(
> + filename: PathBuf,
> + content_type: &'static str,
> + compression: Option<CompressionMethod>,
> +) -> Result<Response<Body>, Error> {
> + let mut resp = Response::builder()
> + .status(StatusCode::OK)
> + .header(header::CONTENT_TYPE, content_type);
>
> let file = File::open(filename)
> .await
> .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
>
> - let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
> - .map_ok(|bytes| bytes.freeze());
> - let body = Body::wrap_stream(payload);
> + let body = match compression {
> + Some(CompressionMethod::Deflate) => {
> + resp = resp.header(
> + header::CONTENT_ENCODING,
> + CompressionMethod::Deflate.content_encoding(),
> + );
> + Body::wrap_stream(DeflateEncoder::with_quality(
> + AsyncReaderStream::new(file),
> + Level::Fastest,
> + ))
> + }
> + Some(_) | None => Body::wrap_stream(AsyncReaderStream::new(file)),
> + };
>
> - // FIXME: set other headers ?
> - Ok(Response::builder()
> - .status(StatusCode::OK)
> - .header(header::CONTENT_TYPE, content_type)
> - .body(body)
> - .unwrap())
> + Ok(resp.body(body).unwrap())
> }
>
> -async fn handle_static_file_download(filename: PathBuf) -> Result<Response<Body>, Error> {
> +async fn handle_static_file_download(
> + filename: PathBuf,
> + compression: Option<CompressionMethod>,
> +) -> Result<Response<Body>, Error> {
> let metadata = tokio::fs::metadata(filename.clone())
> .map_err(|err| http_err!(BAD_REQUEST, "File access problems: {}", err))
> .await?;
>
> + let (content_type, nocomp) = extension_to_content_type(&filename);
> + let compression = if nocomp { None } else { compression };
> +
> if metadata.len() < 1024 * 32 {
The const from above could then be used here too, instead of repeating the magic number.
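I.e. something like this (with a cast, since metadata.len() returns a u64 while the const would presumably be a usize):

    // in handle_static_file_download():
    if metadata.len() < CHUNK_RESPONSE_LIMIT as u64 {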
> - simple_static_file_download(filename).await
> + simple_static_file_download(filename, content_type, compression).await
> } else {
> - chuncked_static_file_download(filename).await
> + chuncked_static_file_download(filename, content_type, compression).await
> }
> }
>
> @@ -773,7 +811,8 @@ async fn handle_request(
> }
> } else {
> let filename = api.find_alias(&components);
> - return handle_static_file_download(filename).await;
> + let compression = extract_compression_method(&parts.headers);
> + return handle_static_file_download(filename, compression).await;
> }
> }
>
>
Thread overview:
2021-04-01 14:11 [pbs-devel] [PATCH proxmox-backup v2 0/5] add compression to api/static files Dominik Csapak
2021-04-01 14:11 ` [pbs-devel] [PATCH proxmox-backup v2 1/5] tools: add compression module Dominik Csapak
2021-04-02 12:07 ` Thomas Lamprecht
2021-04-01 14:11 ` [pbs-devel] [PATCH proxmox-backup v2 2/5] tools/compression: add DeflateEncoder and helpers Dominik Csapak
2021-04-02 12:14 ` Thomas Lamprecht
2021-04-01 14:11 ` [pbs-devel] [PATCH proxmox-backup v2 3/5] server/rest: add helper to extract compression headers Dominik Csapak
2021-04-02 12:20 ` Thomas Lamprecht
2021-04-01 14:11 ` [pbs-devel] [PATCH proxmox-backup v2 4/5] server/rest: compress api calls Dominik Csapak
2021-04-01 14:11 ` [pbs-devel] [PATCH proxmox-backup v2 5/5] server/rest: compress static files Dominik Csapak
2021-04-02 12:32 ` Thomas Lamprecht [this message]