From: Stefan Reiter <s.reiter@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [pbs-devel] [PATCH v3 proxmox-backup 14/20] file-restore: add qemu-helper setuid binary
Date: Wed, 31 Mar 2021 12:21:56 +0200
Message-ID: <20210331102202.14767-15-s.reiter@proxmox.com>
In-Reply-To: <20210331102202.14767-1-s.reiter@proxmox.com>

Starting a VM requires root (for /dev/kvm and /dev/vhost-vsock), but we
want a regular user to be able to use this functionality. Implement a
setuid binary that is restricted to exactly one task: starting a
restore VM, and nothing else.
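
For illustration, an unprivileged caller would invoke the helper
roughly like this (a sketch based on the CLI definition in the new
binary; the actual caller is wired up later in this series, and the
repository, snapshot and ticket values are made-up examples):

    PBS_PASSWORD=... PBS_FINGERPRINT=... \
        /usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore/proxmox-restore-qemu-helper \
        start "$repo" "vm/100/2021-03-31T10:00:00Z" "$ticket" \
        --files drive-scsi0.img.fidx

    # on success the helper prints the VM coordinates as JSON, e.g.:
    # {"cid":42,"pid":12345}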

The helper keeps the log files of the last 16 VM starts (the log output
is generated by the daemon binary via QEMU's serial-to-logfile
interface) and puts them into a separate
/var/log/proxmox-backup/file-restore directory.
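
With the rotation count of 16 and compression disabled, the log
directory will then look roughly like this after a number of VM starts
(assuming LogRotate's usual numbered-suffix naming):

    /var/log/proxmox-backup/file-restore/qemu.log
    /var/log/proxmox-backup/file-restore/qemu.log.1
    ...
    /var/log/proxmox-backup/file-restore/qemu.log.16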

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
---

v2:
* split this off from proxmox-file-restore binary

 Makefile                               |   4 +-
 debian/proxmox-file-restore.install    |   1 +
 debian/rules                           |   2 +-
 src/bin/proxmox-restore-qemu-helper.rs | 372 +++++++++++++++++++++++++
 src/buildcfg.rs                        |  21 ++
 5 files changed, 398 insertions(+), 2 deletions(-)
 create mode 100644 src/bin/proxmox-restore-qemu-helper.rs

diff --git a/Makefile b/Makefile
index 269bb80c..fbbf88a2 100644
--- a/Makefile
+++ b/Makefile
@@ -155,8 +155,10 @@ install: $(COMPILED_BINS)
 	install -dm755 $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/file-restore
 	$(foreach i,$(RESTORE_BIN), \
 	    install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/file-restore/ ;)
-	# install sg-tape-cmd as setuid binary
+	# install sg-tape-cmd and proxmox-restore-qemu-helper as setuid binaries
 	install -m4755 -o root -g root $(COMPILEDIR)/sg-tape-cmd $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/sg-tape-cmd
+	install -m4755 -o root -g root $(COMPILEDIR)/proxmox-restore-qemu-helper \
+	    $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/file-restore/proxmox-restore-qemu-helper
 	$(foreach i,$(SERVICE_BIN), \
 	    install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;)
 	$(MAKE) -C www install
diff --git a/debian/proxmox-file-restore.install b/debian/proxmox-file-restore.install
index d952836e..0f0e9d56 100644
--- a/debian/proxmox-file-restore.install
+++ b/debian/proxmox-file-restore.install
@@ -2,3 +2,4 @@ usr/bin/proxmox-file-restore
 usr/share/man/man1/proxmox-file-restore.1
 usr/share/zsh/vendor-completions/_proxmox-file-restore
 usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore/proxmox-restore-daemon
+usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore/proxmox-restore-qemu-helper
diff --git a/debian/rules b/debian/rules
index ce2db72e..ac9de7fe 100755
--- a/debian/rules
+++ b/debian/rules
@@ -43,7 +43,7 @@ override_dh_installsystemd:
 	dh_installsystemd --no-start --no-restart-after-upgrade
 
 override_dh_fixperms:
-	dh_fixperms --exclude sg-tape-cmd
+	dh_fixperms --exclude sg-tape-cmd --exclude proxmox-restore-qemu-helper
 
 # workaround https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=933541
 # TODO: remove once available (Debian 11 ?)
diff --git a/src/bin/proxmox-restore-qemu-helper.rs b/src/bin/proxmox-restore-qemu-helper.rs
new file mode 100644
index 00000000..f56a6607
--- /dev/null
+++ b/src/bin/proxmox-restore-qemu-helper.rs
@@ -0,0 +1,372 @@
+//! Starts a QEMU VM for single file restore.
+//! Needs to be setuid, or otherwise able to access /dev/kvm and /dev/vhost-vsock.
+use std::fs::{File, OpenOptions};
+use std::io::prelude::*;
+use std::os::unix::io::{AsRawFd, FromRawFd};
+use std::path::PathBuf;
+use std::time::Duration;
+
+use anyhow::{bail, format_err, Error};
+use serde_json::{json, Value};
+use tokio::time;
+
+use nix::sys::signal::{kill, Signal};
+use nix::unistd::Pid;
+
+use proxmox::{
+    api::{api, cli::*, RpcEnvironment},
+    tools::{
+        fd::Fd,
+        fs::{create_path, file_read_string, make_tmp_file, CreateOptions},
+    },
+};
+
+use proxmox_backup::backup::backup_user;
+use proxmox_backup::client::{VsockClient, DEFAULT_VSOCK_PORT};
+use proxmox_backup::{buildcfg, tools};
+
+pub mod proxmox_client_tools;
+use proxmox_client_tools::REPO_URL_SCHEMA;
+
+const PBS_VM_NAME: &str = "pbs-restore-vm";
+const MAX_CID_TRIES: u64 = 32;
+
+fn create_restore_log_dir() -> Result<String, Error> {
+    let logpath = format!("{}/file-restore", buildcfg::PROXMOX_BACKUP_LOG_DIR);
+
+    proxmox::try_block!({
+        let backup_user = backup_user()?;
+        let opts = CreateOptions::new()
+            .owner(backup_user.uid)
+            .group(backup_user.gid);
+
+        let opts_root = CreateOptions::new()
+            .owner(nix::unistd::ROOT)
+            .group(nix::unistd::Gid::from_raw(0));
+
+        create_path(buildcfg::PROXMOX_BACKUP_LOG_DIR, None, Some(opts))?;
+        create_path(&logpath, None, Some(opts_root))?;
+        Ok(())
+    })
+    .map_err(|err: Error| format_err!("unable to create file-restore log dir - {}", err))?;
+
+    Ok(logpath)
+}
+
+fn validate_img_existence() -> Result<(), Error> {
+    let kernel = PathBuf::from(buildcfg::PROXMOX_BACKUP_KERNEL_FN);
+    let initramfs = PathBuf::from(buildcfg::PROXMOX_BACKUP_INITRAMFS_FN);
+    if !kernel.exists() || !initramfs.exists() {
+        bail!("cannot run file-restore VM: package 'proxmox-file-restore' is not (correctly) installed");
+    }
+    Ok(())
+}
+
+fn try_kill_vm(pid: i32) -> Result<(), Error> {
+    let pid = Pid::from_raw(pid);
+    if let Ok(()) = kill(pid, None) {
+        // process is running (and we could kill it), check if it is actually ours
+        // (if it errors assume we raced with the process's death and ignore it)
+        if let Ok(cmdline) = file_read_string(format!("/proc/{}/cmdline", pid)) {
+            if cmdline.split('\0').any(|a| a == PBS_VM_NAME) {
+                // yes, it's ours, kill it brutally with SIGKILL, no reason to take
+                // any chances - in this state it's most likely broken anyway
+                if let Err(err) = kill(pid, Signal::SIGKILL) {
+                    bail!(
+                        "reaping broken VM (pid {}) with SIGKILL failed: {}",
+                        pid,
+                        err
+                    );
+                }
+            }
+        }
+    }
+
+    Ok(())
+}
+
+async fn create_temp_initramfs(ticket: &str) -> Result<(Fd, String), Error> {
+    use std::ffi::CString;
+    use tokio::fs::File;
+
+    let (tmp_fd, tmp_path) =
+        make_tmp_file("/tmp/file-restore-qemu.initramfs.tmp", CreateOptions::new())?;
+    nix::unistd::unlink(&tmp_path)?;
+    tools::fd_change_cloexec(tmp_fd.0, false)?;
+
+    let mut f = File::from_std(unsafe { std::fs::File::from_raw_fd(tmp_fd.0) });
+    let mut base = File::open(buildcfg::PROXMOX_BACKUP_INITRAMFS_FN).await?;
+
+    tokio::io::copy(&mut base, &mut f).await?;
+
+    let name = CString::new("ticket").unwrap();
+    tools::cpio::append_file(
+        &mut f,
+        ticket.as_bytes(),
+        &name,
+        0,
+        (libc::S_IFREG | 0o400) as u16,
+        0,
+        0,
+        0,
+        ticket.len() as u32,
+    )
+    .await?;
+    tools::cpio::append_trailer(&mut f).await?;
+
+    // forget the tokio file, we close the file descriptor via the returned Fd
+    std::mem::forget(f);
+
+    let path = format!("/dev/fd/{}", tmp_fd.0);
+    Ok((tmp_fd, path))
+}
+
+async fn start_vm(
+    // u16 so we can do wrapping_add without going too high
+    mut cid: u16,
+    repo: &str,
+    snapshot: &str,
+    files: impl Iterator<Item = &str>,
+    ticket: &str,
+) -> Result<(i32, i32), Error> {
+    validate_img_existence()?;
+
+    if std::env::var("PBS_PASSWORD").is_err() {
+        bail!("environment variable PBS_PASSWORD has to be set for QEMU VM restore");
+    }
+    if std::env::var("PBS_FINGERPRINT").is_err() {
+        bail!("environment variable PBS_FINGERPRINT has to be set for QEMU VM restore");
+    }
+
+    let pid;
+    let (pid_fd, pid_path) = make_tmp_file("/tmp/file-restore-qemu.pid.tmp", CreateOptions::new())?;
+    nix::unistd::unlink(&pid_path)?;
+    tools::fd_change_cloexec(pid_fd.0, false)?;
+
+    let (_ramfs_fd, ramfs_path) = create_temp_initramfs(ticket).await?;
+
+    let logpath = create_restore_log_dir()?;
+    let logfile = &format!("{}/qemu.log", logpath);
+    let mut logrotate = tools::logrotate::LogRotate::new(logfile, false)
+        .ok_or_else(|| format_err!("could not get QEMU log file names"))?;
+
+    if let Err(err) = logrotate.do_rotate(CreateOptions::default(), Some(16)) {
+        eprintln!("warning: logrotate for QEMU log file failed - {}", err);
+    }
+
+    let mut logfd = OpenOptions::new()
+        .append(true)
+        .create_new(true)
+        .open(logfile)?;
+    tools::fd_change_cloexec(logfd.as_raw_fd(), false)?;
+
+    // preface log file with start timestamp so one can see how long QEMU took to start
+    writeln!(logfd, "[{}] PBS file restore VM log", {
+        let now = proxmox::tools::time::epoch_i64();
+        proxmox::tools::time::epoch_to_rfc3339(now)?
+    })?;
+
+    let base_args = [
+        "-chardev",
+        &format!(
+            "file,id=log,path=/dev/null,logfile=/dev/fd/{},logappend=on",
+            logfd.as_raw_fd()
+        ),
+        "-serial",
+        "chardev:log",
+        "-vnc",
+        "none",
+        "-enable-kvm",
+        "-m",
+        "512",
+        "-kernel",
+        buildcfg::PROXMOX_BACKUP_KERNEL_FN,
+        "-initrd",
+        &ramfs_path,
+        "-append",
+        "quiet",
+        "-daemonize",
+        "-pidfile",
+        &format!("/dev/fd/{}", pid_fd.as_raw_fd()),
+        "-name",
+        PBS_VM_NAME,
+    ];
+
+    // Generate drive arguments for all fidx files in backup snapshot
+    let mut drives = Vec::new();
+    let mut id = 0;
+    for file in files {
+        if !file.ends_with(".img.fidx") {
+            continue;
+        }
+        drives.push("-drive".to_owned());
+        drives.push(format!(
+            "file=pbs:repository={},,snapshot={},,archive={},read-only=on,if=none,id=drive{}",
+            repo, snapshot, file, id
+        ));
+        drives.push("-device".to_owned());
+        // the drive serial is used by the VM to map .fidx files to /dev paths
+        drives.push(format!("virtio-blk-pci,drive=drive{},serial={}", id, file));
+        id += 1;
+    }
+
+    // Try starting QEMU in a loop to retry if we fail because of a bad 'cid' value
+    let mut attempts = 0;
+    loop {
+        let mut qemu_cmd = std::process::Command::new("qemu-system-x86_64");
+        qemu_cmd.args(base_args.iter());
+        qemu_cmd.args(&drives);
+        qemu_cmd.arg("-device");
+        qemu_cmd.arg(format!(
+            "vhost-vsock-pci,guest-cid={},disable-legacy=on",
+            cid
+        ));
+
+        qemu_cmd.stdout(std::process::Stdio::null());
+        qemu_cmd.stderr(std::process::Stdio::piped());
+
+        let res = tokio::task::block_in_place(|| qemu_cmd.spawn()?.wait_with_output())?;
+
+        if res.status.success() {
+            // at this point QEMU is already daemonized and running, so if anything fails we
+            // technically leave behind a zombie-VM... this shouldn't matter, as it will stop
+            // itself soon enough (timer), and the following operations are unlikely to fail
+            let mut pid_file = unsafe { File::from_raw_fd(pid_fd.as_raw_fd()) };
+            std::mem::forget(pid_fd); // FD ownership was transferred to pid_file above
+            let mut pidstr = String::new();
+            pid_file.read_to_string(&mut pidstr)?;
+            pid = pidstr.trim_end().parse().map_err(|err| {
+                format_err!("cannot parse PID returned by QEMU ('{}'): {}", &pidstr, err)
+            })?;
+            break;
+        } else {
+            let out = String::from_utf8_lossy(&res.stderr);
+            if out.contains("unable to set guest cid: Address already in use") {
+                attempts += 1;
+                if attempts >= MAX_CID_TRIES {
+                    bail!("CID '{}' in use, but max attempts reached, aborting", cid);
+                }
+                // CID in use, try next higher one
+                eprintln!("CID '{}' in use by other VM, attempting next one", cid);
+                // skip special-meaning low values
+                cid = cid.wrapping_add(1).max(10);
+            } else {
+                eprint!("{}", out);
+                bail!("Starting VM failed. See output above for more information.");
+            }
+        }
+    }
+
+    // QEMU has started successfully, now wait for virtio socket to become ready
+    let pid_t = Pid::from_raw(pid);
+    for _ in 0..60 {
+        let client = VsockClient::new(cid as i32, DEFAULT_VSOCK_PORT, Some(ticket.to_owned()));
+        if let Ok(Ok(_)) =
+            time::timeout(Duration::from_secs(2), client.get("api2/json/status", None)).await
+        {
+            return Ok((pid, cid as i32));
+        }
+        if kill(pid_t, None).is_err() {
+            // QEMU exited
+            bail!("VM exited before connection could be established");
+        }
+        time::sleep(Duration::from_millis(200)).await;
+    }
+
+    // start failed
+    if let Err(err) = try_kill_vm(pid) {
+        eprintln!("could not kill the failed VM: {}", err);
+    }
+    bail!("starting VM timed out");
+}
+
+#[api(
+   input: {
+       properties: {
+           repository: {
+               schema: REPO_URL_SCHEMA,
+           },
+           snapshot: {
+               type: String,
+               description: "Group/Snapshot path",
+           },
+           ticket: {
+               description: "A unique key acting as a password for communicating with the VM.",
+               type: String,
+           },
+           cid: {
+               description: "Request a specific CID; if it is unavailable, the next free one will be used",
+               type: i32,
+               optional: true,
+           },
+           "files": {
+               description: "Files in snapshot to map to VM",
+               type: Array,
+               items: {
+                   description: "A .img.fidx file in the given snapshot",
+                   type: String,
+               },
+           },
+       },
+   },
+   returns: {
+       description: "Information about the started VM",
+       type: Object,
+       properties: {
+           cid: {
+               description: "The vsock CID of the started VM",
+               type: i32,
+           },
+           pid: {
+               description: "The process ID of the started VM",
+               type: i32,
+           },
+       },
+   }
+)]
+/// Start a VM with the given parameters and return its PID and CID
+async fn start(param: Value) -> Result<Value, Error> {
+    let repo = tools::required_string_param(&param, "repository")?;
+    let snapshot = tools::required_string_param(&param, "snapshot")?;
+    let files = tools::required_array_param(&param, "files")?;
+    let ticket = tools::required_string_param(&param, "ticket")?;
+
+    let running_uid = nix::unistd::Uid::current();
+    let cid = (param["cid"].as_i64().unwrap_or(running_uid.as_raw() as i64) & 0xFFFF).max(10);
+
+    let (pid, cid) = start_vm(
+        cid as u16,
+        repo,
+        snapshot,
+        files.iter().map(|f| f.as_str().unwrap()),
+        ticket,
+    )
+    .await?;
+
+    // always print json, this is not supposed to be called manually anyway
+    print!("{}", json!({ "pid": pid, "cid": cid }));
+    Ok(Value::Null)
+}
+
+fn main() -> Result<(), Error> {
+    let effective_uid = nix::unistd::Uid::effective();
+    if !effective_uid.is_root() {
+        bail!("this program needs to be run with setuid root");
+    }
+
+    let cmd_def = CliCommandMap::new().insert(
+        "start",
+        CliCommand::new(&API_METHOD_START).arg_param(&["repository", "snapshot", "ticket", "cid"]),
+    );
+
+    let mut rpcenv = CliEnvironment::new();
+    rpcenv.set_auth_id(Some(String::from("root@pam")));
+
+    run_cli_command(
+        cmd_def,
+        rpcenv,
+        Some(|future| proxmox_backup::tools::runtime::main(future)),
+    );
+
+    Ok(())
+}
diff --git a/src/buildcfg.rs b/src/buildcfg.rs
index 4f333288..d80c5a12 100644
--- a/src/buildcfg.rs
+++ b/src/buildcfg.rs
@@ -10,6 +10,14 @@ macro_rules! PROXMOX_BACKUP_RUN_DIR_M { () => ("/run/proxmox-backup") }
 #[macro_export]
 macro_rules! PROXMOX_BACKUP_LOG_DIR_M { () => ("/var/log/proxmox-backup") }
 
+#[macro_export]
+macro_rules! PROXMOX_BACKUP_CACHE_DIR_M { () => ("/var/cache/proxmox-backup") }
+
+#[macro_export]
+macro_rules! PROXMOX_BACKUP_FILE_RESTORE_BIN_DIR_M {
+    () => ("/usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore")
+}
+
 /// namespaced directory for in-memory (tmpfs) run state
 pub const PROXMOX_BACKUP_RUN_DIR: &str = PROXMOX_BACKUP_RUN_DIR_M!();
 
@@ -30,6 +38,19 @@ pub const PROXMOX_BACKUP_PROXY_PID_FN: &str = concat!(PROXMOX_BACKUP_RUN_DIR_M!(
 /// the PID filename for the privileged api daemon
 pub const PROXMOX_BACKUP_API_PID_FN: &str = concat!(PROXMOX_BACKUP_RUN_DIR_M!(), "/api.pid");
 
+/// filename of the cached initramfs to use for booting single file restore VMs; this file is
+/// automatically created by APT hooks
+pub const PROXMOX_BACKUP_INITRAMFS_FN: &str =
+    concat!(PROXMOX_BACKUP_CACHE_DIR_M!(), "/file-restore-initramfs.img");
+
+/// filename of the kernel to use for booting single file restore VMs
+pub const PROXMOX_BACKUP_KERNEL_FN: &str =
+    concat!(PROXMOX_BACKUP_FILE_RESTORE_BIN_DIR_M!(), "/bzImage");
+
+/// setuid binary location for starting restore VMs
+pub const PROXMOX_RESTORE_QEMU_HELPER_FN: &str =
+    concat!(PROXMOX_BACKUP_FILE_RESTORE_BIN_DIR_M!(), "/proxmox-restore-qemu-helper");
+
 /// Prepend configuration directory to a file name
 ///
 /// This is a simple way to get the full path for configuration files.
-- 
2.20.1