From: Stefan Reiter <s.reiter@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [pbs-devel] [PATCH proxmox-backup 18/22] file-restore: add basic VM/block device support
Date: Tue, 16 Feb 2021 18:07:06 +0100	[thread overview]
Message-ID: <20210216170710.31767-19-s.reiter@proxmox.com> (raw)
In-Reply-To: <20210216170710.31767-1-s.reiter@proxmox.com>

Includes methods to start, stop and list QEMU file-restore VMs, as well
as CLI commands for the latter two.
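
For example, the two new commands can be invoked like this (the mapping
name is a hypothetical placeholder; real names are the ids returned by
the drivers):

    # proxmox-file-restore status --output-format json
    # proxmox-file-restore stop "qemu_<repository>/<snapshot>"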

The implementation is abstracted behind the concept of a
"BlockRestoreDriver", so other methods can be implemented later (e.g.
mapping directly to loop devices on the host, using hypervisors other
than QEMU, etc.).
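
As a rough sketch of what a future driver would have to provide: only
the trait added in this patch needs to be implemented. The
'LoopDevDriver' name and the do-nothing bodies below are made up for
illustration, assuming the trait and the Async alias from
block_driver.rs are in scope:

    use anyhow::{bail, Error};
    use futures::FutureExt;
    use serde_json::Value;

    struct LoopDevDriver {}

    impl BlockRestoreDriver for LoopDevDriver {
        fn status(&self) -> Async<Result<Vec<(String, Value)>, Error>> {
            // no mappings active in this stub
            async move { Ok(Vec::new()) }.boxed()
        }
        fn stop(&self, id: String) -> Async<Result<(), Error>> {
            async move { bail!("no mapping with id '{}' found", id) }.boxed()
        }
        fn list(&self) -> Vec<String> {
            // returned ids must carry a driver-type prefix (e.g.
            // "loopdev_...") so they cannot collide with other drivers
            Vec::new()
        }
    }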

Starting VMs is currently unused but will be needed for further changes.

The design for the QEMU driver uses a locked 'map' file
(/run/proxmox-backup/restore-vm.map) containing a JSON encoding of the
currently running VMs. VMs are addressed by a 'name', a systemd-unit
encoded combination of repository and snapshot string that uniquely
identifies each VM.
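
With one VM running, the map file contains a JSON object keyed by that
name, roughly like the following (the key and values are made up; the
fields correspond to the VMState struct introduced below):

    {
        "qemu_<escaped repository and snapshot>": { "pid": 12345, "cid": 11 }
    }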

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
---
 debian/proxmox-backup-client.install          |   1 -
 debian/proxmox-file-restore.install           |   1 +
 src/bin/proxmox-file-restore.rs               |  16 +-
 src/bin/proxmox_file_restore/block_driver.rs  | 157 +++++++
 .../proxmox_file_restore/block_driver_qemu.rs | 407 ++++++++++++++++++
 src/bin/proxmox_file_restore/mod.rs           |   5 +
 src/buildcfg.rs                               |  20 +
 7 files changed, 603 insertions(+), 4 deletions(-)
 create mode 100644 src/bin/proxmox_file_restore/block_driver.rs
 create mode 100644 src/bin/proxmox_file_restore/block_driver_qemu.rs
 create mode 100644 src/bin/proxmox_file_restore/mod.rs

diff --git a/debian/proxmox-backup-client.install b/debian/proxmox-backup-client.install
index b203f152..74b568f1 100644
--- a/debian/proxmox-backup-client.install
+++ b/debian/proxmox-backup-client.install
@@ -1,6 +1,5 @@
 usr/bin/proxmox-backup-client
 usr/bin/pxar
-usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore/proxmox-restore-daemon
 usr/share/man/man1/proxmox-backup-client.1
 usr/share/man/man1/pxar.1
 usr/share/zsh/vendor-completions/_proxmox-backup-client
diff --git a/debian/proxmox-file-restore.install b/debian/proxmox-file-restore.install
index 2082e46b..d952836e 100644
--- a/debian/proxmox-file-restore.install
+++ b/debian/proxmox-file-restore.install
@@ -1,3 +1,4 @@
 usr/bin/proxmox-file-restore
 usr/share/man/man1/proxmox-file-restore.1
 usr/share/zsh/vendor-completions/_proxmox-file-restore
+usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore/proxmox-restore-daemon
diff --git a/src/bin/proxmox-file-restore.rs b/src/bin/proxmox-file-restore.rs
index ec3378b0..767cc057 100644
--- a/src/bin/proxmox-file-restore.rs
+++ b/src/bin/proxmox-file-restore.rs
@@ -32,6 +32,9 @@ use proxmox_client_tools::{
     KEYFD_SCHEMA, KEYFILE_SCHEMA, REPO_URL_SCHEMA,
 };
 
+mod proxmox_file_restore;
+use proxmox_file_restore::*;
+
 enum ExtractPath {
     ListArchives,
     Pxar(String, Vec<u8>),
@@ -48,7 +51,7 @@ fn parse_path(path: String, base64: bool) -> Result<ExtractPath, Error> {
         return Ok(ExtractPath::ListArchives);
     }
 
-    while bytes.len() > 0 && bytes[0] == b'/' {
+    while !bytes.is_empty() && bytes[0] == b'/' {
         bytes.remove(0);
     }
 
@@ -319,7 +322,7 @@ async fn extract(param: Value) -> Result<Value, Error> {
             let file = root
                 .lookup(OsStr::from_bytes(&path))
                 .await?
-                .ok_or(format_err!("error opening '{:?}'", path))?;
+                .ok_or_else(|| format_err!("error opening '{:?}'", path))?;
 
             if let Some(target) = target {
                 extract_sub_dir(target, decoder, OsStr::from_bytes(&path), verbose).await?;
@@ -361,9 +364,16 @@ fn main() {
         .completion_cb("snapshot", complete_group_or_snapshot)
         .completion_cb("target", tools::complete_file_name);
 
+    let status_cmd_def = CliCommand::new(&API_METHOD_STATUS);
+    let stop_cmd_def = CliCommand::new(&API_METHOD_STOP)
+        .arg_param(&["name"])
+        .completion_cb("name", complete_block_driver_ids);
+
     let cmd_def = CliCommandMap::new()
         .insert("list", list_cmd_def)
-        .insert("extract", restore_cmd_def);
+        .insert("extract", restore_cmd_def)
+        .insert("status", status_cmd_def)
+        .insert("stop", stop_cmd_def);
 
     let rpcenv = CliEnvironment::new();
     run_cli_command(
diff --git a/src/bin/proxmox_file_restore/block_driver.rs b/src/bin/proxmox_file_restore/block_driver.rs
new file mode 100644
index 00000000..0ba67f34
--- /dev/null
+++ b/src/bin/proxmox_file_restore/block_driver.rs
@@ -0,0 +1,157 @@
+//! Abstraction layer over different methods of accessing a block backup
+use anyhow::{bail, Error};
+use serde::{Deserialize, Serialize};
+use serde_json::{json, Value};
+
+use std::collections::HashMap;
+use std::future::Future;
+use std::hash::BuildHasher;
+use std::pin::Pin;
+
+use proxmox_backup::backup::{BackupDir, BackupManifest};
+use proxmox_backup::client::BackupRepository;
+
+use proxmox::api::{api, cli::*};
+
+use super::block_driver_qemu::QemuBlockDriver;
+
+/// Contains details about a snapshot that is to be accessed by block file restore
+pub struct SnapRestoreDetails {
+    pub repo: BackupRepository,
+    pub snapshot: BackupDir,
+    pub manifest: BackupManifest,
+}
+
+pub type Async<R> = Pin<Box<dyn Future<Output = R> + Send>>;
+
+/// An abstract implementation for retrieving data out of a block file backup
+pub trait BlockRestoreDriver {
+    /// Return the status of all running/mapped images as (id, extra data) pairs, where each id
+    /// must match one returned from list()
+    fn status(&self) -> Async<Result<Vec<(String, Value)>, Error>>;
+    /// Stop/Close a running restore method
+    fn stop(&self, id: String) -> Async<Result<(), Error>>;
+    /// Returned ids must be prefixed with the driver type so that they cannot collide between
+    /// drivers; the returned values must be passable to stop()
+    fn list(&self) -> Vec<String>;
+}
+
+#[api()]
+#[derive(Debug, Serialize, Deserialize, PartialEq, Clone, Copy)]
+pub enum BlockDriverType {
+    /// Uses a small QEMU/KVM virtual machine to map images securely. Requires PVE-patched QEMU.
+    Qemu,
+}
+
+impl BlockDriverType {
+    fn resolve(&self) -> impl BlockRestoreDriver {
+        match self {
+            BlockDriverType::Qemu => QemuBlockDriver {},
+        }
+    }
+}
+
+const DEFAULT_DRIVER: BlockDriverType = BlockDriverType::Qemu;
+const ALL_DRIVERS: &[BlockDriverType] = &[BlockDriverType::Qemu];
+
+#[api(
+   input: {
+       properties: {
+            "driver": {
+                type: BlockDriverType,
+                optional: true,
+            },
+            "output-format": {
+                schema: OUTPUT_FORMAT,
+                optional: true,
+            },
+        },
+   },
+)]
+/// Retrieve status information about currently running/mapped restore images
+pub async fn status(driver: Option<BlockDriverType>, param: Value) -> Result<(), Error> {
+    let output_format = get_output_format(&param);
+    let text = output_format == "text";
+
+    let mut ret = json!({});
+
+    for dt in ALL_DRIVERS {
+        if driver.is_some() && &driver.unwrap() != dt {
+            continue;
+        }
+
+        let drv_name = format!("{:?}", dt);
+        let drv = dt.resolve();
+        match drv.status().await {
+            Ok(data) if data.is_empty() => {
+                if text {
+                    println!("{}: no mappings", drv_name);
+                } else {
+                    ret[drv_name] = json!({});
+                }
+            }
+            Ok(data) => {
+                if text {
+                    println!("{}:", drv_name);
+                }
+
+                ret[&drv_name]["ids"] = json!([]);
+                for (id, extra) in data {
+                    if text {
+                        println!("{} \t({})", id, extra);
+                    } else {
+                        ret[&drv_name]["ids"][id] = extra;
+                    }
+                }
+            }
+            Err(err) => {
+                if text {
+                    eprintln!("error getting status from driver '{}' - {}", drv_name, err);
+                } else {
+                    ret[drv_name] = json!({ "error": format!("{}", err) });
+                }
+            }
+        }
+    }
+
+    if !text {
+        format_and_print_result(&ret, &output_format);
+    }
+
+    Ok(())
+}
+
+#[api(
+   input: {
+       properties: {
+            "name": {
+                type: String,
+                description: "The name of the VM to stop.",
+            },
+        },
+   },
+)]
+/// Immediately stop/unmap a given image. Not typically necessary, as VMs will stop themselves
+/// after a timer anyway.
+pub async fn stop(name: String) -> Result<(), Error> {
+    for drv in ALL_DRIVERS.iter().map(BlockDriverType::resolve) {
+        if drv.list().contains(&name) {
+            return drv.stop(name).await;
+        }
+    }
+
+    bail!("no mapping with name '{}' found", name);
+}
+
+/// Autocompletion handler for block mappings
+pub fn complete_block_driver_ids<S: BuildHasher>(
+    _arg: &str,
+    _param: &HashMap<String, String, S>,
+) -> Vec<String> {
+    ALL_DRIVERS
+        .iter()
+        .map(BlockDriverType::resolve)
+        .map(|d| d.list())
+        .flatten()
+        .collect()
+}
diff --git a/src/bin/proxmox_file_restore/block_driver_qemu.rs b/src/bin/proxmox_file_restore/block_driver_qemu.rs
new file mode 100644
index 00000000..8bbea962
--- /dev/null
+++ b/src/bin/proxmox_file_restore/block_driver_qemu.rs
@@ -0,0 +1,407 @@
+//! Block file access via a small QEMU restore VM using the PBS block driver in QEMU
+use anyhow::{bail, format_err, Error};
+use futures::FutureExt;
+use serde::{Deserialize, Serialize};
+use serde_json::{json, Value};
+
+use std::collections::HashMap;
+use std::fs::{File, OpenOptions};
+use std::io::{prelude::*, SeekFrom};
+use std::path::PathBuf;
+use std::time::Duration;
+
+use tokio::time;
+
+use proxmox::tools::fs::{file_read_string, lock_file, make_tmp_file, CreateOptions};
+use proxmox_backup::backup::BackupDir;
+use proxmox_backup::buildcfg;
+use proxmox_backup::client::*;
+use proxmox_backup::tools;
+
+use super::block_driver::*;
+
+pub struct QemuBlockDriver {}
+
+#[derive(Clone, Hash, Serialize, Deserialize)]
+struct VMState {
+    pid: i32,
+    cid: i32,
+}
+
+struct VMStateMap {
+    map: HashMap<String, VMState>,
+    file: File,
+}
+
+impl VMStateMap {
+    fn open_file_raw(write: bool) -> Result<File, Error> {
+        // ensure the file is only created as root to get correct permissions
+        let running_uid = nix::unistd::Uid::effective();
+        if running_uid.is_root() {
+            std::fs::create_dir_all(buildcfg::PROXMOX_BACKUP_RUN_DIR)?;
+        }
+
+        OpenOptions::new()
+            .read(true)
+            .write(write)
+            .create(write && running_uid.is_root())
+            .open(buildcfg::PROXMOX_BACKUP_VM_MAP_FN)
+            .map_err(Error::from)
+    }
+
+    /// Acquire a lock on the state map and retrieve a deserialized version
+    fn load() -> Result<Self, Error> {
+        let mut file = Self::open_file_raw(true)?;
+        lock_file(&mut file, true, Some(std::time::Duration::from_secs(5)))?;
+        let map = serde_json::from_reader(&file).unwrap_or_default();
+        Ok(Self { map, file })
+    }
+
+    /// Load a read-only copy of the current VM map. Only use this for informational purposes,
+    /// like shell auto-completion; for anything requiring consistency, use load()!
+    fn load_read_only() -> Result<HashMap<String, VMState>, Error> {
+        let file = Self::open_file_raw(false)?;
+        Ok(serde_json::from_reader(&file).unwrap_or_default())
+    }
+
+    /// Write back a potentially modified state map, consuming the held lock
+    fn write(mut self) -> Result<(), Error> {
+        self.file.seek(SeekFrom::Start(0))?;
+        self.file.set_len(0)?;
+        serde_json::to_writer(self.file, &self.map)?;
+
+        // drop ourselves including file lock
+        Ok(())
+    }
+
+    /// Return the map, but drop the lock immediately
+    fn read_only(self) -> HashMap<String, VMState> {
+        self.map
+    }
+}
+
+fn validate_img_existence() -> Result<(), Error> {
+    let kernel = PathBuf::from(buildcfg::PROXMOX_BACKUP_KERNEL_FN);
+    let initramfs = PathBuf::from(buildcfg::PROXMOX_BACKUP_INITRAMFS_FN);
+    if !kernel.exists() || !initramfs.exists() {
+        bail!("cannot run file-restore VM: package 'proxmox-file-restore' is not (correctly) installed");
+    }
+    Ok(())
+}
+
+fn make_name(repo: &BackupRepository, snap: &BackupDir) -> String {
+    let full = format!("qemu_{}/{}", repo, snap);
+    tools::systemd::escape_unit(&full, false)
+}
+
+fn try_kill_vm(pid: i32, name: &str) -> Result<(), Error> {
+    use nix::sys::signal::{kill, Signal};
+    use nix::unistd::Pid;
+
+    let pid = Pid::from_raw(pid);
+    if let Ok(()) = kill(pid, None) {
+        // process is running (and we could kill it), check if it is actually ours
+        if let Ok(cmdline) = file_read_string(format!("/proc/{}/cmdline", pid)) {
+            if cmdline.split('\0').any(|a| a == name) {
+                // yes, it's ours, kill it brutally with SIGKILL, no reason to take
+                // any chances - in this state it's most likely broken anyway
+                if let Err(err) = kill(pid, Signal::SIGKILL) {
+                    bail!(
+                        "reaping broken VM (pid {}) with SIGKILL failed: {}",
+                        pid,
+                        err
+                    );
+                }
+            }
+        }
+    }
+
+    Ok(())
+}
+
+/// Remove non-responsive VMs from the given map; returns 'true' if the map was modified
+async fn cleanup_map(map: &mut HashMap<String, VMState>) -> bool {
+    let mut to_remove = Vec::new();
+    for (name, state) in map.iter() {
+        let client = VsockClient::new(state.cid, DEFAULT_VSOCK_PORT);
+        let res = client
+            .get("api2/json/status", Some(json!({"keep-timeout": true})))
+            .await;
+        if res.is_err() {
+            // VM is not reachable, remove it from the map, then try to reap it
+            to_remove.push(name.clone());
+            if let Err(err) = try_kill_vm(state.pid, name) {
+                eprintln!("restore VM cleanup: {}", err);
+            }
+        }
+    }
+
+    for tr in &to_remove {
+        map.remove(tr);
+    }
+
+    !to_remove.is_empty()
+}
+
+async fn ensure_running(details: &SnapRestoreDetails) -> Result<VsockClient, Error> {
+    let name = make_name(&details.repo, &details.snapshot);
+    let mut state = VMStateMap::load()?;
+
+    cleanup_map(&mut state.map).await;
+
+    let new_cid;
+    match state.map.get(&name) {
+        Some(vm) => {
+            let client = VsockClient::new(vm.cid, DEFAULT_VSOCK_PORT);
+            let res = client.get("api2/json/status", None).await;
+            match res {
+                Ok(_) => {
+                    // VM is running and we just reset its timeout, nothing to do
+                    return Ok(client);
+                }
+                Err(err) => {
+                    eprintln!("dead VM detected: {}", err);
+                    // VM is dead, restart
+                    try_kill_vm(vm.pid, &name)?;
+                    let vms = start_vm(vm.cid, &name, details).await?;
+                    new_cid = vms.cid;
+                    state.map.insert(name, vms);
+                }
+            }
+        }
+        None => {
+            let cid = state
+                .map
+                .iter()
+                .map(|v| v.1.cid)
+                .max()
+                .unwrap_or(10) // some low CIDs have special meaning, start at 10 to avoid them
+                + 1;
+
+            let vms = start_vm(cid, &name, details).await?;
+            new_cid = vms.cid;
+            state.map.insert(name, vms);
+        }
+    }
+
+    state.write()?;
+    Ok(VsockClient::new(new_cid, DEFAULT_VSOCK_PORT))
+}
+
+async fn start_vm(
+    mut cid: i32,
+    name: &str,
+    details: &SnapRestoreDetails,
+) -> Result<VMState, Error> {
+    use nix::sys::signal::kill;
+    use nix::unistd::Pid;
+    use std::os::unix::io::{AsRawFd, FromRawFd};
+
+    validate_img_existence()?;
+
+    if let Err(_) = std::env::var("PBS_PASSWORD") {
+        bail!("environment variable PBS_PASSWORD has to be set for QEMU VM restore");
+    }
+    if let Err(_) = std::env::var("PBS_FINGERPRINT") {
+        bail!("environment variable PBS_FINGERPRINT has to be set for QEMU VM restore");
+    }
+
+    let pid;
+    let (pid_fd, pid_path) = make_tmp_file("/tmp", CreateOptions::new())?;
+    nix::unistd::unlink(&pid_path)?;
+    tools::fd_change_cloexec(pid_fd.0, false)?;
+
+    let base_args = [
+        "-serial",
+        &format!(
+            "file:{}/file_restore_vm_{}.log",
+            buildcfg::PROXMOX_BACKUP_LOG_DIR,
+            {
+                let now = proxmox::tools::time::epoch_i64();
+                proxmox::tools::time::epoch_to_rfc3339(now)?
+            },
+        ),
+        "-vnc",
+        "none",
+        "-enable-kvm",
+        "-m",
+        "512",
+        "-name",
+        name,
+        "-kernel",
+        buildcfg::PROXMOX_BACKUP_KERNEL_FN,
+        "-initrd",
+        buildcfg::PROXMOX_BACKUP_INITRAMFS_FN,
+        "-append",
+        "quiet",
+        "-daemonize",
+        "-pidfile",
+        &format!("/dev/fd/{}", pid_fd.as_raw_fd()),
+    ];
+
+    // Generate drive arguments for all fidx files in backup snapshot
+    let mut drives = Vec::new();
+    let mut id = 0;
+    for file in details.manifest.files() {
+        if !file.filename.ends_with(".img.fidx") {
+            continue;
+        }
+        drives.push("-drive".to_owned());
+        drives.push(format!(
+            "file=pbs:repository={},,snapshot={},,archive={},read-only=on,if=none,id=drive{}",
+            details.repo, details.snapshot, file.filename, id
+        ));
+        drives.push("-device".to_owned());
+        // the drive serial is used by the VM to map .fidx files to /dev paths
+        drives.push(format!(
+            "virtio-blk-pci,drive=drive{},serial={}",
+            id, file.filename
+        ));
+        id += 1;
+    }
+
+    // Try starting QEMU in a loop to retry if we fail because of a bad 'cid' value
+    loop {
+        let mut qemu_cmd = std::process::Command::new("qemu-system-x86_64");
+        qemu_cmd.args(base_args.iter());
+        qemu_cmd.args(&drives);
+        qemu_cmd.arg("-device");
+        qemu_cmd.arg(format!(
+            "vhost-vsock-pci,guest-cid={},disable-legacy=on",
+            cid
+        ));
+
+        qemu_cmd.stdout(std::process::Stdio::null());
+        qemu_cmd.stderr(std::process::Stdio::piped());
+
+        let res = tokio::task::block_in_place(|| qemu_cmd.spawn()?.wait_with_output())?;
+
+        if res.status.success() {
+            // at this point QEMU is already daemonized and running, so if anything fails we
+            // technically leave behind a zombie-VM... this shouldn't matter, as it will stop
+            // itself soon enough (timer), and the following operations are unlikely to fail
+            let mut pid_file = unsafe { File::from_raw_fd(pid_fd.as_raw_fd()) };
+            std::mem::forget(pid_fd); // the File created above now owns the raw FD
+            let mut pidstr = String::new();
+            pid_file.read_to_string(&mut pidstr)?;
+            pid = pidstr.trim_end().parse().map_err(|err| {
+                format_err!("cannot parse PID returned by QEMU ('{}'): {}", &pidstr, err)
+            })?;
+            break;
+        } else {
+            let out = String::from_utf8_lossy(&res.stderr);
+            if out.contains("unable to set guest cid: Address already in use") {
+                // CID in use, try next higher one
+                eprintln!("CID '{}' in use by other VM, attempting next one", cid);
+                cid += 1;
+            } else {
+                eprint!("{}", out);
+                bail!("Starting VM failed. See QEMU output above for more information.");
+            }
+        }
+    }
+
+    // QEMU has started successfully, now wait for virtio socket to become ready
+    let pid_t = Pid::from_raw(pid);
+    for _ in 0..60 {
+        let client = VsockClient::new(cid, DEFAULT_VSOCK_PORT);
+        if let Ok(Ok(_)) =
+            time::timeout(Duration::from_secs(2), client.get("api2/json/status", None)).await
+        {
+            return Ok(VMState { pid, cid });
+        }
+        if kill(pid_t, None).is_err() {
+            // QEMU exited
+            bail!("VM exited before connection could be established");
+        }
+        time::sleep(Duration::from_millis(500)).await;
+    }
+
+    // start failed
+    if let Err(err) = try_kill_vm(pid, name) {
+        eprintln!("killing failed VM failed: {}", err);
+    }
+    bail!("starting VM timed out");
+}
+
+impl BlockRestoreDriver for QemuBlockDriver {
+    fn status(&self) -> Async<Result<Vec<(String, Value)>, Error>> {
+        async move {
+            let mut state_map = VMStateMap::load()?;
+            let modified = cleanup_map(&mut state_map.map).await;
+            let map = if modified {
+                let m = state_map.map.clone();
+                state_map.write()?;
+                m
+            } else {
+                state_map.read_only()
+            };
+            let mut result = Vec::new();
+
+            for (n, s) in map.iter() {
+                let client = VsockClient::new(s.cid, DEFAULT_VSOCK_PORT);
+                let resp = client
+                    .get("api2/json/status", Some(json!({"keep-timeout": true})))
+                    .await;
+                let name = tools::systemd::unescape_unit(n)
+                    .unwrap_or_else(|_| "<invalid name>".to_owned());
+                let mut extra = json!({"pid": s.pid, "cid": s.cid});
+
+                match resp {
+                    Ok(status) => match status["data"].as_object() {
+                        Some(map) => {
+                            for (k, v) in map.iter() {
+                                extra[k] = v.clone();
+                            }
+                        }
+                        None => {
+                            let err = format!(
+                                "invalid JSON received from /status call: {}",
+                                status.to_string()
+                            );
+                            extra["error"] = json!(err);
+                        }
+                    },
+                    Err(err) => {
+                        let err = format!("error during /status API call: {}", err);
+                        extra["error"] = json!(err);
+                    }
+                }
+
+                result.push((name, extra));
+            }
+
+            Ok(result)
+        }
+        .boxed()
+    }
+
+    fn stop(&self, id: String) -> Async<Result<(), Error>> {
+        async move {
+            let name = tools::systemd::escape_unit(&id, false);
+            let mut map = VMStateMap::load()?;
+            match map.map.get(&name) {
+                Some(state) => {
+                    try_kill_vm(state.pid, &name)?;
+                    map.map.remove(&name);
+                    map.write()?;
+                }
+                None => {
+                    bail!("VM with name '{}' not found", name);
+                }
+            }
+            Ok(())
+        }
+        .boxed()
+    }
+
+    fn list(&self) -> Vec<String> {
+        match VMStateMap::load_read_only() {
+            Ok(state) => state
+                .iter()
+                .filter_map(|(name, _)| tools::systemd::unescape_unit(&name).ok())
+                .collect(),
+            Err(_) => Vec::new(),
+        }
+    }
+}
diff --git a/src/bin/proxmox_file_restore/mod.rs b/src/bin/proxmox_file_restore/mod.rs
new file mode 100644
index 00000000..52a1259e
--- /dev/null
+++ b/src/bin/proxmox_file_restore/mod.rs
@@ -0,0 +1,5 @@
+//! Block device drivers and tools for single file restore
+pub mod block_driver;
+pub use block_driver::*;
+
+mod block_driver_qemu;
diff --git a/src/buildcfg.rs b/src/buildcfg.rs
index 9aff8b4b..28a518ad 100644
--- a/src/buildcfg.rs
+++ b/src/buildcfg.rs
@@ -10,6 +10,14 @@ macro_rules! PROXMOX_BACKUP_RUN_DIR_M { () => ("/run/proxmox-backup") }
 #[macro_export]
 macro_rules! PROXMOX_BACKUP_LOG_DIR_M { () => ("/var/log/proxmox-backup") }
 
+#[macro_export]
+macro_rules! PROXMOX_BACKUP_CACHE_DIR_M { () => ("/var/cache/proxmox-backup") }
+
+#[macro_export]
+macro_rules! PROXMOX_BACKUP_FILE_RESTORE_BIN_DIR_M {
+    () => ("/usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore")
+}
+
 /// namespaced directory for in-memory (tmpfs) run state
 pub const PROXMOX_BACKUP_RUN_DIR: &str = PROXMOX_BACKUP_RUN_DIR_M!();
 
@@ -30,6 +38,18 @@ pub const PROXMOX_BACKUP_PROXY_PID_FN: &str = concat!(PROXMOX_BACKUP_RUN_DIR_M!(
 /// the PID filename for the privileged api daemon
 pub const PROXMOX_BACKUP_API_PID_FN: &str = concat!(PROXMOX_BACKUP_RUN_DIR_M!(), "/api.pid");
 
+/// the filename for the file-restore VM state map
+pub const PROXMOX_BACKUP_VM_MAP_FN: &str = concat!(PROXMOX_BACKUP_RUN_DIR_M!(), "/restore-vm.map");
+
+/// filename of the cached initramfs to use for booting single file restore VMs; this file is
+/// automatically created by APT hooks
+pub const PROXMOX_BACKUP_INITRAMFS_FN: &str =
+    concat!(PROXMOX_BACKUP_CACHE_DIR_M!(), "/file-restore-initramfs.img");
+
+/// filename of the kernel to use for booting single file restore VMs
+pub const PROXMOX_BACKUP_KERNEL_FN: &str =
+    concat!(PROXMOX_BACKUP_FILE_RESTORE_BIN_DIR_M!(), "/bzImage");
+
 /// Prepend configuration directory to a file name
 ///
 /// This is a simply way to get the full path for configuration files.
-- 
2.20.1