From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from firstgate.proxmox.com (firstgate.proxmox.com [212.224.123.68]) (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits) key-exchange X25519 server-signature RSA-PSS (2048 bits)) (No client certificate requested) by lists.proxmox.com (Postfix) with ESMTPS id 3CC086A66D for ; Tue, 16 Feb 2021 18:08:26 +0100 (CET) Received: from firstgate.proxmox.com (localhost [127.0.0.1]) by firstgate.proxmox.com (Proxmox) with ESMTP id 314D51939F for ; Tue, 16 Feb 2021 18:07:56 +0100 (CET) Received: from proxmox-new.maurer-it.com (proxmox-new.maurer-it.com [212.186.127.180]) (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits) key-exchange X25519 server-signature RSA-PSS (2048 bits)) (No client certificate requested) by firstgate.proxmox.com (Proxmox) with ESMTPS id DD31818E9F for ; Tue, 16 Feb 2021 18:07:34 +0100 (CET) Received: from proxmox-new.maurer-it.com (localhost.localdomain [127.0.0.1]) by proxmox-new.maurer-it.com (Proxmox) with ESMTP id A6CD9461C6 for ; Tue, 16 Feb 2021 18:07:34 +0100 (CET) From: Stefan Reiter To: pbs-devel@lists.proxmox.com Date: Tue, 16 Feb 2021 18:07:06 +0100 Message-Id: <20210216170710.31767-19-s.reiter@proxmox.com> X-Mailer: git-send-email 2.20.1 In-Reply-To: <20210216170710.31767-1-s.reiter@proxmox.com> References: <20210216170710.31767-1-s.reiter@proxmox.com> MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-SPAM-LEVEL: Spam detection results: 0 AWL -0.026 Adjusted score from AWL reputation of From: address KAM_DMARC_STATUS 0.01 Test Rule for DKIM or SPF Failure with Strict Alignment RCVD_IN_DNSWL_MED -2.3 Sender listed at https://www.dnswl.org/, medium trust SPF_HELO_NONE 0.001 SPF: HELO does not publish an SPF Record SPF_PASS -0.001 SPF: sender matches SPF record URIBL_BLOCKED 0.001 ADMINISTRATOR NOTICE: The query to URIBL was blocked. See http://wiki.apache.org/spamassassin/DnsBlocklists#dnsbl-block for more information. 
[mod.rs, proxmox-file-restore.rs, api.pid, restore-vm.map, self.map, buildcfg.rs] Subject: [pbs-devel] [PATCH proxmox-backup 18/22] file-restore: add basic VM/block device support X-BeenThere: pbs-devel@lists.proxmox.com X-Mailman-Version: 2.1.29 Precedence: list List-Id: Proxmox Backup Server development discussion List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Tue, 16 Feb 2021 17:08:26 -0000 Includes methods to start, stop and list QEMU file-restore VMs, as well as CLI commands to do the latter two. The implementation is abstracted behind the concept of a "BlockRestoreDriver", so other methods can be implemented later (e.g. mapping directly to loop devices on the host, using other hypervisors than QEMU, etc...). Starting VMs is currently unused but will be needed for further changes. The design for the QEMU driver uses a locked 'map' file (/run/proxmox-backup/restore-vm.map) containing a JSON encoding of currently running VMs. VMs are addressed by a 'name', which is a systemd-unit encoded combination of repository and snapshot string, thus uniquely identifying it. 
Signed-off-by: Stefan Reiter --- debian/proxmox-backup-client.install | 1 - debian/proxmox-file-restore.install | 1 + src/bin/proxmox-file-restore.rs | 16 +- src/bin/proxmox_file_restore/block_driver.rs | 157 +++++++ .../proxmox_file_restore/block_driver_qemu.rs | 407 ++++++++++++++++++ src/bin/proxmox_file_restore/mod.rs | 5 + src/buildcfg.rs | 20 + 7 files changed, 603 insertions(+), 4 deletions(-) create mode 100644 src/bin/proxmox_file_restore/block_driver.rs create mode 100644 src/bin/proxmox_file_restore/block_driver_qemu.rs create mode 100644 src/bin/proxmox_file_restore/mod.rs diff --git a/debian/proxmox-backup-client.install b/debian/proxmox-backup-client.install index b203f152..74b568f1 100644 --- a/debian/proxmox-backup-client.install +++ b/debian/proxmox-backup-client.install @@ -1,6 +1,5 @@ usr/bin/proxmox-backup-client usr/bin/pxar -usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore/proxmox-restore-daemon usr/share/man/man1/proxmox-backup-client.1 usr/share/man/man1/pxar.1 usr/share/zsh/vendor-completions/_proxmox-backup-client diff --git a/debian/proxmox-file-restore.install b/debian/proxmox-file-restore.install index 2082e46b..d952836e 100644 --- a/debian/proxmox-file-restore.install +++ b/debian/proxmox-file-restore.install @@ -1,3 +1,4 @@ usr/bin/proxmox-file-restore usr/share/man/man1/proxmox-file-restore.1 usr/share/zsh/vendor-completions/_proxmox-file-restore +usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore/proxmox-restore-daemon diff --git a/src/bin/proxmox-file-restore.rs b/src/bin/proxmox-file-restore.rs index ec3378b0..767cc057 100644 --- a/src/bin/proxmox-file-restore.rs +++ b/src/bin/proxmox-file-restore.rs @@ -32,6 +32,9 @@ use proxmox_client_tools::{ KEYFD_SCHEMA, KEYFILE_SCHEMA, REPO_URL_SCHEMA, }; +mod proxmox_file_restore; +use proxmox_file_restore::*; + enum ExtractPath { ListArchives, Pxar(String, Vec), @@ -48,7 +51,7 @@ fn parse_path(path: String, base64: bool) -> Result { return Ok(ExtractPath::ListArchives); } - while 
bytes.len() > 0 && bytes[0] == b'/' { + while !bytes.is_empty() && bytes[0] == b'/' { bytes.remove(0); } @@ -319,7 +322,7 @@ async fn extract(param: Value) -> Result { let file = root .lookup(OsStr::from_bytes(&path)) .await? - .ok_or(format_err!("error opening '{:?}'", path))?; + .ok_or_else(|| format_err!("error opening '{:?}'", path))?; if let Some(target) = target { extract_sub_dir(target, decoder, OsStr::from_bytes(&path), verbose).await?; @@ -361,9 +364,16 @@ fn main() { .completion_cb("snapshot", complete_group_or_snapshot) .completion_cb("target", tools::complete_file_name); + let status_cmd_def = CliCommand::new(&API_METHOD_STATUS); + let stop_cmd_def = CliCommand::new(&API_METHOD_STOP) + .arg_param(&["name"]) + .completion_cb("name", complete_block_driver_ids); + let cmd_def = CliCommandMap::new() .insert("list", list_cmd_def) - .insert("extract", restore_cmd_def); + .insert("extract", restore_cmd_def) + .insert("status", status_cmd_def) + .insert("stop", stop_cmd_def); let rpcenv = CliEnvironment::new(); run_cli_command( diff --git a/src/bin/proxmox_file_restore/block_driver.rs b/src/bin/proxmox_file_restore/block_driver.rs new file mode 100644 index 00000000..0ba67f34 --- /dev/null +++ b/src/bin/proxmox_file_restore/block_driver.rs @@ -0,0 +1,157 @@ +//! 
Abstraction layer over different methods of accessing a block backup +use anyhow::{bail, Error}; +use serde::{Deserialize, Serialize}; +use serde_json::{json, Value}; + +use std::collections::HashMap; +use std::future::Future; +use std::hash::BuildHasher; +use std::pin::Pin; + +use proxmox_backup::backup::{BackupDir, BackupManifest}; +use proxmox_backup::client::BackupRepository; + +use proxmox::api::{api, cli::*}; + +use super::block_driver_qemu::QemuBlockDriver; + +/// Contains details about a snapshot that is to be accessed by block file restore +pub struct SnapRestoreDetails { + pub repo: BackupRepository, + pub snapshot: BackupDir, + pub manifest: BackupManifest, +} + +pub type Async = Pin + Send>>; + +/// An abstract implementation for retrieving data out of a block file backup +pub trait BlockRestoreDriver { + /// Return status of all running/mapped images, result value is (id, extra data), where id must + /// match with the ones returned from list() + fn status(&self) -> Async, Error>>; + /// Stop/Close a running restore method + fn stop(&self, id: String) -> Async>; + /// Returned ids must be prefixed with driver type so that they cannot collide between drivers, + /// the returned values must be passable to stop() + fn list(&self) -> Vec; +} + +#[api()] +#[derive(Debug, Serialize, Deserialize, PartialEq, Clone, Copy)] +pub enum BlockDriverType { + /// Uses a small QEMU/KVM virtual machine to map images securely. Requires PVE-patched QEMU. 
+ Qemu, +} + +impl BlockDriverType { + fn resolve(&self) -> impl BlockRestoreDriver { + match self { + BlockDriverType::Qemu => QemuBlockDriver {}, + } + } +} + +const DEFAULT_DRIVER: BlockDriverType = BlockDriverType::Qemu; +const ALL_DRIVERS: &[BlockDriverType] = &[BlockDriverType::Qemu]; + +#[api( + input: { + properties: { + "driver": { + type: BlockDriverType, + optional: true, + }, + "output-format": { + schema: OUTPUT_FORMAT, + optional: true, + }, + }, + }, +)] +/// Retrieve status information about currently running/mapped restore images +pub async fn status(driver: Option, param: Value) -> Result<(), Error> { + let output_format = get_output_format(¶m); + let text = output_format == "text"; + + let mut ret = json!({}); + + for dt in ALL_DRIVERS { + if driver.is_some() && &driver.unwrap() != dt { + continue; + } + + let drv_name = format!("{:?}", dt); + let drv = dt.resolve(); + match drv.status().await { + Ok(data) if data.is_empty() => { + if text { + println!("{}: no mappings", drv_name); + } else { + ret[drv_name] = json!({}); + } + } + Ok(data) => { + if text { + println!("{}:", drv_name); + } + + ret[&drv_name]["ids"] = json!([]); + for (id, extra) in data { + if text { + println!("{} \t({})", id, extra); + } else { + ret[&drv_name]["ids"][id] = extra; + } + } + } + Err(err) => { + if text { + eprintln!("error getting status from driver '{}' - {}", drv_name, err); + } else { + ret[drv_name] = json!({ "error": format!("{}", err) }); + } + } + } + } + + if !text { + format_and_print_result(&ret, &output_format); + } + + Ok(()) +} + +#[api( + input: { + properties: { + "name": { + type: String, + description: "The name of the VM to stop.", + }, + }, + }, +)] +/// Immediately stop/unmap a given image. Not typically necessary, as VMs will stop themselves +/// after a timer anyway. 
+pub async fn stop(name: String) -> Result<(), Error> { + for drv in ALL_DRIVERS.iter().map(BlockDriverType::resolve) { + if drv.list().contains(&name) { + return drv.stop(name).await; + } + } + + bail!("no mapping with name '{}' found", name); +} + +/// Autocompletion handler for block mappings +pub fn complete_block_driver_ids( + _arg: &str, + _param: &HashMap, +) -> Vec { + ALL_DRIVERS + .iter() + .map(BlockDriverType::resolve) + .map(|d| d.list()) + .flatten() + .collect() +} diff --git a/src/bin/proxmox_file_restore/block_driver_qemu.rs b/src/bin/proxmox_file_restore/block_driver_qemu.rs new file mode 100644 index 00000000..8bbea962 --- /dev/null +++ b/src/bin/proxmox_file_restore/block_driver_qemu.rs @@ -0,0 +1,407 @@ +//! Block file access via a small QEMU restore VM using the PBS block driver in QEMU +use anyhow::{bail, format_err, Error}; +use futures::FutureExt; +use serde::{Deserialize, Serialize}; +use serde_json::{json, Value}; + +use std::collections::HashMap; +use std::fs::{File, OpenOptions}; +use std::io::{prelude::*, SeekFrom}; +use std::path::PathBuf; +use std::time::Duration; + +use tokio::time; + +use proxmox::tools::fs::{file_read_string, lock_file, make_tmp_file, CreateOptions}; +use proxmox_backup::backup::BackupDir; +use proxmox_backup::buildcfg; +use proxmox_backup::client::*; +use proxmox_backup::tools; + +use super::block_driver::*; + +pub struct QemuBlockDriver {} + +#[derive(Clone, Hash, Serialize, Deserialize)] +struct VMState { + pid: i32, + cid: i32, +} + +struct VMStateMap { + map: HashMap, + file: File, +} + +impl VMStateMap { + fn open_file_raw(write: bool) -> Result { + // ensure the file is only created as root to get correct permissions + let running_uid = nix::unistd::Uid::effective(); + if running_uid.is_root() { + std::fs::create_dir_all(buildcfg::PROXMOX_BACKUP_RUN_DIR)?; + } + + OpenOptions::new() + .read(true) + .write(write) + .create(write && running_uid.is_root()) + .open(buildcfg::PROXMOX_BACKUP_VM_MAP_FN) + 
.map_err(Error::from) + } + + /// Acquire a lock on the state map and retrieve a deserialized version + fn load() -> Result { + let mut file = Self::open_file_raw(true)?; + lock_file(&mut file, true, Some(std::time::Duration::from_secs(5)))?; + let map = serde_json::from_reader(&file).unwrap_or_default(); + Ok(Self { map, file }) + } + + /// Load a read-only copy of the current VM map. Only use for informational purposes, like + /// shell auto-completion, for anything requiring consistency use load() ! + fn load_read_only() -> Result, Error> { + let file = Self::open_file_raw(false)?; + Ok(serde_json::from_reader(&file).unwrap_or_default()) + } + + /// Write back a potentially modified state map, consuming the held lock + fn write(mut self) -> Result<(), Error> { + self.file.seek(SeekFrom::Start(0))?; + self.file.set_len(0)?; + serde_json::to_writer(self.file, &self.map)?; + + // drop ourselves including file lock + Ok(()) + } + + /// Return the map, but drop the lock immediately + fn read_only(self) -> HashMap { + self.map + } +} + +fn validate_img_existance() -> Result<(), Error> { + let kernel = PathBuf::from(buildcfg::PROXMOX_BACKUP_KERNEL_FN); + let initramfs = PathBuf::from(buildcfg::PROXMOX_BACKUP_INITRAMFS_FN); + if !kernel.exists() || !initramfs.exists() { + bail!("cannot run file-restore VM: package 'proxmox-file-restore' is not (correctly) installed"); + } + Ok(()) +} + +fn make_name(repo: &BackupRepository, snap: &BackupDir) -> String { + let full = format!("qemu_{}/{}", repo, snap); + tools::systemd::escape_unit(&full, false) +} + +fn try_kill_vm(pid: i32, name: &str) -> Result<(), Error> { + use nix::sys::signal::{kill, Signal}; + use nix::unistd::Pid; + + let pid = Pid::from_raw(pid); + if let Ok(()) = kill(pid, None) { + // process is running (and we could kill it), check if it is actually ours + if let Ok(cmdline) = file_read_string(format!("/proc/{}/cmdline", pid)) { + if cmdline.split('\0').any(|a| a == name) { + // yes, it's ours, kill it 
brutally with SIGKILL, no reason to take + // any chances - in this state it's most likely broken anyway + if let Err(err) = kill(pid, Signal::SIGKILL) { + bail!( + "reaping broken VM (pid {}) with SIGKILL failed: {}", + pid, + err + ); + } + } + } + } + + Ok(()) +} + +/// remove non-responsive VMs from given map, returns 'true' if map was modified +async fn cleanup_map(map: &mut HashMap) -> bool { + let mut to_remove = Vec::new(); + for (name, state) in map.iter() { + let client = VsockClient::new(state.cid, DEFAULT_VSOCK_PORT); + let res = client + .get("api2/json/status", Some(json!({"keep-timeout": true}))) + .await; + if res.is_err() { + // VM is not reachable, remove from map then try reap + to_remove.push(name.clone()); + if let Err(err) = try_kill_vm(state.pid, name) { + eprintln!("restore VM cleanup: {}", err); + } + } + } + + for tr in &to_remove { + map.remove(tr); + } + + !to_remove.is_empty() +} + +async fn ensure_running(details: &SnapRestoreDetails) -> Result { + let name = make_name(&details.repo, &details.snapshot); + let mut state = VMStateMap::load()?; + + cleanup_map(&mut state.map).await; + + let new_cid; + match state.map.get(&name) { + Some(vm) => { + let client = VsockClient::new(vm.cid, DEFAULT_VSOCK_PORT); + let res = client.get("api2/json/status", None).await; + match res { + Ok(_) => { + // VM is running and we just reset its timeout, nothing to do + return Ok(client); + } + Err(err) => { + eprintln!("dead VM detected: {}", err); + // VM is dead, restart + try_kill_vm(vm.pid, &name)?; + let vms = start_vm(vm.cid, &name, details).await?; + new_cid = vms.cid; + state.map.insert(name, vms); + } + } + } + None => { + let cid = state + .map + .iter() + .map(|v| v.1.cid) + .max() + .unwrap_or(10) // some low CIDs have special meaning, start at 10 to avoid + + 1; + + let vms = start_vm(cid, &name, details).await?; + new_cid = vms.cid; + state.map.insert(name, vms); + } + } + + state.write()?; + Ok(VsockClient::new(new_cid, DEFAULT_VSOCK_PORT)) 
+} + +async fn start_vm( + mut cid: i32, + name: &str, + details: &SnapRestoreDetails, +) -> Result { + use nix::sys::signal::kill; + use nix::unistd::Pid; + use std::os::unix::io::{AsRawFd, FromRawFd}; + + validate_img_existance()?; + + if let Err(_) = std::env::var("PBS_PASSWORD") { + bail!("environment variable PBS_PASSWORD has to be set for QEMU VM restore"); + } + if let Err(_) = std::env::var("PBS_FINGERPRINT") { + bail!("environment variable PBS_FINGERPRINT has to be set for QEMU VM restore"); + } + + let pid; + let (pid_fd, pid_path) = make_tmp_file("/tmp", CreateOptions::new())?; + nix::unistd::unlink(&pid_path)?; + tools::fd_change_cloexec(pid_fd.0, false)?; + + let base_args = [ + "-serial", + &format!( + "file:{}/file_restore_vm_{}.log", + buildcfg::PROXMOX_BACKUP_LOG_DIR, + { + let now = proxmox::tools::time::epoch_i64(); + proxmox::tools::time::epoch_to_rfc3339(now)? + }, + ), + "-vnc", + "none", + "-enable-kvm", + "-m", + "512", + "-name", + name, + "-kernel", + buildcfg::PROXMOX_BACKUP_KERNEL_FN, + "-initrd", + buildcfg::PROXMOX_BACKUP_INITRAMFS_FN, + "-append", + "quiet", + "-daemonize", + "-pidfile", + &format!("/dev/fd/{}", pid_fd.as_raw_fd()), + ]; + + // Generate drive arguments for all fidx files in backup snapshot + let mut drives = Vec::new(); + let mut id = 0; + for file in details.manifest.files() { + if !file.filename.ends_with(".img.fidx") { + continue; + } + drives.push("-drive".to_owned()); + drives.push(format!( + "file=pbs:repository={},,snapshot={},,archive={},read-only=on,if=none,id=drive{}", + details.repo, details.snapshot, file.filename, id + )); + drives.push("-device".to_owned()); + // drive serial is used by VM to map .fidx files to /dev paths + drives.push(format!( + "virtio-blk-pci,drive=drive{},serial={}", + id, file.filename + )); + id += 1; + } + + // Try starting QEMU in a loop to retry if we fail because of a bad 'cid' value + loop { + let mut qemu_cmd = std::process::Command::new("qemu-system-x86_64"); + 
qemu_cmd.args(base_args.iter()); + qemu_cmd.args(&drives); + qemu_cmd.arg("-device"); + qemu_cmd.arg(format!( + "vhost-vsock-pci,guest-cid={},disable-legacy=on", + cid + )); + + qemu_cmd.stdout(std::process::Stdio::null()); + qemu_cmd.stderr(std::process::Stdio::piped()); + + let res = tokio::task::block_in_place(|| qemu_cmd.spawn()?.wait_with_output())?; + + if res.status.success() { + // at this point QEMU is already daemonized and running, so if anything fails we + // technically leave behind a zombie-VM... this shouldn't matter, as it will stop + // itself soon enough (timer), and the following operations are unlikely to fail + let mut pid_file = unsafe { File::from_raw_fd(pid_fd.as_raw_fd()) }; + std::mem::forget(pid_fd); // FD ownership is now in pid_fd/File + let mut pidstr = String::new(); + pid_file.read_to_string(&mut pidstr)?; + pid = pidstr.trim_end().parse().map_err(|err| { + format_err!("cannot parse PID returned by QEMU ('{}'): {}", &pidstr, err) + })?; + break; + } else { + let out = String::from_utf8_lossy(&res.stderr); + if out.contains("unable to set guest cid: Address already in use") { + // CID in use, try next higher one + eprintln!("CID '{}' in use by other VM, attempting next one", cid); + cid += 1; + } else { + eprint!("{}", out); + bail!("Starting VM failed. 
See QEMU output above for more information."); + } + } + } + + // QEMU has started successfully, now wait for virtio socket to become ready + let pid_t = Pid::from_raw(pid); + for _ in 0..60 { + let client = VsockClient::new(cid, DEFAULT_VSOCK_PORT); + if let Ok(Ok(_)) = + time::timeout(Duration::from_secs(2), client.get("api2/json/status", None)).await + { + return Ok(VMState { pid, cid }); + } + if kill(pid_t, None).is_err() { + // QEMU exited + bail!("VM exited before connection could be established"); + } + time::sleep(Duration::from_millis(500)).await; + } + + // start failed + if let Err(err) = try_kill_vm(pid, name) { + eprintln!("killing failed VM failed: {}", err); + } + bail!("starting VM timed out"); +} + +impl BlockRestoreDriver for QemuBlockDriver { + fn status(&self) -> Async, Error>> { + async move { + let mut state_map = VMStateMap::load()?; + let modified = cleanup_map(&mut state_map.map).await; + let map = if modified { + let m = state_map.map.clone(); + state_map.write()?; + m + } else { + state_map.read_only() + }; + let mut result = Vec::new(); + + for (n, s) in map.iter() { + let client = VsockClient::new(s.cid, DEFAULT_VSOCK_PORT); + let resp = client + .get("api2/json/status", Some(json!({"keep-timeout": true}))) + .await; + let name = tools::systemd::unescape_unit(n) + .unwrap_or_else(|_| "".to_owned()); + let mut extra = json!({"pid": s.pid, "cid": s.cid}); + + match resp { + Ok(status) => match status["data"].as_object() { + Some(map) => { + for (k, v) in map.iter() { + extra[k] = v.clone(); + } + } + None => { + let err = format!( + "invalid JSON received from /status call: {}", + status.to_string() + ); + extra["error"] = json!(err); + } + }, + Err(err) => { + let err = format!("error during /status API call: {}", err); + extra["error"] = json!(err); + } + } + + result.push((name, extra)); + } + + Ok(result) + } + .boxed() + } + + fn stop(&self, id: String) -> Async> { + async move { + let name = tools::systemd::escape_unit(&id, false); 
+ let mut map = VMStateMap::load()?; + match map.map.get(&name) { + Some(state) => { + try_kill_vm(state.pid, &name)?; + map.map.remove(&name); + map.write()?; + } + None => { + bail!("VM with name '{}' not found", name); + } + } + Ok(()) + } + .boxed() + } + + fn list(&self) -> Vec { + match VMStateMap::load_read_only() { + Ok(state) => state + .iter() + .filter_map(|(name, _)| tools::systemd::unescape_unit(&name).ok()) + .collect(), + Err(_) => Vec::new(), + } + } +} diff --git a/src/bin/proxmox_file_restore/mod.rs b/src/bin/proxmox_file_restore/mod.rs new file mode 100644 index 00000000..52a1259e --- /dev/null +++ b/src/bin/proxmox_file_restore/mod.rs @@ -0,0 +1,5 @@ +//! Block device drivers and tools for single file restore +pub mod block_driver; +pub use block_driver::*; + +mod block_driver_qemu; diff --git a/src/buildcfg.rs b/src/buildcfg.rs index 9aff8b4b..28a518ad 100644 --- a/src/buildcfg.rs +++ b/src/buildcfg.rs @@ -10,6 +10,14 @@ macro_rules! PROXMOX_BACKUP_RUN_DIR_M { () => ("/run/proxmox-backup") } #[macro_export] macro_rules! PROXMOX_BACKUP_LOG_DIR_M { () => ("/var/log/proxmox-backup") } +#[macro_export] +macro_rules! PROXMOX_BACKUP_CACHE_DIR_M { () => ("/var/cache/proxmox-backup") } + +#[macro_export] +macro_rules! 
PROXMOX_BACKUP_FILE_RESTORE_BIN_DIR_M { + () => ("/usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore") +} + /// namespaced directory for in-memory (tmpfs) run state pub const PROXMOX_BACKUP_RUN_DIR: &str = PROXMOX_BACKUP_RUN_DIR_M!(); @@ -30,6 +38,18 @@ pub const PROXMOX_BACKUP_PROXY_PID_FN: &str = concat!(PROXMOX_BACKUP_RUN_DIR_M!( /// the PID filename for the privileged api daemon pub const PROXMOX_BACKUP_API_PID_FN: &str = concat!(PROXMOX_BACKUP_RUN_DIR_M!(), "/api.pid"); +/// the filename for the file-restore VM state map +pub const PROXMOX_BACKUP_VM_MAP_FN: &str = concat!(PROXMOX_BACKUP_RUN_DIR_M!(), "/restore-vm.map"); + +/// filename of the cached initramfs to use for booting single file restore VMs, this file is +/// automatically created by APT hooks +pub const PROXMOX_BACKUP_INITRAMFS_FN: &str = + concat!(PROXMOX_BACKUP_CACHE_DIR_M!(), "/file-restore-initramfs.img"); + +/// filename of the kernel to use for booting single file restore VMs +pub const PROXMOX_BACKUP_KERNEL_FN: &str = + concat!(PROXMOX_BACKUP_FILE_RESTORE_BIN_DIR_M!(), "/bzImage"); + /// Prepend configuration directory to a file name /// /// This is a simply way to get the full path for configuration files. -- 2.20.1