From: Stefan Reiter <s.reiter@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [pbs-devel] [PATCH proxmox-backup 5/5] file-restore-daemon/disk: add LVM (thin) support
Date: Wed, 30 Jun 2021 17:57:59 +0200
Message-ID: <20210630155759.1894155-6-s.reiter@proxmox.com>
In-Reply-To: <20210630155759.1894155-1-s.reiter@proxmox.com>
Parses the JSON output of the 'pvs' and 'lvs' LVM utilities and does two
passes: one to scan for thin pools and create a device node for their
metadata_lv, and a second to load all LVs, thin-provisioned or not.
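
For reference, both tools emit a JSON report of the shape
{"report": [{"pv": [...]}]} (or "lv" for 'lvs'). Below is a minimal,
standalone sketch of the first pass over 'pvs' output; the sample data
and the free-standing main() are illustrative only, not part of this
patch:

    use std::collections::HashMap;

    fn main() -> Result<(), serde_json::Error> {
        // abbreviated sample of 'pvs -o pv_name,vg_name --reportformat json'
        let raw = r#"{"report":[{"pv":[
            {"pv_name":"/dev/sda2","vg_name":"pve"},
            {"pv_name":"/dev/sdb1","vg_name":""}
        ]}]}"#;
        let parsed: serde_json::Value = serde_json::from_str(raw)?;

        // map VG name -> backing devices, skipping PVs not assigned to a VG
        let mut pv_map: HashMap<String, Vec<String>> = HashMap::new();
        if let Some(pvs) = parsed["report"][0]["pv"].as_array() {
            for pv in pvs {
                let vg = pv["vg_name"].as_str().unwrap_or_default();
                if vg.is_empty() {
                    continue;
                }
                let dev = pv["pv_name"].as_str().unwrap_or_default();
                pv_map.entry(vg.to_owned()).or_default().push(dev.to_owned());
            }
        }
        println!("{:?}", pv_map); // -> {"pve": ["/dev/sda2"]}
        Ok(())
    }
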
Should support every LV type that LVM supports, as we only parse the
output of the LVM tools themselves and use 'vgscan --mknodes' to create
device nodes for us.
Produces a two-layer BucketComponent hierarchy of VGs followed by LVs;
PVs are mapped to their respective disk node.
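
A file on an LVM LV is thus addressed with two bucket components (VG,
then LV), analogous to the single partition-number component of the
'part' type, e.g. (hypothetical VG/LV names):

    /drive-scsi0.img.fidx/lvm/pve/root/etc/hostname
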
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
---
src/bin/proxmox_restore_daemon/disk.rs | 189 +++++++++++++++++++++++++
1 file changed, 189 insertions(+)
diff --git a/src/bin/proxmox_restore_daemon/disk.rs b/src/bin/proxmox_restore_daemon/disk.rs
index cae62af3..42b8d496 100644
--- a/src/bin/proxmox_restore_daemon/disk.rs
+++ b/src/bin/proxmox_restore_daemon/disk.rs
@@ -62,6 +62,14 @@ struct ZFSBucketData {
size: Option<u64>,
}
+#[derive(Clone)]
+struct LVMBucketData {
+ vg_name: String,
+ lv_name: String,
+ mountpoint: Option<PathBuf>,
+ size: u64,
+}
+
/// A "Bucket" represents a mapping found on a disk, e.g. a partition, a zfs dataset or an LV. A
/// uniquely identifying path to a file then consists of four components:
/// "/disk/bucket/component/path"
@@ -77,6 +85,7 @@ enum Bucket {
Partition(PartitionBucketData),
RawFs(PartitionBucketData),
ZPool(ZFSBucketData),
+ LVM(LVMBucketData),
}
impl Bucket {
@@ -102,6 +111,13 @@ impl Bucket {
false
}
}
+ Bucket::LVM(data) => {
+ if let (Some(ref vg), Some(ref lv)) = (comp.get(0), comp.get(1)) {
+ ty == "lvm" && vg.as_ref() == &data.vg_name && lv.as_ref() == &data.lv_name
+ } else {
+ false
+ }
+ }
})
}
@@ -110,6 +126,7 @@ impl Bucket {
Bucket::Partition(_) => "part",
Bucket::RawFs(_) => "raw",
Bucket::ZPool(_) => "zpool",
+ Bucket::LVM(_) => "lvm",
}
}
@@ -127,6 +144,13 @@ impl Bucket {
Bucket::Partition(data) => data.number.to_string(),
Bucket::RawFs(_) => "raw".to_owned(),
Bucket::ZPool(data) => data.name.clone(),
+ Bucket::LVM(data) => {
+ if idx == 0 {
+ data.vg_name.clone()
+ } else {
+ data.lv_name.clone()
+ }
+ }
})
}
@@ -135,6 +159,7 @@ impl Bucket {
"part" => 1,
"raw" => 0,
"zpool" => 1,
+ "lvm" => 2,
_ => bail!("invalid bucket type for component depth: {}", type_string),
})
}
@@ -143,6 +168,13 @@ impl Bucket {
match self {
Bucket::Partition(data) | Bucket::RawFs(data) => Some(data.size),
Bucket::ZPool(data) => data.size,
+ Bucket::LVM(data) => {
+ if idx == 1 {
+ Some(data.size)
+ } else {
+ None
+ }
+ }
}
}
}
@@ -264,6 +296,21 @@ impl Filesystems {
data.size = Some(size);
}
+ let mp = PathBuf::from(mntpath);
+ data.mountpoint = Some(mp.clone());
+ Ok(mp)
+ }
+ Bucket::LVM(data) => {
+ if let Some(mp) = &data.mountpoint {
+ return Ok(mp.clone());
+ }
+
+ let mntpath = format!("/mnt/lvm/{}/{}", &data.vg_name, &data.lv_name);
+ create_dir_all(&mntpath)?;
+
+ // note: device-mapper escapes '-' in VG/LV names as '--' in node names
+ let mapper_path = format!(
+ "/dev/mapper/{}-{}",
+ data.vg_name.replace('-', "--"),
+ data.lv_name.replace('-', "--")
+ );
+ self.try_mount(&mapper_path, &mntpath)?;
+
let mp = PathBuf::from(mntpath);
data.mountpoint = Some(mp.clone());
Ok(mp)
@@ -444,12 +491,154 @@ impl DiskState {
}
}
+ Self::scan_lvm(&mut disk_map, &drive_info)?;
+
Ok(Self {
filesystems,
disk_map,
})
}
+ /// scan for LVM volumes and create device nodes for them to later mount on demand
+ fn scan_lvm(
+ disk_map: &mut HashMap<String, Vec<Bucket>>,
+ drive_info: &HashMap<String, String>,
+ ) -> Result<(), Error> {
+ // first get mapping between devices and vgs
+ let mut pv_map: HashMap<String, Vec<String>> = HashMap::new();
+ let mut cmd = Command::new("/sbin/pvs");
+ cmd.args(["-o", "pv_name,vg_name", "--reportformat", "json"].iter());
+ let result = run_command(cmd, None)?;
+ let result: serde_json::Value = serde_json::from_str(&result)?;
+ if let Some(result) = result["report"][0]["pv"].as_array() {
+ for pv in result {
+ let vg_name = pv["vg_name"].as_str().unwrap();
+ if vg_name.is_empty() {
+ continue;
+ }
+ let pv_name = pv["pv_name"].as_str().unwrap();
+ // reduce to the base device name (e.g. '/dev/sda2' -> 'sda2')
+ let pv_name = &pv_name[pv_name.rfind('/').map(|i| i + 1).unwrap_or(0)..];
+ if let Some(fidx) = drive_info.get(pv_name) {
+ info!("LVM: found VG '{}' on '{}' ({})", vg_name, pv_name, fidx);
+ match pv_map.get_mut(vg_name) {
+ Some(list) => list.push(fidx.to_owned()),
+ None => {
+ pv_map.insert(vg_name.to_owned(), vec![fidx.to_owned()]);
+ }
+ }
+ }
+ }
+ }
+
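+ // helper: have 'vgscan --mknodes' (re-)create device nodes for active LVs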
+ let mknodes = || {
+ let mut cmd = Command::new("/sbin/vgscan");
+ cmd.arg("--mknodes");
+ if let Err(err) = run_command(cmd, None) {
+ warn!("LVM: 'vgscan --mknodes' failed: {}", err);
+ }
+ };
+
+ // then scan for LVs and assign their buckets to the correct disks
+ let mut cmd = Command::new("/sbin/lvs");
+ cmd.args(
+ [
+ "-o",
+ "vg_name,lv_name,lv_size,metadata_lv",
+ "--units",
+ "B",
+ "--reportformat",
+ "json",
+ ]
+ .iter(),
+ );
+ let result = run_command(cmd, None)?;
+ let result: serde_json::Value = serde_json::from_str(&result)?;
+ let mut thinpools = Vec::new();
+ if let Some(result) = result["report"][0]["lv"].as_array() {
+ // first, look for thin-pools
+ for lv in result {
+ let metadata = lv["metadata_lv"].as_str().unwrap_or_default();
+ if !metadata.is_empty() {
+ // this is a thin-pool, activate the metadata LV
+ let vg_name = lv["vg_name"].as_str().unwrap();
+ let metadata = metadata.trim_matches(&['[', ']'][..]);
+ info!("LVM: attempting to activate thinpool '{}'", metadata);
+ let mut cmd = Command::new("/sbin/lvchange");
+ cmd.args(["-ay", "-y", &format!("{}/{}", vg_name, metadata)].iter());
+ if let Err(err) = run_command(cmd, None) {
+ // not critical, will simply mean its children can't be loaded
+ warn!("LVM: activating thinpool failed: {}", err);
+ } else {
+ thinpools.push((vg_name, metadata));
+ }
+ }
+ }
+
+ // now give the metadata LVs a device node
+ mknodes();
+
+ // cannot leave the metadata LV active, otherwise child-LVs won't activate
+ for (vg_name, metadata) in thinpools {
+ let mut cmd = Command::new("/sbin/lvchange");
+ cmd.args(["-an", "-y", &format!("{}/{}", vg_name, metadata)].iter());
+ let _ = run_command(cmd, None);
+ }
+
+ for lv in result {
+ let lv_name = lv["lv_name"].as_str().unwrap();
+ let vg_name = lv["vg_name"].as_str().unwrap();
+ let metadata = lv["metadata_lv"].as_str().unwrap_or_default();
+ if lv_name.is_empty() || vg_name.is_empty() || !metadata.is_empty() {
+ continue;
+ }
+ let lv_size = lv["lv_size"].as_str().unwrap();
+ // lv_size is in bytes with a capital 'B' at the end
+ let lv_size = lv_size[..lv_size.len() - 1].parse::<u64>().unwrap_or(0);
+
+ let bucket = Bucket::LVM(LVMBucketData {
+ vg_name: vg_name.to_owned(),
+ lv_name: lv_name.to_owned(),
+ size: lv_size,
+ mountpoint: None,
+ });
+
+ // activate the LV so 'vgscan' can create a node later - this may fail, and if it
+ // does, we ignore it and continue
+ let mut cmd = Command::new("/sbin/lvchange");
+ cmd.args(["-ay", &format!("{}/{}", vg_name, lv_name)].iter());
+ if let Err(err) = run_command(cmd, None) {
+ warn!(
+ "LVM: LV '{}' on '{}' ({}B) failed to activate: {}",
+ lv_name, vg_name, lv_size, err
+ );
+ continue;
+ }
+
+ info!(
+ "LVM: found LV '{}' on '{}' ({}B)",
+ lv_name, vg_name, lv_size
+ );
+
+ if let Some(drives) = pv_map.get(vg_name) {
+ for fidx in drives {
+ match disk_map.get_mut(fidx) {
+ Some(v) => v.push(bucket.clone()),
+ None => {
+ disk_map.insert(fidx.to_owned(), vec![bucket.clone()]);
+ }
+ }
+ }
+ }
+ }
+
+ // now that we've imported and activated all LVs, we let vgscan create the dev nodes
+ mknodes();
+ }
+
+ Ok(())
+ }
+
/// Given a path like "/drive-scsi0.img.fidx/part/0/etc/passwd", this will mount the first
/// partition of 'drive-scsi0' on-demand (i.e. if not already mounted) and return a path
/// pointing to the requested file locally, e.g. "/mnt/vda1/etc/passwd", which can be used to
--
2.30.2
Thread overview:
2021-06-30 15:57 [pbs-devel] [PATCH 0/5] Add LVM (thin) support for single file restore Stefan Reiter
2021-06-30 15:57 ` [pbs-devel] [PATCH proxmox-backup-restore-image 1/5] add LVM (thin) tooling Stefan Reiter
2021-06-30 15:57 ` [pbs-devel] [PATCH proxmox-backup 2/5] file-restore-daemon/disk: dedup BucketComponents and make size optional Stefan Reiter
2021-06-30 15:57 ` [pbs-devel] [PATCH proxmox-backup 3/5] file-restore-daemon/disk: fix component path errors Stefan Reiter
2021-06-30 15:57 ` [pbs-devel] [PATCH proxmox-backup 4/5] file-restore-daemon/disk: ignore already-mounted error and prefix zpool Stefan Reiter
2021-06-30 15:57 ` Stefan Reiter [this message]
2021-07-02 12:39 ` [pbs-devel] [PATCH 0/5] Add LVM (thin) support for single file restore Fabian Grünbichler
2021-07-05 6:26 ` Thomas Lamprecht
2021-07-05 7:18 ` Fabian Grünbichler
2021-07-05 6:12 ` [pbs-devel] applied-series: " Thomas Lamprecht