From: Gabriel Goller <g.goller@proxmox.com>
To: pbs-devel@lists.proxmox.com
Date: Mon, 16 Sep 2024 16:56:26 +0200
Message-Id: <20240916145627.515861-1-g.goller@proxmox.com>
X-Mailer: git-send-email 2.39.5
Subject: [pbs-devel] [PATCH proxmox-backup 1/2] api: avoid retrieving lsblk result twice

Avoid running `lsblk` twice when executing the `list_disk` endpoint/command.
This, along with a few other small cleanups, improves the performance of the
endpoint.

Does not really fix, but is related to: #4961.
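A rough sketch of the pattern this applies (hypothetical helper names, not the
actual PBS code; the real code uses get_lsblk_info()/get_partitions_info() in
src/tools/disks/mod.rs): parse the lsblk output once in the caller and hand a
borrowed slice to every helper that needs it, instead of letting each helper
shell out to lsblk on its own.

    // Minimal sketch of the "fetch once, pass down" pattern -- illustrative only.
    struct LsblkInfo {
        path: String,
        partition_type: Option<String>,
    }

    // Stand-in for get_lsblk_info(): in the real code this runs `lsblk` and
    // parses its JSON output exactly once.
    fn fetch_lsblk_once() -> Vec<LsblkInfo> {
        Vec::new()
    }

    // Helpers only borrow the already-fetched data instead of re-running lsblk.
    fn partition_info_for<'a>(infos: &'a [LsblkInfo], devpath: &str) -> Option<&'a LsblkInfo> {
        infos.iter().find(|i| i.path == devpath)
    }

    fn main() {
        let infos = fetch_lsblk_once(); // single lsblk invocation
        let _sda = partition_info_for(&infos, "/dev/sda");
        let _sdb = partition_info_for(&infos, "/dev/sdb"); // reuses the same data
    }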
Signed-off-by: Gabriel Goller <g.goller@proxmox.com>
---
 src/api2/node/disks/mod.rs    |  6 +++++-
 src/tools/disks/mod.rs        | 10 +++++++---
 src/tools/disks/smart.rs      | 10 +++-------
 src/tools/disks/zfs.rs        | 15 ++++++++-------
 src/tools/disks/zpool_list.rs |  2 +-
 5 files changed, 24 insertions(+), 19 deletions(-)

diff --git a/src/api2/node/disks/mod.rs b/src/api2/node/disks/mod.rs
index 4ef4ee2b8ac7..abcb8ee40ef1 100644
--- a/src/api2/node/disks/mod.rs
+++ b/src/api2/node/disks/mod.rs
@@ -115,7 +115,11 @@ pub fn smart_status(disk: String, healthonly: Option<bool>) -> Result<SmartData
diff --git a/src/tools/disks/mod.rs b/src/tools/disks/mod.rs
--- a/src/tools/disks/mod.rs
+++ b/src/tools/disks/mod.rs
@@ ... @@ fn get_partitions_info(
     lvm_devices: &HashSet<u64>,
     zfs_devices: &HashSet<u64>,
     file_system_devices: &HashSet<u64>,
+    lsblk_infos: &[LsblkInfo],
 ) -> Vec<PartitionInfo> {
-    let lsblk_infos = get_lsblk_info().ok();
     partitions
         .values()
         .map(|disk| {
@@ -887,8 +887,8 @@ fn get_partitions_info(
             let mounted = disk.is_mounted().unwrap_or(false);
             let mut filesystem = None;

-            if let (Some(devpath), Some(infos)) = (devpath.as_ref(), lsblk_infos.as_ref()) {
-                for info in infos.iter().filter(|i| i.path.eq(devpath)) {
+            if let Some(devpath) = devpath.as_ref() {
+                for info in lsblk_infos.iter().filter(|i| i.path.eq(devpath)) {
                     used = match info.partition_type.as_deref() {
                         Some("21686148-6449-6e6f-744e-656564454649") => PartitionUsageType::BIOS,
                         Some("c12a7328-f81f-11d2-ba4b-00a0c93ec93b") => PartitionUsageType::EFI,
@@ -942,6 +942,7 @@ fn get_disks(
     // fixme: ceph journals/volumes

     let mut result = HashMap::new();
+    let mut device_paths = Vec::new();

     for item in
         proxmox_sys::fs::scan_subdir(libc::AT_FDCWD, "/sys/block", &BLOCKDEVICE_NAME_REGEX)?
     {
@@ -1009,6 +1010,8 @@ fn get_disks(
             .map(|p| p.to_owned())
             .map(|p| p.to_string_lossy().to_string());

+        device_paths.push((name.clone(), devpath.clone()));
+
         let wwn = disk.wwn().map(|s| s.to_string_lossy().into_owned());

         let partitions: Option<Vec<PartitionInfo>> = if include_partitions {
@@ -1018,6 +1021,7 @@ fn get_disks(
                     &lvm_devices,
                     &zfs_devices,
                     &file_system_devices,
+                    &lsblk_info,
                 ))
             })
         } else {
diff --git a/src/tools/disks/smart.rs b/src/tools/disks/smart.rs
index 3ad782b7b248..4868815f6f32 100644
--- a/src/tools/disks/smart.rs
+++ b/src/tools/disks/smart.rs
@@ -1,8 +1,8 @@
-use std::collections::{HashMap, HashSet};
 use std::sync::LazyLock;
+use std::{collections::{HashMap, HashSet}, path::Path};

 use ::serde::{Deserialize, Serialize};
-use anyhow::{bail, Error};
+use anyhow::Error;

 use proxmox_schema::api;

@@ -76,7 +76,7 @@ pub struct SmartData {
 }

 /// Read smartctl data for a disk (/dev/XXX).
-pub fn get_smart_data(disk: &super::Disk, health_only: bool) -> Result<SmartData, Error> {
+pub fn get_smart_data(disk_path: &Path, health_only: bool) -> Result<SmartData, Error> {
     const SMARTCTL_BIN_PATH: &str = "smartctl";

     let mut command = std::process::Command::new(SMARTCTL_BIN_PATH);
@@ -85,10 +85,6 @@ pub fn get_smart_data(disk: &super::Disk, health_only: bool) -> Result<SmartDat
-    let disk_path = match disk.device_path() {
-        Some(path) => path,
-        None => bail!("disk {:?} has no node in /dev", disk.syspath()),
-    };
     command.arg(disk_path);

     let output = proxmox_sys::command::run_command(
diff --git a/src/tools/disks/zfs.rs b/src/tools/disks/zfs.rs
index 2abb5176c1e6..3b7da1540835 100644
--- a/src/tools/disks/zfs.rs
+++ b/src/tools/disks/zfs.rs
@@ -70,7 +70,7 @@ pub fn zfs_pool_stats(pool: &OsStr) -> Result<Option<BlockDevStat>, Error> {
 ///
 /// The set is indexed by using the unix raw device number (dev_t is u64)
 pub fn zfs_devices(lsblk_info: &[LsblkInfo], pool: Option<String>) -> Result<HashSet<u64>, Error> {
-    let list = zpool_list(pool, true)?;
+    let list = zpool_list(pool.as_ref(), true)?;

     let mut device_set = HashSet::new();
     for entry in list {
@@ -79,12 +79,13 @@ pub fn zfs_devices(lsblk_info: &[LsblkInfo], pool: Option<String>) -> Result<Has
diff --git a/src/tools/disks/zpool_list.rs b/src/tools/disks/zpool_list.rs
--- a/src/tools/disks/zpool_list.rs
+++ b/src/tools/disks/zpool_list.rs
@@ ... @@
 ///
 /// Devices are only included when run with verbose flags
 /// set. Without, device lists are empty.
-pub fn zpool_list(pool: Option<String>, verbose: bool) -> Result<Vec<ZFSPoolInfo>, Error> {
+pub fn zpool_list(pool: Option<&String>, verbose: bool) -> Result<Vec<ZFSPoolInfo>, Error> {
     // Note: zpools list verbose output can include entries for 'special', 'cache' and 'logs'
     // and maybe other things.
-- 
2.39.5