From: Christoph Heiss <c.heiss@proxmox.com>
To: pve-devel@lists.proxmox.com
Date: Thu, 10 Aug 2023 12:31:57 +0200
Message-ID: <20230810103158.436341-3-c.heiss@proxmox.com>
X-Mailer: git-send-email 2.41.0
In-Reply-To: <20230810103158.436341-1-c.heiss@proxmox.com>
References: <20230810103158.436341-1-c.heiss@proxmox.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Subject: [pve-devel] [PATCH installer 2/2] tui: disallow legacy BIOS boot
 from 4Kn disks for all filesystems

The GUI installer already enforces the same rule, disallowing booting
from 4Kn disks in legacy BIOS mode. The TUI installer currently only
performs this check for ZFS RAID setups, so extend it to all
filesystem configurations.

Signed-off-by: Christoph Heiss <c.heiss@proxmox.com>
---
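For reference, the check that now runs for every filesystem choice is the new
helper in views/bootdisk.rs, reproduced here from the diff below:

    fn check_disks_4kn_legacy_boot(boot_type: BootType, disks: &[Disk]) -> Result<(), &str> {
        // A block size of 4096 marks a 4Kn disk; an unknown block size is
        // treated like 512-byte sectors, i.e. not 4Kn.
        let is_blocksize_4096 = |disk: &Disk| disk.block_size.map(|s| s == 4096).unwrap_or(false);

        // Legacy BIOS cannot boot from 4Kn disks, so reject the setup if any
        // of the target disks is 4Kn.
        if boot_type == BootType::Bios && disks.iter().any(is_blocksize_4096) {
            return Err("Booting from 4Kn drive in legacy BIOS mode is not supported.");
        }

        Ok(())
    }
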
 proxmox-tui-installer/src/main.rs           |   2 +-
 proxmox-tui-installer/src/setup.rs          |   2 +-
 proxmox-tui-installer/src/views/bootdisk.rs | 171 ++++++++------------
 3 files changed, 70 insertions(+), 105 deletions(-)

diff --git a/proxmox-tui-installer/src/main.rs b/proxmox-tui-installer/src/main.rs
index 580cb34..23a4ead 100644
--- a/proxmox-tui-installer/src/main.rs
+++ b/proxmox-tui-installer/src/main.rs
@@ -431,7 +431,7 @@ fn bootdisk_dialog(siv: &mut Cursive) -> InstallerView {
 
     InstallerView::new(
         &state,
-        BootdiskOptionsView::new(&state.runtime_info.disks, &state.options.bootdisk)
+        BootdiskOptionsView::new(siv, &state.runtime_info.disks, &state.options.bootdisk)
             .with_name("bootdisk-options"),
         Box::new(|siv| {
             let options = siv.call_on_name("bootdisk-options", BootdiskOptionsView::get_values);
diff --git a/proxmox-tui-installer/src/setup.rs b/proxmox-tui-installer/src/setup.rs
index dec91cb..46b47cd 100644
--- a/proxmox-tui-installer/src/setup.rs
+++ b/proxmox-tui-installer/src/setup.rs
@@ -392,7 +392,7 @@ pub struct RuntimeInfo {
     pub hvm_supported: bool,
 }
 
-#[derive(Clone, Eq, Deserialize, PartialEq)]
+#[derive(Copy, Clone, Eq, Deserialize, PartialEq)]
 #[serde(rename_all = "lowercase")]
 pub enum BootType {
     Bios,
diff --git a/proxmox-tui-installer/src/views/bootdisk.rs b/proxmox-tui-installer/src/views/bootdisk.rs
index d01495e..dbd13ea 100644
--- a/proxmox-tui-installer/src/views/bootdisk.rs
+++ b/proxmox-tui-installer/src/views/bootdisk.rs
@@ -16,17 +16,18 @@ use crate::{
         FsType, LvmBootdiskOptions, ZfsBootdiskOptions, ZfsRaidLevel, FS_TYPES,
         ZFS_CHECKSUM_OPTIONS, ZFS_COMPRESS_OPTIONS,
     },
-    setup::{BootType, RuntimeInfo},
+    setup::BootType,
 };
 use crate::{setup::ProxmoxProduct, InstallerState};
 
 pub struct BootdiskOptionsView {
     view: LinearLayout,
     advanced_options: Rc<RefCell<BootdiskOptions>>,
+    boot_type: BootType,
 }
 
 impl BootdiskOptionsView {
-    pub fn new(disks: &[Disk], options: &BootdiskOptions) -> Self {
+    pub fn new(siv: &mut Cursive, disks: &[Disk], options: &BootdiskOptions) -> Self {
         let bootdisk_form = FormView::new()
             .child(
                 "Target harddisk",
@@ -53,9 +54,15 @@ impl BootdiskOptionsView {
             .child(DummyView)
             .child(advanced_button);
 
+        let boot_type = siv
+            .user_data::<InstallerState>()
+            .map(|state| state.runtime_info.boot_type)
+            .unwrap_or(BootType::Bios);
+
         Self {
             view,
             advanced_options,
+            boot_type,
         }
     }
 
@@ -74,6 +81,7 @@ impl BootdiskOptionsView {
             options.disks = vec![disk];
         }
 
+        check_disks_4kn_legacy_boot(self.boot_type, &options.disks)?;
         Ok(options)
     }
 }
@@ -168,7 +176,7 @@ impl AdvancedBootdiskOptionsView {
         );
     }
 
-    fn get_values(&mut self, runinfo: &RuntimeInfo) -> Result<BootdiskOptions, String> {
+    fn get_values(&mut self) -> Result<BootdiskOptions, String> {
         let fstype = self
             .view
             .get_child(1)
@@ -198,8 +206,7 @@ impl AdvancedBootdiskOptionsView {
                 .ok_or("Failed to retrieve advanced bootdisk options")?;
 
             if let FsType::Zfs(level) = fstype {
-                check_zfs_raid_config(runinfo, level, &disks)
-                    .map_err(|err| format!("{fstype}: {err}"))?;
+                check_zfs_raid_config(level, &disks).map_err(|err| format!("{fstype}: {err}"))?;
             }
 
             Ok(BootdiskOptions {
@@ -554,17 +561,11 @@ fn advanced_options_view(disks: &[Disk], options: Rc<RefCell<BootdiskOptions>>)
     .button("Ok", {
         let options_ref = options.clone();
         move |siv| {
-            let runinfo = siv
-                .user_data::<InstallerState>()
-                .unwrap()
-                .runtime_info
-                .clone();
-
             let options = siv
                 .call_on_name("advanced-bootdisk-options-dialog", |view: &mut Dialog| {
                     view.get_content_mut()
                         .downcast_mut::<AdvancedBootdiskOptionsView>()
-                        .map(|v| v.get_values(&runinfo))
+                        .map(AdvancedBootdiskOptionsView::get_values)
                 })
                 .flatten();
 
@@ -626,29 +627,33 @@ fn check_raid_min_disks(disks: &[Disk], min: usize) -> Result<(), String> {
     }
 }
 
-/// Checks whether a user-supplied ZFS RAID setup is valid or not, such as disk sizes, minimum
-/// number of disks and legacy BIOS compatibility.
+/// Checks all disks for legacy BIOS boot compatibility and reports an error as appropriate. 4Kn
+/// disks are generally broken with legacy BIOS and cannot be booted from.
 ///
 /// # Arguments
 ///
-/// * `runinfo` - `RuntimeInfo` instance of currently running system
+/// * `boot_type` - The boot mode of the current system (legacy BIOS or UEFI).
+/// * `disks` - List of disks designated as bootdisk targets.
+fn check_disks_4kn_legacy_boot(boot_type: BootType, disks: &[Disk]) -> Result<(), &str> {
+    let is_blocksize_4096 = |disk: &Disk| disk.block_size.map(|s| s == 4096).unwrap_or(false);
+
+    if boot_type == BootType::Bios && disks.iter().any(is_blocksize_4096) {
+        return Err("Booting from 4Kn drive in legacy BIOS mode is not supported.");
+    }
+
+    Ok(())
+}
+
+/// Checks whether a user-supplied ZFS RAID setup is valid or not, such as disk sizes and minimum
+/// number of disks.
+///
+/// # Arguments
+///
 /// * `level` - The targeted ZFS RAID level by the user.
 /// * `disks` - List of disks designated as RAID targets.
-fn check_zfs_raid_config(
-    runinfo: &RuntimeInfo,
-    level: ZfsRaidLevel,
-    disks: &[Disk],
-) -> Result<(), String> {
+fn check_zfs_raid_config(level: ZfsRaidLevel, disks: &[Disk]) -> Result<(), String> {
     // See also Proxmox/Install.pm:get_zfs_raid_setup()
 
-    for disk in disks {
-        if runinfo.boot_type != BootType::Efi
-            && disk.block_size.map(|v| v == 4096).unwrap_or_default()
-        {
-            return Err("Booting from 4Kn drive in legacy BIOS mode is not supported.".to_owned());
-        }
-    }
-
     let check_mirror_size = |disk1: &Disk, disk2: &Disk| {
         if (disk1.size - disk2.size).abs() > disk1.size / 10. {
             Err(format!(
@@ -719,10 +724,7 @@ fn check_btrfs_raid_config(level: BtrfsRaidLevel, disks: &[Disk]) -> Result<(),
 
 #[cfg(test)]
 mod tests {
-    use std::collections::HashMap;
-
     use super::*;
-    use crate::setup::{Dns, NetworkInfo};
 
     fn dummy_disk(index: usize) -> Disk {
         Disk {
@@ -738,24 +740,6 @@ mod tests {
         (0..num).map(dummy_disk).collect()
     }
 
-    fn dummy_runinfo(boot_type: BootType) -> RuntimeInfo {
-        RuntimeInfo {
-            boot_type,
-            country: Some("at".to_owned()),
-            disks: dummy_disks(4),
-            network: NetworkInfo {
-                dns: Dns {
-                    domain: None,
-                    dns: vec![],
-                },
-                routes: None,
-                interfaces: HashMap::new(),
-            },
-            total_memory: 1024 * 1024 * 1024 * 64,
-            hvm_supported: true,
-        }
-    }
-
     #[test]
     fn duplicate_disks() {
         assert!(check_for_duplicate_disks(&dummy_disks(2)).is_ok());
@@ -780,6 +764,19 @@ mod tests {
         assert!(check_raid_min_disks(&disks, 1).is_ok());
     }
 
+    #[test]
+    fn bios_boot_compat_4kn() {
+        for i in 0..10 {
+            let mut disks = dummy_disks(10);
+            disks[i].block_size = Some(4096);
+
+            // Must fail if /any/ of the disks are 4Kn
+            assert!(check_disks_4kn_legacy_boot(BootType::Bios, &disks).is_err());
+            // For UEFI, we allow it for every configuration
+            assert!(check_disks_4kn_legacy_boot(BootType::Efi, &disks).is_ok());
+        }
+    }
+
     #[test]
     fn btrfs_raid() {
         let disks = dummy_disks(10);
@@ -800,66 +797,34 @@ mod tests {
     }
 
     #[test]
-    fn zfs_raid_bios() {
-        let runinfo = dummy_runinfo(BootType::Bios);
-
-        let mut disks = dummy_disks(10);
-        zfs_common_tests(&disks, &runinfo);
+    fn zfs_raid() {
+        let disks = dummy_disks(10);
 
-        for disk in &mut disks {
-            disk.block_size = None;
-        }
-        // Should behave the same as if an explicit block size of 512 was set
-        zfs_common_tests(&disks, &runinfo);
+        assert!(check_zfs_raid_config(ZfsRaidLevel::Raid0, &[]).is_err());
+        assert!(check_zfs_raid_config(ZfsRaidLevel::Raid0, &disks[..1]).is_ok());
+        assert!(check_zfs_raid_config(ZfsRaidLevel::Raid0, &disks).is_ok());
 
-        for i in 0..10 {
-            let mut disks = dummy_disks(10);
-            disks[i].block_size = Some(4096);
+        assert!(check_zfs_raid_config(ZfsRaidLevel::Raid1, &[]).is_err());
+        assert!(check_zfs_raid_config(ZfsRaidLevel::Raid1, &disks[..2]).is_ok());
+        assert!(check_zfs_raid_config(ZfsRaidLevel::Raid1, &disks).is_ok());
 
-            // Must fail if /any/ of the disks are 4Kn
-            assert!(check_zfs_raid_config(&runinfo, ZfsRaidLevel::Raid0, &disks).is_err());
-            assert!(check_zfs_raid_config(&runinfo, ZfsRaidLevel::Raid1, &disks).is_err());
-            assert!(check_zfs_raid_config(&runinfo, ZfsRaidLevel::Raid10, &disks).is_err());
-            assert!(check_zfs_raid_config(&runinfo, ZfsRaidLevel::RaidZ, &disks).is_err());
-            assert!(check_zfs_raid_config(&runinfo, ZfsRaidLevel::RaidZ2, &disks).is_err());
-            assert!(check_zfs_raid_config(&runinfo, ZfsRaidLevel::RaidZ3, &disks).is_err());
-        }
-    }
+        assert!(check_zfs_raid_config(ZfsRaidLevel::Raid10, &[]).is_err());
+        assert!(check_zfs_raid_config(ZfsRaidLevel::Raid10, &dummy_disks(4)).is_ok());
+        assert!(check_zfs_raid_config(ZfsRaidLevel::Raid10, &disks).is_ok());
 
-    #[test]
-    fn zfs_raid_efi() {
-        let disks = dummy_disks(10);
-        let runinfo = dummy_runinfo(BootType::Efi);
+        assert!(check_zfs_raid_config(ZfsRaidLevel::RaidZ, &[]).is_err());
+        assert!(check_zfs_raid_config(ZfsRaidLevel::RaidZ, &disks[..2]).is_err());
+        assert!(check_zfs_raid_config(ZfsRaidLevel::RaidZ, &disks[..3]).is_ok());
+        assert!(check_zfs_raid_config(ZfsRaidLevel::RaidZ, &disks).is_ok());
 
-        zfs_common_tests(&disks, &runinfo);
-    }
+        assert!(check_zfs_raid_config(ZfsRaidLevel::RaidZ2, &[]).is_err());
+        assert!(check_zfs_raid_config(ZfsRaidLevel::RaidZ2, &disks[..3]).is_err());
+        assert!(check_zfs_raid_config(ZfsRaidLevel::RaidZ2, &disks[..4]).is_ok());
+        assert!(check_zfs_raid_config(ZfsRaidLevel::RaidZ2, &disks).is_ok());
 
-    fn zfs_common_tests(disks: &[Disk], runinfo: &RuntimeInfo) {
-        assert!(check_zfs_raid_config(runinfo, ZfsRaidLevel::Raid0, &[]).is_err());
-        assert!(check_zfs_raid_config(runinfo, ZfsRaidLevel::Raid0, &disks[..1]).is_ok());
-        assert!(check_zfs_raid_config(runinfo, ZfsRaidLevel::Raid0, disks).is_ok());
-
-        assert!(check_zfs_raid_config(runinfo, ZfsRaidLevel::Raid1, &[]).is_err());
-        assert!(check_zfs_raid_config(runinfo, ZfsRaidLevel::Raid1, &disks[..2]).is_ok());
-        assert!(check_zfs_raid_config(runinfo, ZfsRaidLevel::Raid1, disks).is_ok());
-
-        assert!(check_zfs_raid_config(runinfo, ZfsRaidLevel::Raid10, &[]).is_err());
-        assert!(check_zfs_raid_config(runinfo, ZfsRaidLevel::Raid10, &dummy_disks(4)).is_ok());
-        assert!(check_zfs_raid_config(runinfo, ZfsRaidLevel::Raid10, disks).is_ok());
-
-        assert!(check_zfs_raid_config(runinfo, ZfsRaidLevel::RaidZ, &[]).is_err());
-        assert!(check_zfs_raid_config(runinfo, ZfsRaidLevel::RaidZ, &disks[..2]).is_err());
-        assert!(check_zfs_raid_config(runinfo, ZfsRaidLevel::RaidZ, &disks[..3]).is_ok());
-        assert!(check_zfs_raid_config(runinfo, ZfsRaidLevel::RaidZ, disks).is_ok());
-
-        assert!(check_zfs_raid_config(runinfo, ZfsRaidLevel::RaidZ2, &[]).is_err());
-        assert!(check_zfs_raid_config(runinfo, ZfsRaidLevel::RaidZ2, &disks[..3]).is_err());
-        assert!(check_zfs_raid_config(runinfo, ZfsRaidLevel::RaidZ2, &disks[..4]).is_ok());
-        assert!(check_zfs_raid_config(runinfo, ZfsRaidLevel::RaidZ2, disks).is_ok());
-
-        assert!(check_zfs_raid_config(runinfo, ZfsRaidLevel::RaidZ3, &[]).is_err());
-        assert!(check_zfs_raid_config(runinfo, ZfsRaidLevel::RaidZ3, &disks[..4]).is_err());
-        assert!(check_zfs_raid_config(runinfo, ZfsRaidLevel::RaidZ3, &disks[..5]).is_ok());
-        assert!(check_zfs_raid_config(runinfo, ZfsRaidLevel::RaidZ3, disks).is_ok());
+        assert!(check_zfs_raid_config(ZfsRaidLevel::RaidZ3, &[]).is_err());
+        assert!(check_zfs_raid_config(ZfsRaidLevel::RaidZ3, &disks[..4]).is_err());
+        assert!(check_zfs_raid_config(ZfsRaidLevel::RaidZ3, &disks[..5]).is_ok());
+        assert!(check_zfs_raid_config(ZfsRaidLevel::RaidZ3, &disks).is_ok());
     }
 }
-- 
2.41.0