From: Stefan Hanreich <s.hanreich@proxmox.com>
To: pve-devel@lists.proxmox.com
Subject: [pve-devel] [PATCH proxmox-firewall 1/1] firewall: merge management ipset with local_network
Date: Thu, 25 Sep 2025 16:31:18 +0200
Message-ID: <20250925143119.330179-1-s.hanreich@proxmox.com>

To override the automatically detected local_network, which is used
for building the management ipset, pve-firewall supports a dedicated
'local_network' alias at the datacenter level. If an ipset called
'management' exists at the datacenter level, its entries additionally
get added to the management ipset.
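
With both mechanisms combined, a datacenter-level configuration along
the following lines (addresses purely illustrative) would thus yield a
management ipset containing the local_network override as well as the
custom entries:

  [ALIASES]
  local_network 192.0.2.0/24

  [IPSET management]
  203.0.113.10
  203.0.113.11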

proxmox-firewall behaved differently: the alias was ignored, and the
management ipset was completely overridden if a custom ipset was
defined in the datacenter-level configuration. This could potentially
lead to users locking themselves out of their PVE instance if they
create a new ipset called 'management' and the firewall daemon
recreates the ruleset while there are no entries in that ipset yet.
This commit makes proxmox-firewall behave like pve-firewall with
regard to the creation of the management ipset.

Signed-off-by: Stefan Hanreich <s.hanreich@proxmox.com>
---
Luckily, no reports of this have popped up yet, but I found this while
working on proxmox-firewall in general.

 proxmox-firewall/src/firewall.rs              | 49 ++++++++++++-------
 proxmox-firewall/tests/input/cluster.fw       |  1 +
 .../integration_tests__firewall.snap          | 12 +++++
 3 files changed, 45 insertions(+), 17 deletions(-)

diff --git a/proxmox-firewall/src/firewall.rs b/proxmox-firewall/src/firewall.rs
index 30bc642..690520d 100644
--- a/proxmox-firewall/src/firewall.rs
+++ b/proxmox-firewall/src/firewall.rs
@@ -190,28 +190,41 @@ impl Firewall {
         ]
     }
 
-    fn create_management_ipset(&self, commands: &mut Commands) -> Result<(), Error> {
-        if self.config.cluster().ipsets().get("management").is_none() {
-            log::trace!("auto-generating management ipset");
-
-            let management_ips = HostConfig::management_ips()?;
+    fn create_management_ipset(
+        &self,
+        commands: &mut Commands,
+        table: &TablePart,
+    ) -> Result<(), Error> {
+        let mut management_ipset = Ipset::new(IpsetName::new(IpsetScope::Datacenter, "management"));
 
-            let mut ipset = Ipset::new(IpsetName::new(IpsetScope::Datacenter, "management"));
-            ipset.reserve(management_ips.len());
+        if let Some(config_ipset) = self.config.cluster().ipsets().get("management") {
+            log::trace!("adding custom entries from management ipset");
 
-            let entries = management_ips.into_iter().map(IpsetEntry::from);
+            for entry in config_ipset.iter() {
+                management_ipset.push(entry.clone());
+            }
+        }
 
-            ipset.extend(entries);
+        if let Some(local_network_alias) = self.config.cluster().alias("local_network") {
+            log::trace!("using local_network alias for determining management ips");
 
-            let env = NftObjectEnv {
-                table: &Self::cluster_table(),
-                firewall_config: &self.config,
-                vmid: None,
-            };
+            management_ipset.push(IpsetEntry::from(*local_network_alias.address()));
+        } else {
+            log::trace!("using network configuration for determining management ips");
 
-            commands.append(&mut ipset.to_nft_objects(&env)?);
+            for management_ip in HostConfig::management_ips()? {
+                management_ipset.push(IpsetEntry::from(management_ip));
+            }
         }
 
+        let env = NftObjectEnv {
+            table,
+            firewall_config: &self.config,
+            vmid: None,
+        };
+
+        commands.append(&mut management_ipset.to_nft_objects(&env)?);
+
         Ok(())
     }
 
@@ -248,7 +261,7 @@ impl Firewall {
         if self.config.host().is_enabled() {
             log::info!("creating cluster / host configuration");
 
-            self.create_management_ipset(&mut commands)?;
+            self.create_management_ipset(&mut commands, &cluster_host_table)?;
 
             self.create_ipsets(
                 &mut commands,
@@ -315,6 +328,8 @@ impl Firewall {
         if !(enabled_guests.is_empty() && enabled_bridges.is_empty()) {
             log::info!("creating guest configuration");
 
+            self.create_management_ipset(&mut commands, &guest_table)?;
+
             self.create_ipsets(
                 &mut commands,
                 self.config.cluster().ipsets(),
@@ -776,7 +791,7 @@ impl Firewall {
         };
 
         for (name, ipset) in ipsets {
-            if ipset.ipfilter().is_some() {
+            if ipset.ipfilter().is_some() || ipset.name().name() == "management" {
                 continue;
             }
 
diff --git a/proxmox-firewall/tests/input/cluster.fw b/proxmox-firewall/tests/input/cluster.fw
index 376d2f1..4f50cc1 100644
--- a/proxmox-firewall/tests/input/cluster.fw
+++ b/proxmox-firewall/tests/input/cluster.fw
@@ -7,6 +7,7 @@ enable: 1
 
 network1 172.16.100.0/24
 network2 172.16.200.0/24
+local_network 198.51.100.100/32
 
 [IPSET network1]
 
diff --git a/proxmox-firewall/tests/snapshots/integration_tests__firewall.snap b/proxmox-firewall/tests/snapshots/integration_tests__firewall.snap
index 1a19ea7..127d634 100644
--- a/proxmox-firewall/tests/snapshots/integration_tests__firewall.snap
+++ b/proxmox-firewall/tests/snapshots/integration_tests__firewall.snap
@@ -1600,6 +1600,12 @@ expression: "firewall.full_host_fw().expect(\"firewall can be generated\")"
                 "addr": "127.0.0.1",
                 "len": 8
               }
+            },
+            {
+              "prefix": {
+                "addr": "198.51.100.100",
+                "len": 32
+              }
             }
           ]
         }
@@ -3907,6 +3913,12 @@ expression: "firewall.full_host_fw().expect(\"firewall can be generated\")"
                 "addr": "127.0.0.1",
                 "len": 8
               }
+            },
+            {
+              "prefix": {
+                "addr": "198.51.100.100",
+                "len": 32
+              }
             }
           ]
         }
-- 
2.47.3


