* [pve-devel] [PATCH container 1/1] Signed-off-by: Maurice Klein <klein@aetherus.de>
       [not found] <20260109121049.70740-1-klein@aetherus.de>
@ 2026-01-09 12:10 ` Maurice Klein via pve-devel
       [not found] ` <20260109121049.70740-2-klein@aetherus.de>
  1 sibling, 0 replies; 11+ messages in thread
From: Maurice Klein via pve-devel @ 2026-01-09 12:10 UTC (permalink / raw)
  To: pve-devel; +Cc: Maurice Klein


From: Maurice Klein <klein@aetherus.de>
To: pve-devel@lists.proxmox.com
Subject: [PATCH container 1/1] Signed-off-by: Maurice Klein <klein@aetherus.de>
Date: Fri,  9 Jan 2026 13:10:49 +0100
Message-ID: <20260109121049.70740-2-klein@aetherus.de>

qemu-server: add routed tap and helper scripts
---
 src/PVE/QemuServer.pm         |  8 +++++-
 src/PVE/QemuServer/Network.pm | 19 +++++++++++
 src/usr/pve-tap               | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 src/usr/pve-tap-hotplug       |  3 ++
 src/usr/pve-tapdown           | 16 ++++++++++
 5 files changed, 100 insertions(+), 1 deletion(-)
 create mode 100755 src/usr/pve-tap
 create mode 100755 src/usr/pve-tap-hotplug
 create mode 100755 src/usr/pve-tapdown

diff --git a/src/PVE/QemuServer.pm b/src/PVE/QemuServer.pm
index 69991843..2c0b784e 100644
--- a/src/PVE/QemuServer.pm
+++ b/src/PVE/QemuServer.pm
@@ -1443,8 +1443,14 @@ sub print_netdev_full {
 
     my $netdev = "";
     my $script = $hotplug ? "pve-bridge-hotplug" : "pve-bridge";
+    if ($net->{taprouted}) {
+        $script = $hotplug ? "pve-tap-hotplug" : "pve-tap";
+    }
 
-    if ($net->{bridge}) {
+    if ($net->{taprouted}) {
+        $netdev = "type=tap,id=$netid,ifname=${ifname},script=/usr/libexec/qemu-server/$script"
+            . ",downscript=/usr/libexec/qemu-server/pve-tapdown$vhostparam";
+    } elsif ($net->{bridge}) {
         $netdev = "type=tap,id=$netid,ifname=${ifname},script=/usr/libexec/qemu-server/$script"
             . ",downscript=/usr/libexec/qemu-server/pve-bridgedown$vhostparam";
     } else {
diff --git a/src/PVE/QemuServer/Network.pm b/src/PVE/QemuServer/Network.pm
index eb8222e8..c11f002c 100644
--- a/src/PVE/QemuServer/Network.pm
+++ b/src/PVE/QemuServer/Network.pm
@@ -116,6 +116,25 @@ my $net_fmt = {
             "Force MTU of network device (VirtIO only). Setting to '1' or empty will use the bridge MTU",
         optional => 1,
     },
+    taprouted => {
+        type => 'boolean',
+        description => "Routed network: only create the tap interface and run the routing up/down scripts.",
+        optional => 1,
+    },
+    hostip => {
+        type => 'string',
+        format => 'ipv4',
+        format_description => 'IPv4',
+        description => 'IPv4 address for the host side of the routed tap interface.',
+        optional => 1,
+    },
+    guestip => {
+        type => 'string',
+        format => 'ipv4',
+        format_description => 'IPv4',
+        description => 'IPv4 address of the guest; a /32 host route to it is added on the tap interface.',
+        optional => 1,
+    },
 };
 
 our $netdesc = {
diff --git a/src/usr/pve-tap b/src/usr/pve-tap
new file mode 100755
index 00000000..10623c17
--- /dev/null
+++ b/src/usr/pve-tap
@@ -0,0 +1,55 @@
+#!/usr/bin/perl
+
+use strict;
+use warnings;
+
+use PVE::Tools qw(run_command);
+use PVE::Firewall;
+
+use PVE::QemuConfig;
+use PVE::QemuServer::Network;
+
+my $iface = shift;
+
+my $hotplug = 0;
+if ($iface eq '--hotplug') {
+    $hotplug = 1;
+    $iface = shift;
+}
+
+die "no interface specified\n" if !$iface;
+
+die "got strange interface name '$iface'\n"
+    if $iface !~ m/^tap(\d+)i(\d+)$/;
+
+my $vmid = $1;
+my $netid = "net$2";
+
+my $migratedfrom = $hotplug ? undef : $ENV{PVE_MIGRATED_FROM};
+
+my $conf = PVE::QemuConfig->load_config($vmid, $migratedfrom);
+
+my $netconf = $conf->{$netid};
+
+$netconf = $conf->{pending}->{$netid} if !$migratedfrom && defined($conf->{pending}->{$netid});
+
+die "unable to get network config '$netid'\n"
+    if !defined($netconf);
+
+my $net = PVE::QemuServer::Network::parse_net($netconf);
+die "unable to parse network config '$netid'\n" if !$net;
+
+# Bring up the tap interface
+run_command(['ip', 'link', 'set', $iface, 'up']);
+
+# Set the host-side IP on the tap interface if specified
+if (defined($net->{hostip})) {
+    run_command(['ip', 'addr', 'add', $net->{hostip}, 'dev', $iface]);
+}
+
+# Add a host route to the guest IP if specified
+if (defined($net->{guestip})) {
+    run_command(['ip', 'route', 'add', $net->{guestip}, 'dev', $iface]);
+}
+
+exit 0;
diff --git a/src/usr/pve-tap-hotplug b/src/usr/pve-tap-hotplug
new file mode 100755
index 00000000..6fcdcd2a
--- /dev/null
+++ b/src/usr/pve-tap-hotplug
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec /usr/libexec/qemu-server/pve-tap --hotplug "$@"
diff --git a/src/usr/pve-tapdown b/src/usr/pve-tapdown
new file mode 100755
index 00000000..e867b640
--- /dev/null
+++ b/src/usr/pve-tapdown
@@ -0,0 +1,16 @@
+#!/usr/bin/perl
+
+use strict;
+use warnings;
+use PVE::Network;
+
+my $iface = shift;
+
+die "no interface specified\n" if !$iface;
+
+die "got strange interface name '$iface'\n"
+    if $iface !~ m/^tap(\d+)i(\d+)$/;
+
+PVE::Network::tap_unplug($iface);
+
+exit 0;
-- 
2.39.5 (Apple Git-154)





* Re: [pve-devel] [PATCH container 1/1] Signed-off-by: Maurice Klein <klein@aetherus.de>
       [not found] ` <20260109121049.70740-2-klein@aetherus.de>
@ 2026-01-19  8:37   ` Maurice Klein via pve-devel
  2026-01-19 14:35     ` Stefan Hanreich
  0 siblings, 1 reply; 11+ messages in thread
From: Maurice Klein via pve-devel @ 2026-01-19  8:37 UTC (permalink / raw)
  To: pve-devel; +Cc: Maurice Klein


From: Maurice Klein <klein@aetherus.de>
To: pve-devel@lists.proxmox.com
Subject: Re: [PATCH container 1/1] Signed-off-by: Maurice Klein <klein@aetherus.de>
Date: Mon, 19 Jan 2026 09:37:05 +0100
Message-ID: <ec4caf97-a205-4586-b371-89caafc27b06@aetherus.de>

Hi,

just a gentle ping on this series.
Happy to rework or adjust anything if I missed something or did 
something the wrong way.

Thanks,
Maurice


On 09.01.26 at 13:10, Maurice Klein wrote:
> qemu-server: add routed tap and helper scripts
> [...]





* Re: [pve-devel] [PATCH container 1/1] Signed-off-by: Maurice Klein <klein@aetherus.de>
  2026-01-19  8:37   ` Maurice Klein via pve-devel
@ 2026-01-19 14:35     ` Stefan Hanreich
  2026-01-21 19:04       ` Maurice Klein via pve-devel
       [not found]       ` <d18928a0-6ab0-4e90-ad3a-0674bbdedb72@aetherus.de>
  0 siblings, 2 replies; 11+ messages in thread
From: Stefan Hanreich @ 2026-01-19 14:35 UTC (permalink / raw)
  To: pve-devel

Hi!

Thanks for your contribution! Did you already check out our guidelines
[1] [2] and send a signed CLA? Without one, we cannot accept any
contributions.


I've looked at the proposal, but I wanted to take some time to think
more about the general concept. It seems like you want to build
something similar to current Kubernetes networking solutions that
utilize BGP but without the whole EVPN / VXLAN stuff?

Maybe it'd make more sense to discuss how we could improve the
EVPN zone or SDN in general to make such setups easier - potentially a
new zone that is something of an in-between of the simple zone and the
EVPN zone could make sense. In any case, I think removing the bridge and
implementing it this way is the wrong way to go about this.

[1] https://www.proxmox.com/en/about/open-source/developers
[2] https://pve.proxmox.com/wiki/Developer_Documentation





* Re: [pve-devel] [PATCH container 1/1] Signed-off-by: Maurice Klein <klein@aetherus.de>
  2026-01-19 14:35     ` Stefan Hanreich
@ 2026-01-21 19:04       ` Maurice Klein via pve-devel
       [not found]       ` <d18928a0-6ab0-4e90-ad3a-0674bbdedb72@aetherus.de>
  1 sibling, 0 replies; 11+ messages in thread
From: Maurice Klein via pve-devel @ 2026-01-21 19:04 UTC (permalink / raw)
  To: pve-devel; +Cc: Maurice Klein


From: Maurice Klein <klein@aetherus.de>
To: pve-devel@lists.proxmox.com
Subject: Re: [pve-devel] [PATCH container 1/1] Signed-off-by: Maurice Klein <klein@aetherus.de>
Date: Wed, 21 Jan 2026 20:04:48 +0100
Message-ID: <d18928a0-6ab0-4e90-ad3a-0674bbdedb72@aetherus.de>

Hi,

Thanks for getting back to me!
I did sign a CLA and it has been on record since January 12.

I agree it would work nicely as an SDN plugin, and I was also considering
that approach.
The problem I saw with that is that SDN relies on there being a bridge
for every zone, and making it work without one seems to be a huge refactor.
Do you think the bridge should not be removed at all, even for a pure L3
routed setup?
It could also work with one bridge per guest, but that would, in my
opinion, bring unnecessary overhead.

The motivation on my side comes from setups where L2 between guests is
not required at all, and where using routing protocols (OSPF, IS-IS, 
BGP) to the hosts
simplifies redundancy and failure handling significantly.

I'd love to get a conversation going on how something could be
implemented, and what would be the best way to go about it.



Kind regards,

Maurice Klein

Aetherus

TEL: 0212 7846460
Mail: Klein@aetherus.de


On 19.01.26 at 15:35, Stefan Hanreich wrote:
> [...]





* Re: [pve-devel] [PATCH container 1/1] Signed-off-by: Maurice Klein <klein@aetherus.de>
       [not found]       ` <d18928a0-6ab0-4e90-ad3a-0674bbdedb72@aetherus.de>
@ 2026-01-27 10:02         ` Stefan Hanreich
  2026-01-27 10:37           ` Maurice Klein via pve-devel
       [not found]           ` <321bd4ff-f147-4329-9788-50061d569fa6@aetherus.de>
  0 siblings, 2 replies; 11+ messages in thread
From: Stefan Hanreich @ 2026-01-27 10:02 UTC (permalink / raw)
  To: Maurice Klein, pve-devel

On 1/21/26 8:04 PM, Maurice Klein wrote:

Some thoughts below from my side, but I'm still unsure what the best
approach for this would be.

> I agree it would work nicely as an SDN plugin, and I was also considering
> that approach.
> The problem I saw with that is that SDN relies on there being a bridge
> for every zone, and making it work without one seems to be a huge refactor.
> Do you think the bridge should not be removed at all, even for a pure L3
> routed setup?

That would be one reason, but there are others. The gateway IP only
needs to be configured once on the bridge / vnet itself then, whereas it
needs to be specified explicitly for every guest with your approach.
You'd most likely also need to generate a MAC address that is the same
for the GW on all PVE hosts, so VM mobility works properly. With tap
interfaces that is even more complicated as you'd need to handle setting
the MAC for each tap interface. It's cleaner and simpler that way imo,
since you can just set up the gateway once and be done.

A simple zone with port isolation is already quite similar to what
you're trying to achieve imo. It denies L2 connectivity between guests
via the isolated flag [1] on bridge members, and the PVE node acts as a
router for the zone. I think that could be used as a starting point and
then built upon. Simple zones have IPAM support, so we could utilize
that for managing the guest IPs. It would probably also make sense to
manage neighbor / fdb table entries statically for this kind of setup.
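
For illustration, roughly what such a setup boils down to on the host
(interface and bridge names here are hypothetical):

    # guest port: no L2 connectivity to other isolated ports
    bridge link set dev tap100i0 isolated on
    # the PVE node owns the gateway IP and routes for the zone
    ip addr add 192.0.2.1/24 dev vmbr1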

[1] https://man7.org/linux/man-pages/man8/bridge.8.html





* Re: [pve-devel] [PATCH container 1/1] Signed-off-by: Maurice Klein <klein@aetherus.de>
  2026-01-27 10:02         ` Stefan Hanreich
@ 2026-01-27 10:37           ` Maurice Klein via pve-devel
       [not found]           ` <321bd4ff-f147-4329-9788-50061d569fa6@aetherus.de>
  1 sibling, 0 replies; 11+ messages in thread
From: Maurice Klein via pve-devel @ 2026-01-27 10:37 UTC (permalink / raw)
  To: Stefan Hanreich, pve-devel; +Cc: Maurice Klein


From: Maurice Klein <klein@aetherus.de>
To: Stefan Hanreich <s.hanreich@proxmox.com>, pve-devel@lists.proxmox.com
Subject: Re: [pve-devel] [PATCH container 1/1] Signed-off-by: Maurice Klein <klein@aetherus.de>
Date: Tue, 27 Jan 2026 11:37:00 +0100
Message-ID: <321bd4ff-f147-4329-9788-50061d569fa6@aetherus.de>

Hi,

I didn't even think about the MAC address; ARP does recover quickly,
but it would be an unwanted interruption.

I like the idea of using a bridge with L2 isolation.
I'd like to implement the zone then, with an L2-isolated bridge.

I'd propose that the first step be to get the zone working, including
the mechanism for adding host routes.
I would then use one VRF per zone.
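
Per zone, a minimal sketch of what I have in mind (names and table
number are just examples):

    ip link add vrf_routed type vrf table 100   # one VRF per zone
    ip link set vrf_routed up
    ip link set routedbr0 master vrf_routed     # enslave the zone bridge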

A proposed name would be "Routed".
Do you have better ideas?

A roadmap I have in my head would look the following way:

- implement zone "Routed"
    ensuring that all routing between guests and host works and that
default routes get put into the VRF as well

- implement the possibility to export host routes of routed zones via
BGP (see the sketch below)

- implement the possibility to add static routes per Routed zone, such
as different default routes

- implement dynamic routing updates into the routed zones' VRF tables
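
As a sketch of the BGP export item above (ASN and VRF name are
hypothetical):

    # announce the per-VM host routes from the zone VRF
    vtysh -c 'conf t' \
          -c 'router bgp 65000 vrf vrf_routed' \
          -c 'address-family ipv4 unicast' \
          -c 'redistribute kernel'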

Where I'm still not sure is how to get routing within the same zone
running across a cluster.
It could be implemented via iBGP sessions between the hosts, but I don't
know if that is the preferred way, and it needs to be clear to the user
which path will be taken and how it works.


Let me know if that is an approach you agree with; if it is, I would
like to get started implementing the first step.


Kind regards,

Maurice Klein

Aetherus

TEL: 0212 7846460
Mail: Klein@aetherus.de


On 27.01.26 at 11:02, Stefan Hanreich wrote:
> [...]





* Re: [pve-devel] [PATCH container 1/1] Signed-off-by: Maurice Klein <klein@aetherus.de>
       [not found]           ` <321bd4ff-f147-4329-9788-50061d569fa6@aetherus.de>
@ 2026-01-29 12:20             ` Stefan Hanreich
  2026-02-01 14:32               ` Maurice Klein
  0 siblings, 1 reply; 11+ messages in thread
From: Stefan Hanreich @ 2026-01-29 12:20 UTC (permalink / raw)
  To: Maurice Klein, pve-devel

On 1/27/26 11:36 AM, Maurice Klein wrote:
> Hi,
> 
> I didn't even think about the MAC address; ARP does recover quickly,
> but it would be an unwanted interruption.
> 
> I like the idea of using a bridge with L2 isolation.
> I'd like to implement the zone then, with an L2-isolated bridge.
> 
> I'd propose that the first step be to get the zone working, including
> the mechanism for adding host routes.
> I would then use one VRF per zone.

For a PoC we could just utilize the default routing table I think? VRF
support for SDN entities is something that is on the mid-term roadmap,
so if we want VRF support we'd need to be careful not to put any
barriers in that make implementing that feature harder. We'd also have
to make sure that VRFs stay unique across all entities that are using
them (currently only EVPN zone). If we generate the names analogous to
the EVPN zones we should be fine since all zones regardless of type
share the same ID space.

With multiple VRFs, you'd ideally want to have the option to announce
the routes per-VRF. Utilizing transit VLANs for zones would make sense
then and also being able to map each VNet in a zone to a VLAN. That'd
require creating VLAN subinterfaces inside a VRF as well via our stack
and adding them to a VNet (might be done automatically) / VRF. At that
point we're building essentially what is VRF-lite though, so that's a
bit more involved.
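
I.e. roughly, per transit VLAN (names and IDs are hypothetical):

    ip link add link bond0 name bond0.100 type vlan id 100   # transit VLAN
    ip link set bond0.100 master vrf_zone1                   # move it into the zone VRF
    ip link set bond0.100 up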

> A proposed name would be "Routed".
> Do you have better ideas?
> 
> A roadmap I have in my head would look the following way:
> 
> - implement zone "Routed"
>    ensuring that all routing between guests and host works and that
> default routes get put into the VRF as well

I'd not necessarily put in the default route unconditionally - either
make it a config option or, if we skip VRF support for now, we'd get
this for free by implementing the VRF feature afterwards.

> - implement the possibility to export host routes of routed zones via BGP

see below

> - implement the possibility to add static routes per Routed zone, such
> as different default routes

we would get that via VRF support as well

> - implement dynamic routing updates into the routed zones' VRF tables
> 
> Where I'm still not sure is how to get routing within the same zone
> running across a cluster.
> It could be implemented via iBGP sessions between the hosts, but I don't
> know if that is the preferred way, and it needs to be clear to the user
> which path will be taken and how it works.

One option would be to utilize e.g. the fabrics where we plan to add
initial support for route redistribution soon. This should get
integrated with a future VRF feature as well. That would decouple the
zone from the redistribution / announcing itself and give users
flexibility in choosing their routing protocol. Users can create
fabrics, then select which VRFs to announce via them (and apply filters
via route-maps).





* Re: [pve-devel] [PATCH container 1/1] Signed-off-by: Maurice Klein <klein@aetherus.de>
  2026-01-29 12:20             ` Stefan Hanreich
@ 2026-02-01 14:32               ` Maurice Klein
  2026-02-06  8:23                 ` Stefan Hanreich
  0 siblings, 1 reply; 11+ messages in thread
From: Maurice Klein @ 2026-02-01 14:32 UTC (permalink / raw)
  To: Stefan Hanreich, pve-devel

So I was looking into the implementation today.

Unfortunately I found some points where I'm kinda stuck with the current 
way SDN is designed.

I'm hoping you have some ideas or advice on how to continue.

Basically, the vnet and subnet part is what I see as an issue.
Since this kind of setup requires no defined subnets, the current
configuration doesn't fully make sense.
I guess you could still have a subnet configuration and configure all
the host addresses inside that subnet, but it's not really necessary.

Every VM route would be a /32 route, and the configured address on
that bridge (gateway field) would also be a /32.
When the tap interface of a VM gets plugged, a route needs to be created.
Routes per VM get created with the command ip route add 192.168.1.5/32
dev routedbridge.
The /32 gateway address needs to be configured on the bridge as well.
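
Concretely, that would be (addresses are just examples):

    ip addr add 192.0.2.1/32 dev routedbridge      # /32 gateway on the bridge
    ip route add 192.168.1.5/32 dev routedbridge   # /32 host route to the VM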

There needs to be some way to configure the guests' IPs as well, but in
IPAM there is currently no way to set an IP for a VM; there are only
IP/MAC bindings.

A potential security flaw is also that devices on that bridge can steal
a configured IP by just replying to ARP.
That could be mitigated by disabling bridge learning and also creating
static ARP entries for those configured IPs.
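
A sketch of that mitigation (MAC address and names are hypothetical):

    bridge link set dev tap100i0 learning off
    bridge fdb add 52:54:00:12:34:56 dev tap100i0 master static
    ip neigh replace 192.168.1.5 lladdr 52:54:00:12:34:56 dev routedbridge nud permanent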

I don't know yet what the best way is to configure guest IP addresses.
My initial idea was to have that done on a per-interface VM level, but
that is incompatible with SDN.
I don't know how it could be integrated into IPAM either, especially
considering that PVE IPAM isn't the only one available to be used.




Kind regards,

Maurice Klein

Aetherus

TEL: 0212 7846460
Mail: Klein@aetherus.de


On 29.01.26 at 13:20, Stefan Hanreich wrote:
> [...]
>





* Re: [pve-devel] [PATCH container 1/1] Signed-off-by: Maurice Klein <klein@aetherus.de>
  2026-02-01 14:32               ` Maurice Klein
@ 2026-02-06  8:23                 ` Stefan Hanreich
  2026-02-06 11:22                   ` Maurice Klein
  0 siblings, 1 reply; 11+ messages in thread
From: Stefan Hanreich @ 2026-02-06  8:23 UTC (permalink / raw)
  To: Maurice Klein, pve-devel

On 2/1/26 3:31 PM, Maurice Klein wrote:
> Basically, the vnet and subnet part is what I see as an issue.
> Since this kind of setup requires no defined subnets, the current
> configuration doesn't fully make sense.
> I guess you could still have a subnet configuration and configure all
> the host addresses inside that subnet, but it's not really necessary.
> Every VM route would be a /32 route, and the configured address on
> that bridge (gateway field) would also be a /32.

We would still need a local IP on the PVE host that acts as a gateway
and preferably an IP for the VM inside the subnet so you can route the
traffic for the /32 IPs there. So we'd need to configure e.g.
192.0.2.0/24 as subnet, then have the host as gateway (e.g. 192.0.2.1)
and each VM gets an IP inside that subnet (which could automatically be
handled via IPAM / DHCP). Looking at other implementations (e.g.
kube-router) there's even a whole subnet pool and each node gets one
subnet from that pool - but that's easier done with containers than VMs,
so I think the approach with one shared subnet seems easier
(particularly for VM mobility).
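
I.e. something like this on every node (names are hypothetical):

    ip addr add 192.0.2.1/24 dev vnet0     # shared subnet, host as gateway
    ip route add 192.0.2.10/32 dev vnet0   # per-VM host route inside it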


> When the tap interface of a VM gets plugged, a route needs to be created.
> Routes per VM get created with the command ip route add 192.168.1.5/32
> dev routedbridge.
> The /32 gateway address needs to be configured on the bridge as well.

This could be done in the respective tap_plug / veth_create functions
inside pve-network [1]. You can override them on a per-zone basis so
that would fit right in. We'd have to implement analogous functions for
teardown though so we can remove the routes when updating / deleting the
tap / veth.
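
A teardown counterpart would then essentially do the reverse
(hypothetical names again):

    ip route del 192.0.2.10/32 dev vnet0   # drop the per-VM host route
    ip neigh del 192.0.2.10 dev vnet0      # plus any static neighbor entry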

Someone has actually implemented a quite similar thing via utilizing
hooks and a dedicated config files for each VM - see [2]. They're using
IPv6-LL addresses though (which I would personally also prefer), but I'm
unsure how it would work with windows guests for instance and it might
be weird / unintuitive for some users (see my previous mail).


> There needs to be some way to configure the guests' IPs as well, but in
> IPAM there is currently no way to set an IP for a VM; there are only
> IP/MAC bindings.

That's imo the real question left, where to store the additional IPs.
Zone config is awkward, PVE IPAM might be workable with introducing
additional fields (and for a PoC we could just disallow using any IPAM
plugin other than that one and implement the rest later).

Network device is probably the best bet, since we can then utilize the
hotplug code in case an IP gets reassigned, which would be more
complicated with the other approaches. The only reason why I'm reluctant
is because we're introducing a property there that is specific to one
particular SDN zone and unused by everything else.


> A potential security flaw is also that devices on that bridge can steal
> a configured IP by just replying to ARP.
> That could be mitigated by disabling bridge learning and also creating
> static ARP entries for those configured IPs.

That setting should be exposed in the zone configuration and probably be
on by default. There's also always the option of using IP / MAC filters
in the firewall although the static fdb / neighbor table approach is
preferable imo.


[0] https://docs.cilium.io/en/stable/network/lb-ipam/#requesting-ips
[1]
https://git.proxmox.com/?p=pve-network.git;a=blob;f=src/PVE/Network/SDN/Zones.pm;h=4da94580e07d6b3dcb794f19ce9335412fa7bc41;hb=HEAD#l298
[2] https://siewert.io/posts/2022/announce-proxmox-vm-ips-via-bgp-1/





* Re: [pve-devel] [PATCH container 1/1] Signed-off-by: Maurice Klein <klein@aetherus.de>
  2026-02-06  8:23                 ` Stefan Hanreich
@ 2026-02-06 11:22                   ` Maurice Klein
  0 siblings, 0 replies; 11+ messages in thread
From: Maurice Klein @ 2026-02-06 11:22 UTC (permalink / raw)
  To: Stefan Hanreich, pve-devel

On 06.02.26 at 09:23, Stefan Hanreich wrote:
> On 2/1/26 3:31 PM, Maurice Klein wrote:
>> Basically, the vnet and subnet part is what I see as an issue.
>> Since this kind of setup requires no defined subnets, the current
>> configuration doesn't fully make sense.
>> I guess you could still have a subnet configuration and configure all
>> the host addresses inside that subnet, but it's not really necessary.
>> Every VM route would be a /32 route, and the configured address on
>> that bridge (gateway field) would also be a /32.
> We would still need a local IP on the PVE host that acts as a gateway
> and preferably an IP for the VM inside the subnet so you can route the
> traffic for the /32 IPs there. So we'd need to configure e.g.
> 192.0.2.0/24 as subnet, then have the host as gateway (e.g. 192.0.2.1)
> and each VM gets an IP inside that subnet (which could automatically be
> handled via IPAM / DHCP). Looking at other implementations (e.g.
> kube-router) there's even a whole subnet pool and each node gets one
> subnet from that pool - but that's easier done with containers than VMs,
> so I think the approach with one shared subnet seems easier
> (particularly for VM mobility).

I think I didn't explain that properly.
Basically, the whole idea is to have a gateway IP like 192.0.2.1/32 on
the PVE host on that bridge, and not have a /24 or similar route.
Guests then also have addresses, whatever they might look like.
For example, a guest could have 1.1.1.1/32 - usually always a /32,
although I guess for some use cases it could be beneficial to be able
to give a guest more than a /32, but let's put that aside for now.
There is then no need/reason to define which subnet a guest is on, and
no need for it to be in the same subnet as the host.

The guest would configure its IP statically inside, and it would usually
be a /32.

On the PVE host, a host route to 1.1.1.1/32 would then be added by the
following command:
ip route add 1.1.1.1/32 dev bridgetest

Guest configuration would look like this (simplified and shortened):
eth0: <BROADCAST,MULTICAST,UP,LOWER_UP>
     inet 1.1.1.1/32

Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         192.0.2.1       0.0.0.0         UG    100    0        0 eth0
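
For completeness, the guest-side setup as commands (the gateway is not
on-link for a /32, so it needs an explicit interface route first):

    ip addr add 1.1.1.1/32 dev eth0
    ip route add 192.0.2.1/32 dev eth0           # make the gateway reachable
    ip route add default via 192.0.2.1 dev eth0  # then route everything via it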

Now the biggest thing this enables: in PVE clusters, if we build for
example an iBGP full mesh, the routes get shared.
There could be any topology and routing would adapt.
Just as an example - while this is a crude topology, it can illustrate
the point:

       GW-1        GW-2
         | \        / |
         |  \      /  |
         |   \    /   |
        pve1--pve3
            \      /
             \    /
              pve2

Any PVE node can fail and everything would still be reachable.
The shortest path will always be chosen.
Any link can fail.
Any gateway can fail.
Even multiple links failing is OK.
There is no chance of loops because every link is P2P.
Much like the full-mesh Ceph setup with OSPF or OpenFabric.
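
As a sketch, each node would then run something like (ASN and peer
addresses are hypothetical):

    vtysh -c 'conf t' \
          -c 'router bgp 65000' \
          -c 'neighbor 10.0.0.2 remote-as 65000' \
          -c 'neighbor 10.0.0.3 remote-as 65000' \
          -c 'address-family ipv4 unicast' \
          -c 'redistribute kernel'   # share the local /32 host routes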

That can be achieved with EVPN/VXLAN, an anycast gateway, and multiple
exit nodes.
The problem is the complexity, and by giving the gateways bigger routes
such as a /24, they will not always use the optimal path, thus increasing
latency and putting unnecessary routing load on hosts where the VM isn't
living right now.
And all that to have one L2 domain, which often brings more disadvantages
than advantages.

I hope I explained it well now; if not, feel free to ask anything. I
could also provide some bigger documentation with screenshots of
everything.




>
>
>> When the tap interface of a VM gets plugged, a route needs to be created.
>> Routes per VM get created with the command ip route add 192.168.1.5/32
>> dev routedbridge.
>> The /32 gateway address needs to be configured on the bridge as well.
> This could be done in the respective tap_plug / veth_create functions
> inside pve-network [1]. You can override them on a per-zone basis so
> that would fit right in. We'd have to implement analogous functions for
> teardown though so we can remove the routes when updating / deleting the
> tap / veth.
>
> Someone has actually implemented a quite similar thing via utilizing
> hooks and a dedicated config files for each VM - see [2]. They're using
> IPv6-LL addresses though (which I would personally also prefer), but I'm
> unsure how it would work with windows guests for instance and it might
> be weird / unintuitive for some users (see my previous mail).
Yeah, sounds good.
IPv6 support needs to be implemented for all of this as well; I'm just
starting with v4.

>
>
>> There needs to be some way to configure the guests' IPs as well, but in
>> IPAM there is currently no way to set an IP for a VM; there are only
>> IP/MAC bindings.
> That's imo the real question left, where to store the additional IPs.
> Zone config is awkward, PVE IPAM might be workable with introducing
> additional fields (and for a PoC we could just disallow using any IPAM
> plugin other than that one and implement the rest later).
>
> Network device is probably the best bet, since we can then utilize the
> hotplug code in case an IP gets reassigned, which would be more
> complicated with the other approaches. The only reason why I'm reluctant
> is because we're introducing a property there that is specific to one
> particular SDN zone and unused by everything else.

I also feel like it would make sense in the network device, since it is
part of the specific configuration for that VM, but I get why you are
reluctant about that.
This honestly makes me reconsider the SDN approach a little bit.
I have an idea here that could be something workable:
what if we add a field that doesn't say guest IP, but instead call it
routes?
Essentially that is what it is, and it might have extra use cases apart
from what I'm trying to achieve.
That way, for this use case, you can use those fields to add the needed
/32 host routes.
It wouldn't be specific to the SDN feature we build.
The SDN feature could then be more about configuring the bridge with the
right addresses and features, and enable us to later distribute the
routes via BGP and other ways.
I looked into the hotplug scenarios as well, and that way those would be
solved.
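
As a sketch, a VM config line could then look like this (property name
and syntax purely hypothetical):

    net0: virtio=BC:24:11:01:02:03,bridge=routed0,routes=1.1.1.1/32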

>
>> A potential security flaw is also that devices on that bridge can steal
>> a configured IP by just replying to ARP.
>> That could be mitigated by disabling bridge learning and also creating
>> static ARP entries for those configured IPs.
> That setting should be exposed in the zone configuration and probably be
> on by default. There's also always the option of using IP / MAC filters
> in the firewall although the static fdb / neighbor table approach is
> preferable imo.

Perfect, I'm on the same page.
Implementing it via fdb / neighbor entries also ensures that this
crucial feature is there for users with the firewall disabled.
>
> [0] https://docs.cilium.io/en/stable/network/lb-ipam/#requesting-ips
> [1]
> https://git.proxmox.com/?p=pve-network.git;a=blob;f=src/PVE/Network/SDN/Zones.pm;h=4da94580e07d6b3dcb794f19ce9335412fa7bc41;hb=HEAD#l298
> [2] https://siewert.io/posts/2022/announce-proxmox-vm-ips-via-bgp-1/
>





* [pve-devel] [PATCH container 1/1] Signed-off-by: Maurice Klein <klein@aetherus.de>
       [not found] <20260109124514.72991-1-klein@aetherus.de>
@ 2026-01-09 12:45 ` Maurice Klein via pve-devel
  0 siblings, 0 replies; 11+ messages in thread
From: Maurice Klein via pve-devel @ 2026-01-09 12:45 UTC (permalink / raw)
  To: pve-devel; +Cc: Maurice Klein


From: Maurice Klein <klein@aetherus.de>
To: pve-devel@lists.proxmox.com
Subject: [PATCH container 1/1] Signed-off-by: Maurice Klein <klein@aetherus.de>
Date: Fri,  9 Jan 2026 13:45:14 +0100
Message-ID: <20260109124514.72991-2-klein@aetherus.de>

qemu-server: add routed tap and helper scripts
---
 src/PVE/QemuServer.pm         |  8 +++++-
 src/PVE/QemuServer/Network.pm | 19 +++++++++++
 src/usr/pve-tap               | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 src/usr/pve-tap-hotplug       |  3 ++
 src/usr/pve-tapdown           | 16 ++++++++++
 5 files changed, 100 insertions(+), 1 deletion(-)
 create mode 100755 src/usr/pve-tap
 create mode 100755 src/usr/pve-tap-hotplug
 create mode 100755 src/usr/pve-tapdown

diff --git a/src/PVE/QemuServer.pm b/src/PVE/QemuServer.pm
index 69991843..2c0b784e 100644
--- a/src/PVE/QemuServer.pm
+++ b/src/PVE/QemuServer.pm
@@ -1443,8 +1443,14 @@ sub print_netdev_full {
 
     my $netdev = "";
     my $script = $hotplug ? "pve-bridge-hotplug" : "pve-bridge";
+    if ($net->{taprouted}) {
+        $script = $hotplug ? "pve-tap-hotplug" : "pve-tap";
+    }
 
-    if ($net->{bridge}) {
+    if ($net->{taprouted}) {
+        $netdev = "type=tap,id=$netid,ifname=${ifname},script=/usr/libexec/qemu-server/$script"
+            . ",downscript=/usr/libexec/qemu-server/pve-tapdown$vhostparam";
+    } elsif ($net->{bridge}) {
         $netdev = "type=tap,id=$netid,ifname=${ifname},script=/usr/libexec/qemu-server/$script"
             . ",downscript=/usr/libexec/qemu-server/pve-bridgedown$vhostparam";
     } else {
diff --git a/src/PVE/QemuServer/Network.pm b/src/PVE/QemuServer/Network.pm
index eb8222e8..c11f002c 100644
--- a/src/PVE/QemuServer/Network.pm
+++ b/src/PVE/QemuServer/Network.pm
@@ -116,6 +116,25 @@ my $net_fmt = {
             "Force MTU of network device (VirtIO only). Setting to '1' or empty will use the bridge MTU",
         optional => 1,
     },
+    taprouted => {
+        type => 'boolean',
+        description => "Routed network: only create the tap interface and run the routing up/down scripts.",
+        optional => 1,
+    },
+    hostip => {
+        type => 'string',
+        format => 'ipv4',
+        format_description => 'IPv4',
+        description => 'IPv4 address for the host side of the routed tap interface.',
+        optional => 1,
+    },
+    guestip => {
+        type => 'string',
+        format => 'ipv4',
+        format_description => 'IPv4',
+        description => 'IPv4 address of the guest; a /32 host route to it is added on the tap interface.',
+        optional => 1,
+    },
 };
 
 our $netdesc = {
diff --git a/src/usr/pve-tap b/src/usr/pve-tap
new file mode 100755
index 00000000..10623c17
--- /dev/null
+++ b/src/usr/pve-tap
@@ -0,0 +1,55 @@
+#!/usr/bin/perl
+
+use strict;
+use warnings;
+
+use PVE::Tools qw(run_command);
+use PVE::Firewall;
+
+use PVE::QemuConfig;
+use PVE::QemuServer::Network;
+
+my $iface = shift;
+
+my $hotplug = 0;
+if ($iface eq '--hotplug') {
+    $hotplug = 1;
+    $iface = shift;
+}
+
+die "no interface specified\n" if !$iface;
+
+die "got strange interface name '$iface'\n"
+    if $iface !~ m/^tap(\d+)i(\d+)$/;
+
+my $vmid = $1;
+my $netid = "net$2";
+
+my $migratedfrom = $hotplug ? undef : $ENV{PVE_MIGRATED_FROM};
+
+my $conf = PVE::QemuConfig->load_config($vmid, $migratedfrom);
+
+my $netconf = $conf->{$netid};
+
+$netconf = $conf->{pending}->{$netid} if !$migratedfrom && defined($conf->{pending}->{$netid});
+
+die "unable to get network config '$netid'\n"
+    if !defined($netconf);
+
+my $net = PVE::QemuServer::Network::parse_net($netconf);
+die "unable to parse network config '$netid'\n" if !$net;
+
+# Bring up the tap interface
+run_command(['ip', 'link', 'set', $iface, 'up']);
+
+# Set the host-side IP on the tap interface if specified
+if (defined($net->{hostip})) {
+    run_command(['ip', 'addr', 'add', $net->{hostip}, 'dev', $iface]);
+}
+
+# Add a host route to the guest IP if specified
+if (defined($net->{guestip})) {
+    run_command(['ip', 'route', 'add', $net->{guestip}, 'dev', $iface]);
+}
+
+exit 0;
diff --git a/src/usr/pve-tap-hotplug b/src/usr/pve-tap-hotplug
new file mode 100755
index 00000000..6fcdcd2a
--- /dev/null
+++ b/src/usr/pve-tap-hotplug
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec /usr/libexec/qemu-server/pve-tap --hotplug "$@"
diff --git a/src/usr/pve-tapdown b/src/usr/pve-tapdown
new file mode 100755
index 00000000..e867b640
--- /dev/null
+++ b/src/usr/pve-tapdown
@@ -0,0 +1,16 @@
+#!/usr/bin/perl
+
+use strict;
+use warnings;
+use PVE::Network;
+
+my $iface = shift;
+
+die "no interface specified\n" if !$iface;
+
+die "got strange interface name '$iface'\n"
+    if $iface !~ m/^tap(\d+)i(\d+)$/;
+
+PVE::Network::tap_unplug($iface);
+
+exit 0;
-- 
2.39.5 (Apple Git-154)





end of thread, other threads:[~2026-02-06 11:22 UTC | newest]

Thread overview: 11+ messages
     [not found] <20260109121049.70740-1-klein@aetherus.de>
2026-01-09 12:10 ` [pve-devel] [PATCH container 1/1] Signed-off-by: Maurice Klein <klein@aetherus.de> Maurice Klein via pve-devel
     [not found] ` <20260109121049.70740-2-klein@aetherus.de>
2026-01-19  8:37   ` Maurice Klein via pve-devel
2026-01-19 14:35     ` Stefan Hanreich
2026-01-21 19:04       ` Maurice Klein via pve-devel
     [not found]       ` <d18928a0-6ab0-4e90-ad3a-0674bbdedb72@aetherus.de>
2026-01-27 10:02         ` Stefan Hanreich
2026-01-27 10:37           ` Maurice Klein via pve-devel
     [not found]           ` <321bd4ff-f147-4329-9788-50061d569fa6@aetherus.de>
2026-01-29 12:20             ` Stefan Hanreich
2026-02-01 14:32               ` Maurice Klein
2026-02-06  8:23                 ` Stefan Hanreich
2026-02-06 11:22                   ` Maurice Klein
     [not found] <20260109124514.72991-1-klein@aetherus.de>
2026-01-09 12:45 ` Maurice Klein via pve-devel
