* [PATCH manager v2 0/3] ui: split out disks and nics into grids
@ 2026-05-08 13:38 Dominik Csapak
2026-05-08 13:38 ` [PATCH manager v2 1/3] ui: utils: factor out 'media=cdrom' check Dominik Csapak
` (3 more replies)
0 siblings, 4 replies; 7+ messages in thread
From: Dominik Csapak @ 2026-05-08 13:38 UTC (permalink / raw)
To: pve-devel
Resending with @David's comments fixed, sending qemu only for now again,
still working on the lxc side.
see commit message 3/3 for details and rationale
changes from v1:
* refactor some regexes/checks
* fix some bugs I encountered during testing again
Dominik Csapak (3):
ui: utils: factor out 'media=cdrom' check
ui: factor out the guest key nic regex check
ui: qemu hardware view: split out disks and nics into grids
www/manager6/Utils.js | 16 +-
www/manager6/button/Revert.js | 2 +-
www/manager6/lxc/Network.js | 4 +-
www/manager6/qemu/BootOrderEdit.js | 12 +-
www/manager6/qemu/CreateWizard.js | 4 +-
www/manager6/qemu/HardwareView.js | 557 +++++++++++++++++++++++++++--
www/manager6/window/GuestImport.js | 2 +-
7 files changed, 555 insertions(+), 42 deletions(-)
--
2.47.3
^ permalink raw reply [flat|nested] 7+ messages in thread* [PATCH manager v2 1/3] ui: utils: factor out 'media=cdrom' check 2026-05-08 13:38 [PATCH manager v2 0/3] ui: split out disks and nics into grids Dominik Csapak @ 2026-05-08 13:38 ` Dominik Csapak 2026-05-08 13:38 ` [PATCH manager v2 2/3] ui: factor out the guest key nic regex check Dominik Csapak ` (2 subsequent siblings) 3 siblings, 0 replies; 7+ messages in thread From: Dominik Csapak @ 2026-05-08 13:38 UTC (permalink / raw) To: pve-devel this is a check we have all over the place and is prone to typos when one tries to check it in a new place. introduce a 'diskIsCdrom' check in Utils and use that in every place. Signed-off-by: Dominik Csapak <d.csapak@proxmox.com> --- www/manager6/Utils.js | 9 ++++++++- www/manager6/qemu/BootOrderEdit.js | 6 +++--- www/manager6/qemu/CreateWizard.js | 4 ++-- www/manager6/qemu/HardwareView.js | 6 +++--- www/manager6/window/GuestImport.js | 2 +- 5 files changed, 17 insertions(+), 10 deletions(-) diff --git a/www/manager6/Utils.js b/www/manager6/Utils.js index be95d216..6a27f253 100644 --- a/www/manager6/Utils.js +++ b/www/manager6/Utils.js @@ -1962,6 +1962,13 @@ Ext.define('PVE.Utils', { return true; }, + diskIsCdrom: function (value) { + if (!value) { + return false; + } + return !!value.toString().match(/media=cdrom/); + }, + sortByPreviousUsage: function (vmconfig, nodename) { let controllerList = ['ide', 'virtio', 'scsi', 'sata']; let usedControllers = {}; @@ -1972,7 +1979,7 @@ Ext.define('PVE.Utils', { for (const property of Object.keys(vmconfig)) { if ( property.match(PVE.Utils.bus_match) && - !vmconfig[property].match(/media=cdrom/) + !PVE.Utils.diskIsCdrom(vmconfig[property]) ) { const foundController = property.match(PVE.Utils.bus_match)[1]; usedControllers[foundController]++; diff --git a/www/manager6/qemu/BootOrderEdit.js b/www/manager6/qemu/BootOrderEdit.js index 521a3d6e..c7117c1a 100644 --- a/www/manager6/qemu/BootOrderEdit.js +++ 
b/www/manager6/qemu/BootOrderEdit.js @@ -51,7 +51,7 @@ Ext.define('PVE.qemu.BootOrderPanel', { }, }, - isCloudinit: (v) => v.match(/media=cdrom/) && v.match(/[:/]vm-\d+-cloudinit/), + isCloudinit: (v) => PVE.Utils.diskIsCdrom(v) && v.match(/[:/]vm-\d+-cloudinit/), isDisk: function (value) { return PVE.Utils.bus_match.test(value); @@ -97,7 +97,7 @@ Ext.define('PVE.qemu.BootOrderPanel', { Ext.Object.each(me.vmconfig, function (key, value) { if ( me.isDisk(key) && - value.match(/media=cdrom/) && + PVE.Utils.diskIsCdrom(value) && !me.isCloudinit(value) ) { list.push(key); @@ -196,7 +196,7 @@ Ext.define('PVE.qemu.BootOrderPanel', { iconCls; if (value.match(/^net\d+$/)) { iconCls = 'exchange'; - } else if (desc.match(/media=cdrom/)) { + } else if (PVE.Utils.diskIsCdrom(desc)) { metaData.tdCls = 'pve-itype-icon-cdrom'; } else { iconCls = 'hdd-o'; diff --git a/www/manager6/qemu/CreateWizard.js b/www/manager6/qemu/CreateWizard.js index 5eb784c4..2ee209da 100644 --- a/www/manager6/qemu/CreateWizard.js +++ b/www/manager6/qemu/CreateWizard.js @@ -33,14 +33,14 @@ Ext.define('PVE.qemu.CreateWizard', { // cannot know which one is a bootable iso and hardcodes the known values (ide0/2, net0) calculateBootOrder: function (values) { // user selected windows + second cdrom - if (values.ide0 && values.ide0.match(/media=cdrom/)) { + if (values.ide0 && PVE.Utils.diskIsCdrom(values.ide0)) { let disk; PVE.Utils.forEachBus(['ide', 'scsi', 'virtio', 'sata'], (type, id) => { let confId = type + id; if (!values[confId]) { return undefined; } - if (values[confId].match(/media=cdrom/)) { + if (PVE.Utils.diskIsCdrom(values[confId])) { return undefined; } disk = confId; diff --git a/www/manager6/qemu/HardwareView.js b/www/manager6/qemu/HardwareView.js index 64cb4a7b..471e3a8f 100644 --- a/www/manager6/qemu/HardwareView.js +++ b/www/manager6/qemu/HardwareView.js @@ -22,7 +22,7 @@ Ext.define('PVE.qemu.HardwareView', { if (value.match(/vm-.*-cloudinit/)) { iconCls = 'cloud'; txt = rowdef.cloudheader; 
- } else if (value.match(/media=cdrom/)) { + } else if (PVE.Utils.diskIsCdrom(value)) { metaData.tdCls = 'pve-itype-icon-cdrom'; return rowdef.cdheader; } @@ -417,7 +417,7 @@ Ext.define('PVE.qemu.HardwareView', { let value = me.getObjectValue(rec.data.key, '', true); if (isCloudInitKey(value)) { return; - } else if (value.match(/media=cdrom/)) { + } else if (PVE.Utils.diskIsCdrom(value)) { editor = 'PVE.qemu.CDEdit'; } else if (!diskCap) { return; @@ -757,7 +757,7 @@ Ext.define('PVE.qemu.HardwareView', { const isRunning = me.pveSelNode.data.running; const isCloudInit = isCloudInitKey(value); - const isCDRom = value && !!value.toString().match(/media=cdrom/); + const isCDRom = value && PVE.Utils.diskIsCdrom(value); const isUnusedDisk = key.match(/^unused\d+/); const isUsedDisk = !isUnusedDisk && row.isOnStorageBus && !isCDRom; diff --git a/www/manager6/window/GuestImport.js b/www/manager6/window/GuestImport.js index 9c9b0f1b..9a01e81d 100644 --- a/www/manager6/window/GuestImport.js +++ b/www/manager6/window/GuestImport.js @@ -1065,7 +1065,7 @@ Ext.define('PVE.window.GuestImport', { let cdroms = []; for (const [id, value] of Object.entries(me.vmConfig)) { - if (!Ext.isString(value) || !value.match(/media=cdrom/)) { + if (!Ext.isString(value) || !PVE.Utils.diskIsCdrom(value)) { continue; } cdroms.push({ -- 2.47.3 ^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH manager v2 2/3] ui: factor out the guest key nic regex check 2026-05-08 13:38 [PATCH manager v2 0/3] ui: split out disks and nics into grids Dominik Csapak 2026-05-08 13:38 ` [PATCH manager v2 1/3] ui: utils: factor out 'media=cdrom' check Dominik Csapak @ 2026-05-08 13:38 ` Dominik Csapak 2026-05-08 13:38 ` [PATCH manager v2 3/3] ui: qemu hardware view: split out disks and nics into grids Dominik Csapak 2026-05-15 9:21 ` superseded: [PATCH manager v2 0/3] ui: " Dominik Csapak 3 siblings, 0 replies; 7+ messages in thread From: Dominik Csapak @ 2026-05-08 13:38 UTC (permalink / raw) To: pve-devel It's better to have these regexes in one place, otherwise they're easily typoed. I noticed that some of the checks were not anchored at the end, but all valid values these can have, should match the new check as well. Signed-off-by: Dominik Csapak <d.csapak@proxmox.com> --- www/manager6/Utils.js | 7 +++++++ www/manager6/lxc/Network.js | 4 ++-- www/manager6/qemu/BootOrderEdit.js | 6 +++--- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/www/manager6/Utils.js b/www/manager6/Utils.js index 6a27f253..035539fb 100644 --- a/www/manager6/Utils.js +++ b/www/manager6/Utils.js @@ -1969,6 +1969,13 @@ Ext.define('PVE.Utils', { return !!value.toString().match(/media=cdrom/); }, + keyIsNic: function (value) { + if (!value) { + return false; + } + return !!value.toString().match(/^net\d+$/); + }, + sortByPreviousUsage: function (vmconfig, nodename) { let controllerList = ['ide', 'virtio', 'scsi', 'sata']; let usedControllers = {}; diff --git a/www/manager6/lxc/Network.js b/www/manager6/lxc/Network.js index e56d47c0..b2737471 100644 --- a/www/manager6/lxc/Network.js +++ b/www/manager6/lxc/Network.js @@ -89,7 +89,7 @@ Ext.define('PVE.lxc.NetworkInputPanel', { value: cdata.name, validator: function (value) { for (const [key, netRaw] of Object.entries(me.dataCache)) { - if (!key.match(/^net\d+/) || key === me.ifname) { + if (!PVE.Utils.keyIsNic(key) || key === 
me.ifname) { continue; } let net = PVE.Parser.parseLxcNetwork(netRaw); @@ -385,7 +385,7 @@ Ext.define( let records = []; me.dataCache = confResponse.result.data || {}; for (const [key, value] of Object.entries(confResponse.result.data)) { - if (!key.match(/^net\d+/)) { + if (!PVE.Utils.keyIsNic(key)) { continue; } let config = PVE.Parser.parseLxcNetwork(value); diff --git a/www/manager6/qemu/BootOrderEdit.js b/www/manager6/qemu/BootOrderEdit.js index c7117c1a..12f3b672 100644 --- a/www/manager6/qemu/BootOrderEdit.js +++ b/www/manager6/qemu/BootOrderEdit.js @@ -60,7 +60,7 @@ Ext.define('PVE.qemu.BootOrderPanel', { isBootdev: function (dev, value) { return ( (this.isDisk(dev) && !this.isCloudinit(value)) || - /^net\d+/.test(dev) || + PVE.Utils.keyIsNic(dev) || /^hostpci\d+/.test(dev) || (/^usb\d+/.test(dev) && !/spice/.test(value)) ); @@ -105,7 +105,7 @@ Ext.define('PVE.qemu.BootOrderPanel', { }); } else if (orderList[i] === 'n') { Ext.Object.each(me.vmconfig, function (key, value) { - if (/^net\d+/.test(key)) { + if (PVE.Utils.keyIsNic(key)) { list.push(key); } }); @@ -194,7 +194,7 @@ Ext.define('PVE.qemu.BootOrderPanel', { let icon = '', iconCls; - if (value.match(/^net\d+$/)) { + if (PVE.Utils.keyIsNic(value)) { iconCls = 'exchange'; } else if (PVE.Utils.diskIsCdrom(desc)) { metaData.tdCls = 'pve-itype-icon-cdrom'; -- 2.47.3 ^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH manager v2 3/3] ui: qemu hardware view: split out disks and nics into grids 2026-05-08 13:38 [PATCH manager v2 0/3] ui: split out disks and nics into grids Dominik Csapak 2026-05-08 13:38 ` [PATCH manager v2 1/3] ui: utils: factor out 'media=cdrom' check Dominik Csapak 2026-05-08 13:38 ` [PATCH manager v2 2/3] ui: factor out the guest key nic regex check Dominik Csapak @ 2026-05-08 13:38 ` Dominik Csapak 2026-05-12 2:06 ` Thomas Lamprecht 2026-05-15 9:21 ` superseded: [PATCH manager v2 0/3] ui: " Dominik Csapak 3 siblings, 1 reply; 7+ messages in thread From: Dominik Csapak @ 2026-05-08 13:38 UTC (permalink / raw) To: pve-devel This is done to improve visibility of configuration of disks and nics. Currently only the raw property string is shown in the UI, which leads to bad UX when having many options configured and especially when having pending changes in these situations because it's hard to parse visually. The disks and nics are shown below the main options, each as their own grids with relevant columns. By default all columns that can be edited in the gui are shown (with the exception of the bandwidth limits, since they're not often used and blow up the number of columns). All other options from the config are there but hidden by default. As a fallback, in case there are new options not yet in the ui, a 'Raw Config' column was added, which contains the raw property string again (but it's hidden by default). Some notes on the implementation: The stores for the new grids are populated on each load of the main grid, and we have one combined pending store that's just there to save the parsed pending values. To keep the diff smaller, some helpers are introduced to forward some methods to the main hardware grid. The selection change introduces some logic to keep the selection only in one grid, so there can be no conflict on the selected values. 
Signed-off-by: Dominik Csapak <d.csapak@proxmox.com> --- changes in v2: * refactor the disk regex * also consider newly pending disks and nics * correct use of the boolean values (have to parseBoolean first) www/manager6/button/Revert.js | 2 +- www/manager6/qemu/HardwareView.js | 551 ++++++++++++++++++++++++++++-- 2 files changed, 526 insertions(+), 27 deletions(-) diff --git a/www/manager6/button/Revert.js b/www/manager6/button/Revert.js index 14b13f5b..7d48f83f 100644 --- a/www/manager6/button/Revert.js +++ b/www/manager6/button/Revert.js @@ -18,7 +18,7 @@ Ext.define('PVE.button.PendingRevert', { } let view = this.pendingGrid; - let rec = view.getSelectionModel().getSelection()[0]; + let rec = (this.selModel ?? view.getSelectionModel()).getSelection()[0]; if (!rec) { return; } diff --git a/www/manager6/qemu/HardwareView.js b/www/manager6/qemu/HardwareView.js index 471e3a8f..86da04c7 100644 --- a/www/manager6/qemu/HardwareView.js +++ b/www/manager6/qemu/HardwareView.js @@ -1,9 +1,15 @@ Ext.define('PVE.qemu.HardwareView', { - extend: 'Proxmox.grid.PendingObjectGrid', + extend: 'Ext.panel.Panel', alias: ['widget.PVE.qemu.HardwareView'], onlineHelp: 'qm_virtual_machines_settings', + layout: { + type: 'vbox', + align: 'stretch', + }, + scrollable: true, + renderKey: function (key, metaData, rec, rowIndex, colIndex, store) { var me = this; var rows = me.rows; @@ -43,6 +49,22 @@ Ext.define('PVE.qemu.HardwareView', { } }, + referenceHolder: true, + + // helpers to redirect the methods to the hardwareGrid + + getObjectValue: function () { + let me = this; + let hardwareGrid = me.lookup('hardwareGrid'); + return hardwareGrid.getObjectValue(...arguments); + }, + + reload: function () { + let me = this; + let hardwareGrid = me.lookup('hardwareGrid'); + return hardwareGrid.reload(); + }, + initComponent: function () { var me = this; @@ -403,9 +425,475 @@ Ext.define('PVE.qemu.HardwareView', { let baseurl = `nodes/${nodename}/qemu/${vmid}/config`; - let sm = 
Ext.create('Ext.selection.RowModel', {}); + let hardwareGrid = Ext.create('Proxmox.grid.PendingObjectGrid', { + reference: 'hardwareGrid', + title: gettext('General'), + iconCls: 'fa fa-desktop', + pveSelNode: me.pveSelNode, + url: `/api2/json/nodes/${nodename}/qemu/${vmid}/pending`, + interval: 5000, + rows, + sorterFn, + border: false, + renderKey: me.renderKey, + }); + + let keyForDiskGrid = (key) => key.match(/^(ide|sata|scsi|virtio|unused)\d+$/); + + hardwareGrid.getStore().addFilter({ + filterFn: function (rec) { + let val = rec.get('value') || rec.get('pending'); + let key = rec.get('key'); + if (keyForDiskGrid(key)) { + return PVE.Utils.diskIsCdrom(val) || isCloudInitKey(val); + } else if (PVE.Utils.keyIsNic(key)) { + return false; + } + return true; + }, + }); + + let pendingStore = Ext.create('Ext.data.Store'); + let pendingRenderer = function (originalRenderer) { + originalRenderer ??= Ext.htmlEncode; + return function (val, metaData, rec, rowIdx, colIdx, store, view) { + let txt = originalRenderer(val, metaData, rec, rowIdx, colIdx, store, view); + let pendingTxt; + + let pending = pendingStore.getById(rec.get('id')); + if (pending) { + let dataIndex = view.up().getColumns()[colIdx].dataIndex; + let value = pending.get(dataIndex); + pendingTxt = originalRenderer( + value, + metaData, + rec, + rowIdx, + colIdx, + store, + view, + ); + } + + if (pendingTxt && txt && pendingTxt !== txt) { + txt += '<br />'; + txt += `<span style="color: darkorange">${pendingTxt}</span>`; + } + if (pending && pendingTxt && (!txt || !rec.data.raw)) { + txt = `<span style="color: orange">${pendingTxt}</span>`; + } + if (pending && !pendingTxt && txt) { + txt = `<div style="color: darkorange; text-decoration: line-through;">${txt}</div>`; + } + if (rec.data.delete && txt) { + txt = `<div style="color: darkorange; text-decoration: line-through;">${txt}</div>`; + } + + return txt; + }; + }; + + // boolean values parsed from property string have to be parsed to real boolean 
values + let booleanRenderer = function (value) { + if (Ext.isString(value)) { + value = PVE.Parser.parseBoolean(value); + } + return Proxmox.Utils.format_boolean(value); + }; + + let diskGrid = Ext.create('Ext.grid.Panel', { + reference: 'diskGrid', + title: gettext('Hard Disks'), + stateful: true, + stateId: 'pve-qemu-hardware-disk', + border: false, + expandable: false, + iconCls: 'fa fa-hdd-o', + minHeight: 100, + emptyText: gettext('No hard disks configured'), + store: {}, + columns: [ + { + header: gettext('Bus/Device'), + dataIndex: 'id', + width: 100, + renderer: pendingRenderer(), + }, + { + header: gettext('Volume/File'), + dataIndex: 'file', + flex: 1, + minWidth: 200, + renderer: pendingRenderer(), + }, + { + header: gettext('Size'), + dataIndex: 'size', + width: 100, + renderer: pendingRenderer((value) => { + if (value === undefined || value === null) { + return ''; + } + + let size = value; + if (!size.toString().slice(-1).match(/[0-9]/)) { + // IEC unit is implicit in config, but parser expects it as 'i' suffix + size = `${value}i`; + } + + return Proxmox.Utils.autoscale_size_unit(size); + }), + }, + { + header: gettext('Cache'), + dataIndex: 'cache', + width: 140, + renderer: pendingRenderer( + (v) => v ?? `${Proxmox.Utils.defaultText} (${gettext('No cache')})`, + ), + }, + { + header: gettext('Discard'), + dataIndex: 'discard', + width: 80, + renderer: pendingRenderer(booleanRenderer), + }, + { + header: gettext('IO thread'), + dataIndex: 'iothread', + width: 100, + renderer: pendingRenderer(booleanRenderer), + }, + { + header: gettext('SSD emulation'), + dataIndex: 'ssd', + width: 120, + renderer: pendingRenderer(booleanRenderer), + }, + { + header: gettext('Backup'), + dataIndex: 'backup', + width: 80, + renderer: pendingRenderer((v) => booleanRenderer(v ?? 
true)), + }, + { + header: gettext('Read-Only'), + dataIndex: 'ro', + width: 100, + renderer: pendingRenderer(booleanRenderer), + }, + { + header: gettext('Replicate'), + dataIndex: 'replicate', + width: 100, + renderer: pendingRenderer(booleanRenderer), + }, + { + header: gettext('Async I/O'), + dataIndex: 'aio', + width: 100, + renderer: pendingRenderer(), + }, + { + header: gettext('Bandwidth Limits'), + hidden: true, + columns: [ + { + header: gettext('Read limit (MiB/s)'), + dataIndex: 'mbps_rd', + width: 110, + renderer: pendingRenderer(), + }, + { + header: gettext('Write limit (MiB/s)'), + dataIndex: 'mbps_wr', + width: 110, + renderer: pendingRenderer(), + }, + { + header: gettext('Read limit (ops/s)'), + dataIndex: 'iops_rd', + width: 110, + renderer: pendingRenderer(), + }, + { + header: gettext('Write limit (ops/s)'), + dataIndex: 'iops_wr', + width: 110, + renderer: pendingRenderer(), + }, + { + header: gettext('Read Burst (MiB/s)'), + dataIndex: 'mbps_rd_max', + width: 110, + renderer: pendingRenderer(), + }, + { + header: gettext('Write Burst (MiB/s)'), + dataIndex: 'mbps_wr_max', + width: 110, + renderer: pendingRenderer(), + }, + { + header: gettext('Read Burst (ops/s)'), + dataIndex: 'iops_rd_max', + width: 110, + renderer: pendingRenderer(), + }, + { + header: gettext('Write Burst (ops/s)'), + dataIndex: 'iops_wr_max', + width: 110, + renderer: pendingRenderer(), + }, + ], + }, + { + header: gettext('Detect Zeroes'), + dataIndex: 'detect_zeroes', + hidden: true, + renderer: pendingRenderer(booleanRenderer), + }, + { + header: gettext('Vendor'), + dataIndex: 'vendor', + hidden: true, + renderer: pendingRenderer(), + }, + { + header: gettext('Product'), + dataIndex: 'product', + hidden: true, + renderer: pendingRenderer(), + }, + { + header: gettext('Serial'), + dataIndex: 'serial', + hidden: true, + renderer: pendingRenderer(), + }, + { + header: gettext('Queues'), + dataIndex: 'queues', + hidden: true, + renderer: pendingRenderer(), + }, + { + 
header: gettext('Shared'), + dataIndex: 'shared', + hidden: true, + renderer: pendingRenderer(booleanRenderer), + }, + { + header: gettext('Write Error Action'), + dataIndex: 'werror', + hidden: true, + renderer: pendingRenderer(), + }, + { + header: 'WWN', + dataIndex: 'wwn', + hidden: true, + renderer: pendingRenderer(), + }, + { + header: gettext('Raw Config'), + dataIndex: 'raw', + hidden: true, + flex: 1, + renderer: pendingRenderer(), + }, + ], + }); + + let netGrid = Ext.create('Ext.grid.Panel', { + reference: 'netGrid', + title: gettext('Network Interfaces'), + border: false, + stateful: true, + stateId: 'pve-qemu-hardware-net', + expandable: false, + iconCls: 'fa fa-exchange', + minHeight: 100, + emptyText: gettext('No network interfaces configured'), + store: {}, + columns: [ + { + header: gettext('ID'), + dataIndex: 'id', + flex: 1, + minWidth: 80, + renderer: pendingRenderer(), + }, + { + header: gettext('Model'), + dataIndex: 'model', + width: 100, + renderer: pendingRenderer(), + }, + { + header: gettext('MAC address'), + dataIndex: 'macaddr', + width: 200, + renderer: pendingRenderer(), + }, + { + header: gettext('Bridge'), + dataIndex: 'bridge', + width: 100, + renderer: pendingRenderer(), + }, + { + header: gettext('Firewall enabled'), + dataIndex: 'firewall', + width: 150, + renderer: pendingRenderer(booleanRenderer), + }, + { + header: gettext('VLAN tag'), + dataIndex: 'tag', + width: 80, + renderer: pendingRenderer(), + }, + { + header: gettext('MTU'), + dataIndex: 'mtu', + width: 100, + renderer: pendingRenderer(), + }, + { + header: gettext('Multiqueue'), + dataIndex: 'queues', + width: 100, + renderer: pendingRenderer(), + }, + { + header: gettext('Rate limit (MB/s)'), + dataIndex: 'rate', + width: 200, + renderer: pendingRenderer(), + }, + { + header: gettext('Disconnected'), + dataIndex: 'link_down', + width: 100, + renderer: pendingRenderer(booleanRenderer), + }, + { + header: gettext('Trunks'), + dataIndex: 'trunks', + width: 100, + }, + 
], + }); + + hardwareGrid.mon(hardwareGrid.getStore(), 'refresh', function () { + let diskData = []; + let nicData = []; + let pendingData = []; + let data = hardwareGrid.getStore().getData(); + (data.getSource() ?? data).each(function (rec) { + let key = rec.get('key'); + let val = rec.get('value'); + let pending = rec.get('pending'); + let deleted = rec.get('delete'); + if (keyForDiskGrid(key)) { + let isDisk = (value) => !value.match(/media=cdrom/) && !isCloudInitKey(value); + + let parsed = PVE.Parser.parseQemuDrive(key, val); + if (parsed && val && isDisk(val)) { + parsed.id = key; + parsed.raw = val; + parsed.deleted = deleted; + diskData.push(parsed); + } + + if (pending && isDisk(pending)) { + if (!parsed) { + diskData.push({ + id: key, + raw: '', + }); + } + parsed = PVE.Parser.parseQemuDrive(key, pending); + parsed.id = key; + parsed.raw = pending; + pendingData.push(parsed); + } + } else if (PVE.Utils.keyIsNic(key)) { + let parsed = PVE.Parser.parseQemuNetwork(key, val); + if (parsed) { + parsed.id = key; + parsed.raw = val; + parsed.deleted = deleted; + nicData.push(parsed); + } + if (pending) { + if (!parsed) { + nicData.push({ + id: key, + raw: '', + }); + } + parsed = PVE.Parser.parseQemuNetwork(key, pending); + parsed.id = key; + parsed.raw = pending; + pendingData.push(parsed); + } + } + }); + + pendingStore.loadData(pendingData); + netGrid.getStore().loadData(nicData); + diskGrid.getStore().loadData(diskData); + }); + + let sm = Ext.create('Ext.selection.Model', { + getSelection: () => { + let hardwareSelection = hardwareGrid.getSelection(); + if (hardwareSelection.length > 0) { + return hardwareSelection; + } + + let diskSelection = diskGrid.getSelection(); + if (diskSelection.length > 0) { + let key = diskSelection[0].get('id'); + // Return original record with 'pending'/'value' fields expected by tools + return [hardwareGrid.rstore.getById(key)]; + } + + let nicSelection = netGrid.getSelection(); + if (nicSelection.length > 0) { + let key = 
nicSelection[0].get('id'); + // Return original record with 'pending'/'value' fields expected by tools + return [hardwareGrid.rstore.getById(key)]; + } + return []; + }, + }); + + let onSelectionChange = function (sm, selected) { + let me = this; + if (selected.length) { + if (me !== hardwareGrid) { + hardwareGrid.getSelectionModel().deselectAll(); + } + if (me !== diskGrid) { + diskGrid.getSelectionModel().deselectAll(); + } + if (me !== netGrid) { + netGrid.getSelectionModel().deselectAll(); + } + } + set_button_status(); + }; + + hardwareGrid.on('selectionchange', onSelectionChange); + diskGrid.on('selectionchange', onSelectionChange); + netGrid.on('selectionchange', onSelectionChange); let run_editor = function () { + let me = hardwareGrid; let rec = sm.getSelection()[0]; if (!rec || !rows[rec.data.key]?.editor) { return; @@ -449,6 +937,10 @@ Ext.define('PVE.qemu.HardwareView', { } }; + hardwareGrid.on('itemdblclick', run_editor); + diskGrid.on('itemdblclick', run_editor); + netGrid.on('itemdblclick', run_editor); + let edit_btn = new Proxmox.button.Button({ text: gettext('Edit'), selModel: sm, @@ -492,7 +984,6 @@ Ext.define('PVE.qemu.HardwareView', { text: gettext('Move Storage'), tooltip: gettext('Move disk to another storage'), iconCls: 'fa fa-database', - selModel: sm, handler: () => { let rec = sm.getSelection()[0]; if (!rec) { @@ -515,7 +1006,6 @@ Ext.define('PVE.qemu.HardwareView', { text: gettext('Reassign Owner'), tooltip: gettext('Reassign disk to another VM'), iconCls: 'fa fa-desktop', - selModel: sm, handler: () => { let rec = sm.getSelection()[0]; if (!rec) { @@ -538,7 +1028,6 @@ Ext.define('PVE.qemu.HardwareView', { let resize_menuitem = new Ext.menu.Item({ text: gettext('Resize'), iconCls: 'fa fa-plus', - selModel: sm, handler: () => { let rec = sm.getSelection()[0]; if (!rec) { @@ -621,7 +1110,7 @@ Ext.define('PVE.qemu.HardwareView', { if (this.text === this.altText) { warn = gettext('Are you sure you want to detach entry {0}'); } - let rendered 
= me.renderKey(rec.data.key, {}, rec); + let rendered = hardwareGrid.renderKey(rec.data.key, {}, rec); let msg = Ext.String.format(warn, `'${rendered}'`); if (rows[rec.data.key].del_extra_msg) { @@ -630,7 +1119,8 @@ Ext.define('PVE.qemu.HardwareView', { return msg; }, handler: function (btn, e, rec) { - let params = { delete: rec.data.key }; + let record = sm.getSelection()[0]; + let params = { delete: record.data.key }; if (btn.RESTMethod === 'POST') { params.background_delay = 5; } @@ -673,6 +1163,8 @@ Ext.define('PVE.qemu.HardwareView', { let revert_btn = new PVE.button.PendingRevert({ apiurl: '/api2/extjs/' + baseurl, + selModel: sm, + pendingGrid: hardwareGrid, }); let efidisk_menuitem = Ext.create('Ext.menu.Item', { @@ -680,8 +1172,7 @@ Ext.define('PVE.qemu.HardwareView', { iconCls: 'fa fa-fw fa-hdd-o black', disabled: !caps.vms['VM.Config.Disk'], handler: function () { - let { data: bios } = me.rstore.getData().map.bios || {}; - + let { data: bios } = hardwareGrid.rstore.getData().map.bios || {}; Ext.create('PVE.qemu.EFIDiskEdit', { autoShow: true, url: '/api2/extjs/' + baseurl, @@ -703,12 +1194,12 @@ Ext.define('PVE.qemu.HardwareView', { }; let set_button_status = function () { - let selection_model = me.getSelectionModel(); + let selection_model = sm; let rec = selection_model.getSelection()[0]; counts = {}; // en/disable hardwarebuttons let hasCloudInit = false; - me.rstore.getData().items.forEach(function ({ id, data }) { + hardwareGrid.rstore.getData().items.forEach(function ({ id, data }) { if (!hasCloudInit && (isCloudInitKey(data.value) || isCloudInitKey(data.pending))) { hasCloudInit = true; return; @@ -751,9 +1242,8 @@ Ext.define('PVE.qemu.HardwareView', { } const { key, value } = rec.data; const row = rows[key]; - const deleted = !!rec.data.delete; - const pending = deleted || me.hasPendingChanges(key); + const pending = deleted || hardwareGrid.hasPendingChanges(key); const isRunning = me.pveSelNode.data.running; const isCloudInit = 
isCloudInitKey(value); @@ -821,10 +1311,6 @@ Ext.define('PVE.qemu.HardwareView', { }; Ext.apply(me, { - url: `/api2/json/nodes/${nodename}/qemu/${vmid}/pending`, - interval: 5000, - selModel: sm, - run_editor: run_editor, tbar: [ { text: gettext('Add'), @@ -926,19 +1412,32 @@ Ext.define('PVE.qemu.HardwareView', { diskaction_btn, revert_btn, ], - rows: rows, - sorterFn: sorterFn, - listeners: { - itemdblclick: run_editor, - selectionchange: set_button_status, - }, + items: [ + hardwareGrid, + // spacer with bottom border + { + xtype: 'panel', + border: true, + height: 1, + margin: '20 0 0 0', + }, + diskGrid, + // spacer with bottom border + { + xtype: 'panel', + border: true, + height: 1, + margin: '20 0 0 0', + }, + netGrid, + ], }); me.callParent(); - me.on('activate', me.rstore.startUpdate, me.rstore); - me.on('destroy', me.rstore.stopUpdate, me.rstore); + me.on('activate', () => hardwareGrid.rstore.startUpdate()); + me.on('destroy', () => hardwareGrid.rstore.stopUpdate()); - me.mon(me.getStore(), 'datachanged', set_button_status, me); + hardwareGrid.mon(hardwareGrid.getStore(), 'datachanged', set_button_status, me); }, }); -- 2.47.3 ^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [PATCH manager v2 3/3] ui: qemu hardware view: split out disks and nics into grids 2026-05-08 13:38 ` [PATCH manager v2 3/3] ui: qemu hardware view: split out disks and nics into grids Dominik Csapak @ 2026-05-12 2:06 ` Thomas Lamprecht 2026-05-12 7:12 ` Dominik Csapak 0 siblings, 1 reply; 7+ messages in thread From: Thomas Lamprecht @ 2026-05-12 2:06 UTC (permalink / raw) To: Dominik Csapak, pve-devel On 08/05/2026 15:39, Dominik Csapak wrote: > This is done to improve visibility of configuration of disks and nics. > Currently only the raw property string is shown in the UI, which leads > to bad UX when having many options configured and especially when having > pending changes in these situations because it's hard to parse visually. > > The disks and nics are shown below the main options, each as their own > grids with relevant columns. By default all columns that can be edited > in the gui are shown (with the exception of the bandwidth limits, since > they're not often used and blow up the number of columns). All other > options from the config are there but hidden by default. > > As a fallback, in case there are new options not yet in the ui, a 'Raw > Config' column was added, which contains the raw property string again > (but it's hidden by default). > > Some notes on the implementation: > > The stores for the new grids are populated on each load of the main > grid, and the we have one combined pending store that's just there to > save the parsed pending values. > > To keep the diff smaller, some helpers are introduced to forward some > methods to the main hardware grid. > > The selection change introduces some logic to keep the selection only > in one grid, so there can be no conflict on the selected values. Thanks for this, main question here: why not move the buttons too? 
I.e., the hardware view is rather crowded and that's a main reason for doing this originally, but a big part of the problem isn't really the amount of rows (for VMs with many disks and or NICs that naturally can become a problem too), but the edit buttons and their complex state/display changes depending on what is selected. In my (not so thought out!) vision I'd have added a tbar to each panel and reworked the buttons such that only the ones relevant for the elements in there are shown, as else this is IMO going sidewards, if not even getting slightly worse in some aspects, as the element to edit and the edit/action button are even a "bigger" distance between them, so to say. Also, might be nice to have some explicit rationale for why things like TPM and EFI and maybe even SCSI controller are not part of the disks panel - not saying they must be, but they are at least hard disk related and thus IMO worth adding an explicit rationale, even if they are not normal hard disks and thus fall out a narrow/literal interpretation of the "Hard Disks" sub-panel. Layout wise I'm also not so sure if it wouldn't be better to split the vertical space more evenly (or give the general one only some padding and flex the other two below?). ^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH manager v2 3/3] ui: qemu hardware view: split out disks and nics into grids 2026-05-12 2:06 ` Thomas Lamprecht @ 2026-05-12 7:12 ` Dominik Csapak 0 siblings, 0 replies; 7+ messages in thread From: Dominik Csapak @ 2026-05-12 7:12 UTC (permalink / raw) To: Thomas Lamprecht, pve-devel Thanks for the feedback! On 5/12/26 4:04 AM, Thomas Lamprecht wrote: > On 08/05/2026 15:39, Dominik Csapak wrote: >> This is done to improve visibility of configuration of disks and nics. >> Currently only the raw property string is shown in the UI, which leads >> to bad UX when having many options configured and especially when having >> pending changes in these situations because it's hard to parse visually. >> >> The disks and nics are shown below the main options, each as their own >> grids with relevant columns. By default all columns that can be edited >> in the gui are shown (with the exception of the bandwidth limits, since >> they're not often used and blow up the number of columns). All other >> options from the config are there but hidden by default. >> >> As a fallback, in case there are new options not yet in the ui, a 'Raw >> Config' column was added, which contains the raw property string again >> (but it's hidden by default). >> >> Some notes on the implementation: >> >> The stores for the new grids are populated on each load of the main >> grid, and the we have one combined pending store that's just there to >> save the parsed pending values. >> >> To keep the diff smaller, some helpers are introduced to forward some >> methods to the main hardware grid. >> >> The selection change introduces some logic to keep the selection only >> in one grid, so there can be no conflict on the selected values. > > Thanks for this, main question here: why not move the buttons too? 
> I.e., the hardware view is rather crowded and that's a main reason > for doing this originally, but a big part of the problem isn't really > the amount of rows (for VMs with many disks and/or NICs that naturally > can become a problem too), but the edit buttons and their complex > state/display changes depending on what is selected. ah ok, my motivation was actually a different one, I wanted to make the various options of the nics and disks more visible (especially if there is a pending change) in different columns. So the button placement was not my primary reason, but I see what you mean. > > In my (not so thought out!) vision I'd have added a tbar to each > panel and reworked the buttons such that only the ones relevant for the > elements in there are shown, as else this is IMO going sidewards, if not > even getting slightly worse in some aspects, as the element to edit and > the edit/action button are even a "bigger" distance between them, so to > say. Sure, I'll move the buttons to the respective grids, should not be too big of a change (I hope ;) ). Another way could be to introduce an action column with the specific actions (either separate or as a menu) I think it could be even better, but I'd have to try it first. What do you think? We'd still have to keep the tbars for 'Add' at least. > > Also, might be nice to have some explicit rationale for why things like > TPM and EFI and maybe even SCSI controller are not part of the disks > panel - not saying they must be, but they are at least hard disk related > and thus IMO worth adding an explicit rationale, even if they are not > normal hard disks and thus fall outside a narrow/literal interpretation > of the "Hard Disks" sub-panel. 
sure, main reason was that the columns wouldn't always fit and either some columns would be empty for most of the rows, or we'd have to give some columns a double meaning. I'll either include them in the grids (tpm and efi could work), scsi controller would be borderline IMO, or I'll include the rationale in the next version > > Layout wise I'm also not so sure if it wouldn't be better to split > the vertical space more evenly (or give the general one only some > padding and flex the other two below?). > > > I'm not exactly sure what you mean here? splitting the 'general' in two columns? the nic/disk grids are very wide with their columns already so showing either of them next to another grid is probably not good space wise. ^ permalink raw reply [flat|nested] 7+ messages in thread
* superseded: [PATCH manager v2 0/3] ui: split out disks and nics into grids 2026-05-08 13:38 [PATCH manager v2 0/3] ui: split out disks and nics into grids Dominik Csapak ` (2 preceding siblings ...) 2026-05-08 13:38 ` [PATCH manager v2 3/3] ui: qemu hardware view: split out disks and nics into grids Dominik Csapak @ 2026-05-15 9:21 ` Dominik Csapak 3 siblings, 0 replies; 7+ messages in thread From: Dominik Csapak @ 2026-05-15 9:21 UTC (permalink / raw) To: pve-devel superseded by v3: https://lore.proxmox.com/pve-devel/20260515085349.1123127-1-d.csapak@proxmox.com/ On 5/8/26 3:39 PM, Dominik Csapak wrote: > Resending with @Davids comments fixed, sending qemu only for now again, > still working on the lxc side. > > see commit message 3/3 for details and rationale > > changes from v1: > * refactor some regexes/checks > * fix some bugs I encountered during testing again > > Dominik Csapak (3): > ui: utils: factor out 'media=cdrom' check > ui: factor out the guest key nic regex check > ui: qemu hardware view: split out disks and nics into grids > > www/manager6/Utils.js | 16 +- > www/manager6/button/Revert.js | 2 +- > www/manager6/lxc/Network.js | 4 +- > www/manager6/qemu/BootOrderEdit.js | 12 +- > www/manager6/qemu/CreateWizard.js | 4 +- > www/manager6/qemu/HardwareView.js | 557 +++++++++++++++++++++++++++-- > www/manager6/window/GuestImport.js | 2 +- > 7 files changed, 555 insertions(+), 42 deletions(-) > ^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2026-05-15 9:22 UTC | newest] Thread overview: 7+ messages (download: mbox.gz follow: Atom feed -- links below jump to the message on this page -- 2026-05-08 13:38 [PATCH manager v2 0/3] ui: split out disks and nics into grids Dominik Csapak 2026-05-08 13:38 ` [PATCH manager v2 1/3] ui: utils: factor out 'media=cdrom' check Dominik Csapak 2026-05-08 13:38 ` [PATCH manager v2 2/3] ui: factor out the guest key nic regex check Dominik Csapak 2026-05-08 13:38 ` [PATCH manager v2 3/3] ui: qemu hardware view: split out disks and nics into grids Dominik Csapak 2026-05-12 2:06 ` Thomas Lamprecht 2026-05-12 7:12 ` Dominik Csapak 2026-05-15 9:21 ` superseded: [PATCH manager v2 0/3] ui: " Dominik Csapak
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.