From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from firstgate.proxmox.com (firstgate.proxmox.com [IPv6:2a01:7e0:0:424::9]) by lore.proxmox.com (Postfix) with ESMTPS id AA6E01FF14C for ; Fri, 15 May 2026 09:46:50 +0200 (CEST) Received: from firstgate.proxmox.com (localhost [127.0.0.1]) by firstgate.proxmox.com (Proxmox) with ESMTP id 81BD0E4A2; Fri, 15 May 2026 09:46:50 +0200 (CEST) From: Thomas Lamprecht To: pdm-devel@lists.proxmox.com Subject: [PATCH datacenter-manager v3 06/12] cli: client: add subscription key pool management subcommands Date: Fri, 15 May 2026 09:43:16 +0200 Message-ID: <20260515074623.766766-7-t.lamprecht@proxmox.com> X-Mailer: git-send-email 2.47.3 In-Reply-To: <20260515074623.766766-1-t.lamprecht@proxmox.com> References: <20260515074623.766766-1-t.lamprecht@proxmox.com> MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Bm-Milter-Handled: 55990f41-d878-4baa-be0a-ee34c49e34d2 X-Bm-Transport-Timestamp: 1778831192614 X-SPAM-LEVEL: Spam detection results: 0 AWL 0.003 Adjusted score from AWL reputation of From: address BAYES_00 -1.9 Bayes spam probability is 0 to 1% DMARC_MISSING 0.1 Missing DMARC policy KAM_DMARC_STATUS 0.01 Test Rule for DKIM or SPF Failure with Strict Alignment SPF_HELO_NONE 0.001 SPF: HELO does not publish an SPF Record SPF_PASS -0.001 SPF: sender matches SPF record Message-ID-Hash: YERERJDHYASYHNVNSNYXGBKRSNKINCHF X-Message-ID-Hash: YERERJDHYASYHNVNSNYXGBKRSNKINCHF X-MailFrom: t.lamprecht@proxmox.com X-Mailman-Rule-Misses: dmarc-mitigation; no-senders; approved; loop; banned-address; emergency; member-moderation; nonmember-moderation; administrivia; implicit-dest; max-recipients; max-size; news-moderation; no-subject; digests; suspicious-header X-Mailman-Version: 3.3.10 Precedence: list List-Id: Proxmox Datacenter Manager development discussion List-Help: List-Owner: List-Post: List-Subscribe: List-Unsubscribe: Plumb the new key-pool API endpoints through the CLI under the existing `subscriptions` command 
group. The pre-existing `status` subcommand becomes a sibling rather than the sole entry. Signed-off-by: Thomas Lamprecht --- Changes v2 -> 3: * Worker outcomes surface via the new wait_for_local_task helper from v3-0002 instead of a hand-rolled poll loop. * CLI for the new v3 endpoints (Clear Key, Adopt Key / Adopt All, Check Subscription) is wired in their respective per-feature commits, not here. cli/client/src/subscriptions.rs | 260 +++++++++++++++++++++++++++++++- lib/pdm-client/src/lib.rs | 179 +++++++++++++++++++++- 2 files changed, 430 insertions(+), 9 deletions(-) diff --git a/cli/client/src/subscriptions.rs b/cli/client/src/subscriptions.rs index d8bf1e09..00c06ada 100644 --- a/cli/client/src/subscriptions.rs +++ b/cli/client/src/subscriptions.rs @@ -1,18 +1,46 @@ use anyhow::Error; +use proxmox_config_digest::PROXMOX_CONFIG_DIGEST_SCHEMA; use proxmox_router::cli::{ - format_and_print_result, CliCommand, CommandLineInterface, OutputFormat, + format_and_print_result, CliCommand, CliCommandMap, CommandLineInterface, OutputFormat, }; use proxmox_schema::api; -use pdm_api_types::subscription::RemoteSubscriptionState; -use pdm_api_types::VIEW_ID_SCHEMA; +use pdm_api_types::remotes::REMOTE_ID_SCHEMA; +use pdm_api_types::subscription::{RemoteSubscriptionState, SUBSCRIPTION_KEY_SCHEMA}; +use pdm_api_types::{NODE_SCHEMA, VIEW_ID_SCHEMA}; +use pdm_client::ConfigDigest; use crate::env::emoji; use crate::{client, env}; pub fn cli() -> CommandLineInterface { - CliCommand::new(&API_METHOD_GET_SUBSCRIPTION_STATUS).into() + CliCommandMap::new() + .insert( + "status", + CliCommand::new(&API_METHOD_GET_SUBSCRIPTION_STATUS), + ) + .insert("list-keys", CliCommand::new(&API_METHOD_LIST_KEYS)) + .insert( + "add-keys", + CliCommand::new(&API_METHOD_ADD_KEYS).arg_param(&["keys"]), + ) + .insert( + "assign-key", + CliCommand::new(&API_METHOD_ASSIGN_KEY).arg_param(&["key"]), + ) + .insert( + "clear-assignment", + CliCommand::new(&API_METHOD_CLEAR_ASSIGNMENT).arg_param(&["key"]), + 
) + .insert( + "remove-key", + CliCommand::new(&API_METHOD_REMOVE_KEY).arg_param(&["key"]), + ) + .insert("auto-assign", CliCommand::new(&API_METHOD_AUTO_ASSIGN)) + .insert("apply-pending", CliCommand::new(&API_METHOD_APPLY_PENDING)) + .insert("clear-pending", CliCommand::new(&API_METHOD_CLEAR_PENDING)) + .into() } #[api( @@ -37,7 +65,7 @@ pub fn cli() -> CommandLineInterface { }, } )] -/// List all the remotes this instance is managing. +/// Show the subscription status of all remotes. async fn get_subscription_status( max_age: Option, verbose: Option, @@ -106,3 +134,225 @@ async fn get_subscription_status( } Ok(()) } + +#[api] +/// List all subscription keys in the pool. +async fn list_keys() -> Result<(), Error> { + let (keys, _digest) = client()?.list_subscription_keys().await?; + + let output_format = env().format_args.output_format; + if output_format == OutputFormat::Text { + if keys.is_empty() { + println!("No keys in pool."); + return Ok(()); + } + let key_width = keys.iter().map(|k| k.key.len()).max().unwrap_or(20); + for key in &keys { + let assignment = match (&key.remote, &key.node) { + (Some(r), Some(n)) => format!("{r}/{n}"), + _ => "(unassigned)".to_string(), + }; + println!( + " {key:, digest: Option) -> Result<(), Error> { + let digest = digest.map(ConfigDigest::from); + client()?.add_subscription_keys(&keys, digest).await?; + let n = keys.len(); + if n == 1 { + println!("Added {} to pool.", keys[0]); + } else { + println!("Added {n} keys to pool."); + } + Ok(()) +} + +#[api( + input: { + properties: { + key: { schema: SUBSCRIPTION_KEY_SCHEMA }, + remote: { schema: REMOTE_ID_SCHEMA }, + node: { schema: NODE_SCHEMA }, + digest: { + schema: PROXMOX_CONFIG_DIGEST_SCHEMA, + optional: true, + }, + }, + }, +)] +/// Assign a key from the pool to a remote node. +async fn assign_key( + key: String, + remote: String, + node: String, + digest: Option, +) -> Result<(), Error> { + let digest = digest.map(ConfigDigest::from); + client()? 
+ .set_subscription_assignment(&key, &remote, &node, digest) + .await?; + println!("Assigned {key} to {remote}/{node}."); + Ok(()) +} + +#[api( + input: { + properties: { + key: { schema: SUBSCRIPTION_KEY_SCHEMA }, + digest: { + schema: PROXMOX_CONFIG_DIGEST_SCHEMA, + optional: true, + }, + }, + }, +)] +/// Clear the assignment of a key (unassign from its remote node). +async fn clear_assignment(key: String, digest: Option<String>) -> Result<(), Error> { + let digest = digest.map(ConfigDigest::from); + client()? + .clear_subscription_assignment(&key, digest) + .await?; + println!("Cleared assignment for {key}."); + Ok(()) +} + +#[api( + input: { + properties: { + key: { schema: SUBSCRIPTION_KEY_SCHEMA }, + }, + }, +)] +/// Remove a key from the pool entirely. +async fn remove_key(key: String) -> Result<(), Error> { + client()?.delete_subscription_key(&key).await?; + println!("Removed {key} from pool."); + Ok(()) +} + +#[api( + input: { + properties: { + apply: { + type: bool, + optional: true, + default: false, + description: "Commit the proposal immediately via bulk-assign. \ + Without this, only a preview is printed.", + }, + }, + }, +)] +/// Propose (and optionally apply) automatic key-to-node assignments.
+async fn auto_assign(apply: bool) -> Result<(), Error> { + let client = client()?; + let proposal = client.subscription_auto_assign().await?; + + if proposal.assignments.is_empty() { + println!("No suitable free keys for nodes without an active subscription."); + return Ok(()); + } + + let verb = if apply { "assigned" } else { "proposed" }; + for p in &proposal.assignments { + println!(" {verb}: {} -> {}/{}", p.key, p.remote, p.node); + } + + if !apply { + println!("\nRe-run with --apply to apply these assignments."); + return Ok(()); + } + let applied = client.subscription_bulk_assign(proposal).await?; + if applied.is_empty() { + println!("\nServer rejected the proposal (no entries applied)."); + } + Ok(()) +} + +#[api( + input: { + properties: { + digest: { + schema: PROXMOX_CONFIG_DIGEST_SCHEMA, + optional: true, + }, + }, + }, +)] +/// Push all pending key assignments to remotes as a worker task. +/// +/// Blocks until the worker finishes so the operator sees the exit status of the actual push +/// run, not just a UPID they would have to chase down by hand. +async fn apply_pending(digest: Option<String>) -> Result<(), Error> { + let digest = digest.map(ConfigDigest::from); + let client = client()?; + let upid = match client.subscription_apply_pending(digest).await? { + None => { + println!("No pending assignments to apply."); + return Ok(()); + } + Some(upid) => upid, + }; + println!("Started worker task: {upid}"); + let status = client.wait_for_local_task(&upid).await?; + let exit = status + .get("exitstatus") + .and_then(|v| v.as_str()) + .unwrap_or("unknown"); + if exit == "OK" { + println!("Task finished: OK"); + Ok(()) + } else { + anyhow::bail!("worker task ended with: {exit}"); + } +} + +#[api( + input: { + properties: { + digest: { + schema: PROXMOX_CONFIG_DIGEST_SCHEMA, + optional: true, + }, + }, + }, +)] +/// Clear every pending assignment in one bulk transaction.
+async fn clear_pending(digest: Option<String>) -> Result<(), Error> { + let digest = digest.map(ConfigDigest::from); + let cleared = client()?.subscription_clear_pending(digest).await?; + if cleared == 0 { + println!("No pending assignments to clear."); + } else { + println!("Cleared {cleared} pending assignment(s)."); + } + Ok(()) +} diff --git a/lib/pdm-client/src/lib.rs b/lib/pdm-client/src/lib.rs index cb5bb043..1fed0e85 100644 --- a/lib/pdm-client/src/lib.rs +++ b/lib/pdm-client/src/lib.rs @@ -76,7 +76,10 @@ pub mod types { pub use pve_api_types::StorageStatus as PveStorageStatus; - pub use pdm_api_types::subscription::{RemoteSubscriptionState, RemoteSubscriptions}; + pub use pdm_api_types::subscription::{ + AutoAssignProposal, ClearPendingResult, ProductType, ProposedAssignment, RemoteNodeStatus, + RemoteSubscriptionState, RemoteSubscriptions, SubscriptionKeyEntry, SubscriptionKeySource, + }; pub use pve_api_types::{SdnVnetMacVrf, SdnZoneIpVrf}; } @@ -898,9 +901,6 @@ impl PdmClient { /// server-side wait surface lands this method becomes a single GET with no behaviour change /// for callers. /// - /// No built-in time bound; wrap in `tokio::time::timeout` if needed. Dropping the future - /// stops the client-side polling only - the server-side worker keeps running. - /// /// Native-only: the polling loop relies on `tokio::time::sleep`, which is not available on /// the wasm32 target the UI builds for. #[cfg(not(target_arch = "wasm32"))] @@ -1119,6 +1119,177 @@ impl PdmClient { Ok(self.0.get(&path).await?.expect_json()?.data) } + + /// List all keys in the subscription pool. Returns the entries plus the matching + /// `ConfigDigest` so the caller can chain a digest-aware add / assign / delete back. + pub async fn list_subscription_keys( + &self, + ) -> Result<(Vec<SubscriptionKeyEntry>, Option<ConfigDigest>), Error> { + let mut res = self + .0 + .get("/api2/extjs/subscriptions/keys") + .await?
+ .expect_json()?; + Ok((res.data, res.attribs.remove("digest").map(ConfigDigest))) + } + + /// Add one or more keys to the pool. See the daemon-side endpoint for the all-or-nothing + /// validation semantics. + pub async fn add_subscription_keys( + &self, + keys: &[String], + digest: Option<ConfigDigest>, + ) -> Result<(), Error> { + #[derive(Serialize)] + struct AddArgs<'a> { + keys: &'a [String], + #[serde(skip_serializing_if = "Option::is_none")] + digest: Option<ConfigDigest>, + } + self.0 + .post("/api2/extjs/subscriptions/keys", &AddArgs { keys, digest }) + .await? + .nodata() + } + + /// Bind a key to a remote node. + pub async fn set_subscription_assignment( + &self, + key: &str, + remote: &str, + node: &str, + digest: Option<ConfigDigest>, + ) -> Result<(), Error> { + #[derive(Serialize)] + struct AssignArgs<'a> { + remote: &'a str, + node: &'a str, + #[serde(skip_serializing_if = "Option::is_none")] + digest: Option<ConfigDigest>, + } + let path = format!("/api2/extjs/subscriptions/keys/{key}/assignment"); + self.0 + .post( + &path, + &AssignArgs { + remote, + node, + digest, + }, + ) + .await? + .nodata() + } + + /// Drop the remote-node binding for a pool key (the inverse of + /// [`set_subscription_assignment`]). + pub async fn clear_subscription_assignment( + &self, + key: &str, + digest: Option<ConfigDigest>, + ) -> Result<(), Error> { + let path = ApiPathBuilder::new(format!( + "/api2/extjs/subscriptions/keys/{key}/assignment" + )) + .maybe_arg("digest", &digest.map(Value::from)) + .build(); + self.0.delete(&path).await?.nodata() + } + + /// Remove a key from the pool entirely. + /// + /// No digest parameter: deletion is a point-of-no-return operation and the typed-client + /// surface elsewhere (delete_remote, delete_user, ...) does not round-trip a digest on + /// DELETE either. External REST callers can still pass `digest` via the URL query if they + /// want optimistic concurrency on deletion; the server-side endpoint accepts it.
+ pub async fn delete_subscription_key(&self, key: &str) -> Result<(), Error> { + let path = format!("/api2/extjs/subscriptions/keys/{key}"); + self.0.delete(&path).await?.nodata() + } + + /// Combined remote/node subscription status, filtered to remotes the caller has audit + /// privilege on. + pub async fn subscription_node_status( + &self, + max_age: Option<u64>, + ) -> Result<Vec<RemoteNodeStatus>, Error> { + let path = ApiPathBuilder::new("/api2/extjs/subscriptions/node-status") + .maybe_arg("max-age", &max_age) + .build(); + Ok(self.0.get(&path).await?.expect_json()?.data) + } + + /// Compute a key-to-node assignment proposal. Apply it with + /// [`subscription_bulk_assign`]. + pub async fn subscription_auto_assign(&self) -> Result<AutoAssignProposal, Error> { + Ok(self + .0 + .post("/api2/extjs/subscriptions/auto-assign", &json!({})) + .await? + .expect_json()? + .data) + } + + /// Commit a proposal previously returned by [`subscription_auto_assign`]. The server + /// rejects the call with 409 if either the pool or the live node-status has drifted + /// since the proposal was computed. + pub async fn subscription_bulk_assign( + &self, + proposal: AutoAssignProposal, + ) -> Result<Vec<ProposedAssignment>, Error> { + Ok(self + .0 + .post( + "/api2/extjs/subscriptions/bulk-assign", + &json!({ "proposal": proposal }), + ) + .await? + .expect_json()? + .data) + } + + /// Push every pending assignment. Returns the worker UPID, or `None` when there is nothing + /// to do. + /// + /// The optional `digest` rejects the call at the API boundary if the pool changed since the + /// caller last loaded it - the at-API-call-time plan is pinned, but the worker re-reads when + /// it fires, so a parallel admin edit between API return and worker start is still honoured.
+ pub async fn subscription_apply_pending( + &self, + digest: Option<ConfigDigest>, + ) -> Result<Option<String>, Error> { + #[derive(Serialize)] + struct Args { + #[serde(skip_serializing_if = "Option::is_none")] + digest: Option<ConfigDigest>, + } + Ok(self + .0 + .post("/api2/extjs/subscriptions/apply-pending", &Args { digest }) + .await? + .expect_json()? + .data) + } + + /// Clear every pending assignment in one bulk transaction; returns the count of cleared + /// entries. + pub async fn subscription_clear_pending( + &self, + digest: Option<ConfigDigest>, + ) -> Result<u64, Error> { + #[derive(Serialize)] + struct Args { + #[serde(skip_serializing_if = "Option::is_none")] + digest: Option<ConfigDigest>, + } + let result: types::ClearPendingResult = self + .0 + .post("/api2/extjs/subscriptions/clear-pending", &Args { digest }) + .await? + .expect_json()? + .data; + Ok(result.cleared) + } + + pub async fn pve_list_networks( + &self, + remote: &str, -- 2.47.3