From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from firstgate.proxmox.com (firstgate.proxmox.com [IPv6:2a01:7e0:0:424::9]) by lore.proxmox.com (Postfix) with ESMTPS id 532081FF13C for ; Thu, 02 Apr 2026 00:53:36 +0200 (CEST) Received: from firstgate.proxmox.com (localhost [127.0.0.1]) by firstgate.proxmox.com (Proxmox) with ESMTP id 4CAB02DB9; Thu, 2 Apr 2026 00:54:05 +0200 (CEST) From: Thomas Lamprecht To: pbs-devel@lists.proxmox.com Subject: [PATCH v3 2/5] client: repository: add individual component parameters Date: Thu, 2 Apr 2026 00:48:58 +0200 Message-ID: <20260401225305.4069441-3-t.lamprecht@proxmox.com> X-Mailer: git-send-email 2.47.3 In-Reply-To: <20260401225305.4069441-1-t.lamprecht@proxmox.com> References: <20260401225305.4069441-1-t.lamprecht@proxmox.com> MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Bm-Milter-Handled: 55990f41-d878-4baa-be0a-ee34c49e34d2 X-Bm-Transport-Timestamp: 1775083947883 X-SPAM-LEVEL: Spam detection results: 0 AWL 0.001 Adjusted score from AWL reputation of From: address BAYES_00 -1.9 Bayes spam probability is 0 to 1% DMARC_MISSING 0.1 Missing DMARC policy KAM_DMARC_STATUS 0.01 Test Rule for DKIM or SPF Failure with Strict Alignment SPF_HELO_NONE 0.001 SPF: HELO does not publish an SPF Record SPF_PASS -0.001 SPF: sender matches SPF record Message-ID-Hash: MZUWSKOLSN5DOET7VILIVQDOE7O62AZI X-Message-ID-Hash: MZUWSKOLSN5DOET7VILIVQDOE7O62AZI X-MailFrom: t.lamprecht@proxmox.com X-Mailman-Rule-Misses: dmarc-mitigation; no-senders; approved; loop; banned-address; emergency; member-moderation; nonmember-moderation; administrivia; implicit-dest; max-recipients; max-size; news-moderation; no-subject; digests; suspicious-header X-Mailman-Version: 3.3.10 Precedence: list List-Id: Proxmox Backup Server development discussion List-Help: List-Owner: List-Post: List-Subscribe: List-Unsubscribe: The compact repository URL format ([[auth-id@]server[:port]:]datastore) can be cumbersome to work with when changing a single aspect 
of the connection or when using API tokens. Add --server, --port, --datastore, --auth-id, and --ns as separate CLI parameters alongside the existing compound --repository URL. A conversion resolves either form into a BackupRepository, enforcing mutual exclusion between the two. CLI atom options merge with the corresponding PBS_SERVER, PBS_PORT, PBS_DATASTORE, PBS_AUTH_ID environment variables per-field (CLI wins), following the common convention where CLI flags override their corresponding environment variable defaults. If no CLI args are given, PBS_REPOSITORY takes precedence over the atom env vars. No command-level changes yet; the struct and extraction logic are introduced here so that the command migration can be a separate mechanical change. Signed-off-by: Thomas Lamprecht --- changes v2 -> v3: - deduplicated resolution cascade into shared helper. - split into BackupRepositoryArgs (repo only) + BackupTargetArgs (wraps repo args + ns), removing ns: None from resolution code. pbs-client/src/backup_repo.rs | 242 ++++++++++++++++++++++++++++- pbs-client/src/tools/mod.rs | 277 +++++++++++++++++++++++++++++----- 2 files changed, 482 insertions(+), 37 deletions(-) diff --git a/pbs-client/src/backup_repo.rs b/pbs-client/src/backup_repo.rs index 45c859d67..c899dc277 100644 --- a/pbs-client/src/backup_repo.rs +++ b/pbs-client/src/backup_repo.rs @@ -1,8 +1,156 @@ use std::fmt; -use anyhow::{format_err, Error}; +use anyhow::{bail, format_err, Error}; +use serde::{Deserialize, Serialize}; -use pbs_api_types::{Authid, Userid, BACKUP_REPO_URL_REGEX, IP_V6_REGEX}; +use proxmox_schema::*; + +use pbs_api_types::{ + Authid, BackupNamespace, Userid, BACKUP_REPO_URL, BACKUP_REPO_URL_REGEX, DATASTORE_SCHEMA, + IP_V6_REGEX, +}; + +pub const REPO_URL_SCHEMA: Schema = + StringSchema::new("Repository URL: [[auth-id@]server[:port]:]datastore") + .format(&BACKUP_REPO_URL) + .max_length(256) + .schema(); + +pub const BACKUP_REPO_SERVER_SCHEMA: Schema = + StringSchema::new("Backup server 
address (hostname or IP). Default: localhost") + .format(&api_types::DNS_NAME_OR_IP_FORMAT) + .max_length(256) + .schema(); + +pub const BACKUP_REPO_PORT_SCHEMA: Schema = IntegerSchema::new("Backup server port. Default: 8007") + .minimum(1) + .maximum(65535) + .default(8007) + .schema(); + +#[api( + properties: { + repository: { + schema: REPO_URL_SCHEMA, + optional: true, + }, + server: { + schema: BACKUP_REPO_SERVER_SCHEMA, + optional: true, + }, + port: { + schema: BACKUP_REPO_PORT_SCHEMA, + optional: true, + }, + datastore: { + schema: DATASTORE_SCHEMA, + optional: true, + }, + "auth-id": { + type: Authid, + optional: true, + }, + }, +)] +#[derive(Default, Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Backup repository location, specified either as a repository URL or as individual +/// components (server, port, datastore, auth-id). +pub struct BackupRepositoryArgs { + #[serde(skip_serializing_if = "Option::is_none")] + pub repository: Option<String>, + #[serde(skip_serializing_if = "Option::is_none")] + pub server: Option<String>, + #[serde(skip_serializing_if = "Option::is_none")] + pub port: Option<u16>, + #[serde(skip_serializing_if = "Option::is_none")] + pub datastore: Option<String>, + #[serde(skip_serializing_if = "Option::is_none")] + pub auth_id: Option<Authid>, +} + +#[api( + properties: { + target: { + type: BackupRepositoryArgs, + flatten: true, + }, + ns: { + type: BackupNamespace, + optional: true, + }, + }, +)] +#[derive(Default, Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Backup target for CLI commands, combining the repository location with an +/// optional namespace. +pub struct BackupTargetArgs { + #[serde(flatten)] + pub target: BackupRepositoryArgs, + #[serde(skip_serializing_if = "Option::is_none")] + pub ns: Option<BackupNamespace>, +} + +impl BackupRepositoryArgs { + /// Returns `true` if any atom parameter (server, port, datastore, or auth-id) is set. 
+ pub fn has_atoms(&self) -> bool { + self.server.is_some() + || self.port.is_some() + || self.datastore.is_some() + || self.auth_id.is_some() + } + + /// Merge `self` with `fallback`, using values from `self` where present + /// and filling in from `fallback` for fields that are `None`. + pub fn merge_from(self, fallback: BackupRepositoryArgs) -> Self { + Self { + repository: self.repository.or(fallback.repository), + server: self.server.or(fallback.server), + port: self.port.or(fallback.port), + datastore: self.datastore.or(fallback.datastore), + auth_id: self.auth_id.or(fallback.auth_id), + } + } +} + +impl TryFrom<BackupRepositoryArgs> for BackupRepository { + type Error = anyhow::Error; + + /// Convert explicit CLI arguments into a [`BackupRepository`]. + /// + /// * If `repository` and any atom are both set, returns an error. + /// * If atoms are present, builds the repository from them (requires `datastore`). + /// * If only `repository` is set, parses the repo URL. + /// * If nothing is set, returns an error - callers must fall back to environment variables / + /// credentials themselves. 
+ fn try_from(args: BackupRepositoryArgs) -> Result<Self, Self::Error> { + let has_url = args.repository.is_some(); + let has_atoms = args.has_atoms(); + + if has_url && has_atoms { + bail!("--repository and --server/--port/--datastore/--auth-id are mutually exclusive"); + } + + if has_atoms { + let store = args.datastore.ok_or_else(|| { + format_err!("--datastore is required when not using --repository") + })?; + return Ok(BackupRepository::new( + args.auth_id, + args.server, + args.port, + store, + )); + } + + if let Some(url) = args.repository { + return url.parse(); + } + + anyhow::bail!("no repository specified") + } +} /// Reference remote backup locations /// @@ -193,4 +341,94 @@ mod tests { let repo = BackupRepository::new(None, Some("[ff80::1]".into()), None, "s".into()); assert_eq!(repo.host(), "[ff80::1]"); } + + #[test] + fn has_atoms() { + assert!(!BackupRepositoryArgs::default().has_atoms()); + + let with_server = BackupRepositoryArgs { + server: Some("host".into()), + ..Default::default() + }; + assert!(with_server.has_atoms()); + + let repo_only = BackupRepositoryArgs { + repository: Some("myhost:mystore".into()), + ..Default::default() + }; + assert!(!repo_only.has_atoms()); + } + + #[test] + fn try_from_atoms_only() { + let args = BackupRepositoryArgs { + server: Some("pbs.local".into()), + port: Some(9000), + datastore: Some("tank".into()), + auth_id: Some("backup@pam".parse().unwrap()), + ..Default::default() + }; + let repo = BackupRepository::try_from(args).unwrap(); + assert_eq!(repo.host(), "pbs.local"); + assert_eq!(repo.port(), 9000); + assert_eq!(repo.store(), "tank"); + assert_eq!(repo.auth_id().to_string(), "backup@pam"); + } + + #[test] + fn try_from_atoms_datastore_only() { + let args = BackupRepositoryArgs { + datastore: Some("local".into()), + ..Default::default() + }; + let repo = BackupRepository::try_from(args).unwrap(); + assert_eq!(repo.store(), "local"); + assert_eq!(repo.host(), "localhost"); + assert_eq!(repo.port(), 8007); + } + + #[test] + 
fn try_from_url_only() { + let args = BackupRepositoryArgs { + repository: Some("admin@pam@backuphost:8008:mystore".into()), + ..Default::default() + }; + let repo = BackupRepository::try_from(args).unwrap(); + assert_eq!(repo.host(), "backuphost"); + assert_eq!(repo.port(), 8008); + assert_eq!(repo.store(), "mystore"); + } + + #[test] + fn try_from_mutual_exclusion_error() { + let args = BackupRepositoryArgs { + repository: Some("somehost:mystore".into()), + server: Some("otherhost".into()), + ..Default::default() + }; + let err = BackupRepository::try_from(args).unwrap_err(); + assert!(err.to_string().contains("mutually exclusive"), "got: {err}"); + } + + #[test] + fn try_from_nothing_set_error() { + let err = BackupRepository::try_from(BackupRepositoryArgs::default()).unwrap_err(); + assert!( + err.to_string().contains("no repository specified"), + "got: {err}" + ); + } + + #[test] + fn try_from_atoms_without_datastore_error() { + let args = BackupRepositoryArgs { + server: Some("pbs.local".into()), + ..Default::default() + }; + let err = BackupRepository::try_from(args).unwrap_err(); + assert!( + err.to_string().contains("--datastore is required"), + "got: {err}" + ); + } } diff --git a/pbs-client/src/tools/mod.rs b/pbs-client/src/tools/mod.rs index 7a496d14c..32c55ee1b 100644 --- a/pbs-client/src/tools/mod.rs +++ b/pbs-client/src/tools/mod.rs @@ -17,12 +17,14 @@ use proxmox_router::cli::{complete_file_name, shellword_split}; use proxmox_schema::*; use proxmox_sys::fs::file_get_json; -use pbs_api_types::{ - Authid, BackupArchiveName, BackupNamespace, RateLimitConfig, UserWithTokens, BACKUP_REPO_URL, -}; +use pbs_api_types::{Authid, BackupArchiveName, BackupNamespace, RateLimitConfig, UserWithTokens}; use pbs_datastore::BackupManifest; -use crate::{BackupRepository, HttpClient, HttpClientOptions}; +use crate::{BackupRepository, BackupRepositoryArgs, HttpClient, HttpClientOptions}; + +// Re-export for backward compatibility; the canonical definition is now in 
backup_repo alongside +// BackupRepositoryArgs. +pub use crate::REPO_URL_SCHEMA; + pub mod key_source; @@ -30,6 +32,10 @@ const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT"; const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD"; const ENV_VAR_PBS_ENCRYPTION_PASSWORD: &str = "PBS_ENCRYPTION_PASSWORD"; const ENV_VAR_PBS_REPOSITORY: &str = "PBS_REPOSITORY"; +const ENV_VAR_PBS_SERVER: &str = "PBS_SERVER"; +const ENV_VAR_PBS_PORT: &str = "PBS_PORT"; +const ENV_VAR_PBS_DATASTORE: &str = "PBS_DATASTORE"; +const ENV_VAR_PBS_AUTH_ID: &str = "PBS_AUTH_ID"; /// Directory with system [credential]s. See systemd-creds(1). /// @@ -44,11 +50,6 @@ const CRED_PBS_REPOSITORY: &str = "proxmox-backup-client.repository"; /// Credential name of the the fingerprint. const CRED_PBS_FINGERPRINT: &str = "proxmox-backup-client.fingerprint"; -pub const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.") .format(&BACKUP_REPO_URL) .max_length(256) .schema(); - pub const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new("Chunk size in KB. Must be a power of 2.") .minimum(64) .maximum(4096) @@ -233,41 +234,110 @@ pub fn get_fingerprint() -> Option<String> { .unwrap_or_default() } +/// Build [`BackupRepositoryArgs`] from the fields in a JSON Value. +fn args_from_value(param: &Value) -> BackupRepositoryArgs { + BackupRepositoryArgs { + repository: param["repository"].as_str().map(String::from), + server: param["server"].as_str().map(String::from), + port: param["port"].as_u64().map(|p| p as u16), + datastore: param["datastore"].as_str().map(String::from), + auth_id: param["auth-id"] + .as_str() + .and_then(|s| s.parse::<Authid>().ok()), + } +} + +/// Build [`BackupRepositoryArgs`] from `PBS_*` environment variables. 
+fn args_from_env() -> BackupRepositoryArgs { + BackupRepositoryArgs { + repository: None, + server: std::env::var(ENV_VAR_PBS_SERVER).ok(), + port: std::env::var(ENV_VAR_PBS_PORT) + .ok() + .and_then(|p| p.parse::<u16>().ok()), + datastore: std::env::var(ENV_VAR_PBS_DATASTORE).ok(), + auth_id: std::env::var(ENV_VAR_PBS_AUTH_ID) + .ok() + .and_then(|s| s.parse::<Authid>().ok()), + } +} + +/// Resolve a [`BackupRepository`] from explicit CLI arguments with environment variable fallback. +/// +/// Resolution: +/// - `--repository` and CLI atoms are mutually exclusive. +/// - `--repository` alone is used as-is (env vars ignored). +/// - CLI atoms are merged with `PBS_*` env atom vars per-field (CLI wins). +/// - If no CLI args are given, falls back to `PBS_REPOSITORY`, then to +/// `PBS_*` atom env vars, then errors. +fn resolve_repository(cli: BackupRepositoryArgs) -> Result<BackupRepository, Error> { + if cli.repository.is_some() && cli.has_atoms() { + bail!("--repository and --server/--port/--datastore/--auth-id are mutually exclusive"); + } + if cli.repository.is_some() { + return BackupRepository::try_from(cli); + } + if cli.has_atoms() { + let env = args_from_env(); + return BackupRepository::try_from(cli.merge_from(env)); + } + + // No CLI args at all, try environment. + if let Some(url) = get_default_repository() { + return url.parse(); + } + let env = args_from_env(); + if env.has_atoms() { + return BackupRepository::try_from(env); + } + bail!("unable to get (default) repository"); +} + +/// Remove repository-related keys from a JSON Value and return the parsed [`BackupRepository`]. +/// +/// This is used by commands that forward the remaining parameters to the server API after stripping +/// the repository fields. pub fn remove_repository_from_value(param: &mut Value) -> Result<BackupRepository, Error> { - if let Some(url) = param + let map = param .as_object_mut() - .ok_or_else(|| format_err!("unable to get repository (parameter is not an object)"))? 
- .remove("repository") - { - return url - .as_str() - .ok_or_else(|| format_err!("invalid repository value (must be a string)"))? - .parse(); - } + .ok_or_else(|| format_err!("unable to get repository (parameter is not an object)"))?; - get_default_repository() - .ok_or_else(|| format_err!("unable to get default repository"))? - .parse() + let to_string = |v: Value| v.as_str().map(String::from); + + let args = BackupRepositoryArgs { + repository: map.remove("repository").and_then(to_string), + server: map.remove("server").and_then(to_string), + port: map + .remove("port") + .and_then(|v| v.as_u64()) + .map(|p| p as u16), + datastore: map.remove("datastore").and_then(to_string), + auth_id: map + .remove("auth-id") + .and_then(to_string) + .map(|s| s.parse::()) + .transpose()?, + }; + + resolve_repository(args) } +/// Extract a [`BackupRepository`] from CLI parameters. pub fn extract_repository_from_value(param: &Value) -> Result { - let repo_url = param["repository"] - .as_str() - .map(String::from) - .or_else(get_default_repository) - .ok_or_else(|| format_err!("unable to get (default) repository"))?; - - let repo: BackupRepository = repo_url.parse()?; - - Ok(repo) + resolve_repository(args_from_value(param)) } +/// Extract a [`BackupRepository`] from a parameter map (used for shell completion callbacks). 
pub fn extract_repository_from_map(param: &HashMap<String, String>) -> Option<BackupRepository> { - param - .get("repository") - .map(String::from) - .or_else(get_default_repository) - .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok()) + let cli = BackupRepositoryArgs { + repository: param.get("repository").cloned(), + server: param.get("server").cloned(), + port: param.get("port").and_then(|p| p.parse().ok()), + datastore: param.get("datastore").cloned(), + auth_id: param.get("auth-id").and_then(|s| s.parse().ok()), + }; + + resolve_repository(cli).ok() } pub fn connect(repo: &BackupRepository) -> Result<HttpClient, Error> { @@ -757,3 +827,140 @@ pub fn create_tmp_file() -> std::io::Result<std::fs::File> { } }) } + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + + static ENV_MUTEX: std::sync::Mutex<()> = std::sync::Mutex::new(()); + + const REPO_ENV_VARS: &[&str] = &[ + ENV_VAR_PBS_REPOSITORY, + ENV_VAR_PBS_SERVER, + ENV_VAR_PBS_PORT, + ENV_VAR_PBS_DATASTORE, + ENV_VAR_PBS_AUTH_ID, + ENV_VAR_CREDENTIALS_DIRECTORY, + ]; + + fn with_cleared_repo_env(f: impl FnOnce()) { + let _guard = ENV_MUTEX.lock().unwrap(); + for k in REPO_ENV_VARS { + std::env::remove_var(k); + } + f(); + for k in REPO_ENV_VARS { + std::env::remove_var(k); + } + } + + #[test] + fn extract_repo_from_atoms() { + with_cleared_repo_env(|| { + let param = json!({"server": "myhost", "datastore": "mystore"}); + let repo = extract_repository_from_value(&param).unwrap(); + assert_eq!(repo.host(), "myhost"); + assert_eq!(repo.store(), "mystore"); + assert_eq!(repo.port(), 8007); + }); + } + + #[test] + fn extract_repo_from_url() { + with_cleared_repo_env(|| { + let param = json!({"repository": "myhost:mystore"}); + let repo = extract_repository_from_value(&param).unwrap(); + assert_eq!(repo.host(), "myhost"); + assert_eq!(repo.store(), "mystore"); + }); + } + + #[test] + fn extract_repo_mutual_exclusion_error() { + with_cleared_repo_env(|| { + let param = json!({"repository": "myhost:mystore", "auth-id": "user@pam"}); + let err = 
extract_repository_from_value(&param).unwrap_err(); + assert!(err.to_string().contains("mutually exclusive"), "got: {err}"); + }); + } + + #[test] + fn extract_repo_atoms_without_datastore_error() { + with_cleared_repo_env(|| { + let param = json!({"server": "myhost"}); + let err = extract_repository_from_value(&param).unwrap_err(); + assert!( + err.to_string().contains("--datastore is required"), + "got: {err}" + ); + }); + } + + #[test] + fn extract_repo_nothing_provided_error() { + with_cleared_repo_env(|| { + let err = extract_repository_from_value(&json!({})).unwrap_err(); + assert!(err.to_string().contains("unable to get"), "got: {err}"); + }); + } + + #[test] + fn extract_repo_env_fallback() { + with_cleared_repo_env(|| { + std::env::set_var(ENV_VAR_PBS_SERVER, "envhost"); + std::env::set_var(ENV_VAR_PBS_DATASTORE, "envstore"); + let repo = extract_repository_from_value(&json!({})).unwrap(); + assert_eq!(repo.host(), "envhost"); + assert_eq!(repo.store(), "envstore"); + }); + } + + #[test] + fn extract_repo_pbs_repository_env_takes_precedence() { + with_cleared_repo_env(|| { + std::env::set_var(ENV_VAR_PBS_REPOSITORY, "repohost:repostore"); + std::env::set_var(ENV_VAR_PBS_SERVER, "envhost"); + std::env::set_var(ENV_VAR_PBS_DATASTORE, "envstore"); + let repo = extract_repository_from_value(&json!({})).unwrap(); + assert_eq!(repo.host(), "repohost"); + assert_eq!(repo.store(), "repostore"); + }); + } + + #[test] + fn extract_repo_cli_overrides_env() { + with_cleared_repo_env(|| { + std::env::set_var(ENV_VAR_PBS_REPOSITORY, "envhost:envstore"); + let param = json!({"server": "clihost", "datastore": "clistore"}); + let repo = extract_repository_from_value(&param).unwrap(); + assert_eq!(repo.host(), "clihost"); + assert_eq!(repo.store(), "clistore"); + }); + } + + #[test] + fn extract_repo_cli_atoms_merge_with_env_atoms() { + with_cleared_repo_env(|| { + std::env::set_var(ENV_VAR_PBS_SERVER, "envhost"); + std::env::set_var(ENV_VAR_PBS_DATASTORE, "envstore"); + let param = 
json!({"auth-id": "backup@pbs"}); + let repo = extract_repository_from_value(&param).unwrap(); + assert_eq!(repo.host(), "envhost"); + assert_eq!(repo.store(), "envstore"); + assert_eq!(repo.auth_id().to_string(), "backup@pbs"); + }); + } + + #[test] + fn extract_repo_cli_atom_overrides_same_env_atom() { + with_cleared_repo_env(|| { + std::env::set_var(ENV_VAR_PBS_SERVER, "envhost"); + std::env::set_var(ENV_VAR_PBS_DATASTORE, "envstore"); + let param = json!({"server": "clihost"}); + let repo = extract_repository_from_value(&param).unwrap(); + assert_eq!(repo.host(), "clihost"); + assert_eq!(repo.store(), "envstore"); + }); + } +} -- 2.47.3