* [PATCH v2 2/5] client: repository: add individual component parameters
2026-03-30 18:20 [PATCH v2 0/5] client: repository: add individual component parameters Thomas Lamprecht
2026-03-30 18:20 ` [PATCH v2 1/5] client: repository: add tests for BackupRepository parsing Thomas Lamprecht
@ 2026-03-30 18:20 ` Thomas Lamprecht
2026-03-31 8:55 ` Thomas Lamprecht
2026-03-30 18:20 ` [PATCH v2 3/5] client: migrate commands to flattened repository args Thomas Lamprecht
` (3 subsequent siblings)
5 siblings, 1 reply; 8+ messages in thread
From: Thomas Lamprecht @ 2026-03-30 18:20 UTC (permalink / raw)
To: pbs-devel
The compact repository URL format ([[auth-id@]server[:port]:]datastore)
can be cumbersome to work with when changing a single aspect of the
connection or when using API tokens.
Add --server, --port, --datastore, --auth-id, and --ns as separate
CLI parameters alongside the existing compound --repository URL.
A conversion resolves either form into a BackupRepository, enforcing
mutual exclusion between the two.
CLI atom options merge with the corresponding PBS_SERVER, PBS_PORT,
PBS_DATASTORE, PBS_AUTH_ID environment variables per-field (CLI wins),
following the common convention where CLI flags override their
corresponding environment variable defaults. If no CLI args are given,
PBS_REPOSITORY takes precedence over the atom env vars.
No command-level changes yet; the struct and extraction logic are
introduced here so that the command migration can be a separate
mechanical change.
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
---
changes v1 -> v2:
- split out from the single v1 commit; now contains only the
pbs-client library code, no command-level changes
- includes ns in BackupRepositoryArgs (was a separate commit in v1)
- per-field merge of CLI atoms with PBS_* env vars instead of
treating them as mutually exclusive layers
- fixed error swallowing: mutual-exclusion errors now propagate
instead of falling through to env fallback
pbs-client/src/backup_repo.rs | 226 +++++++++++++++++++++++-
pbs-client/src/tools/mod.rs | 314 ++++++++++++++++++++++++++++++----
2 files changed, 504 insertions(+), 36 deletions(-)
diff --git a/pbs-client/src/backup_repo.rs b/pbs-client/src/backup_repo.rs
index 45c859d67..fff63054e 100644
--- a/pbs-client/src/backup_repo.rs
+++ b/pbs-client/src/backup_repo.rs
@@ -1,8 +1,140 @@
use std::fmt;
-use anyhow::{format_err, Error};
+use anyhow::{bail, format_err, Error};
+use serde::{Deserialize, Serialize};
-use pbs_api_types::{Authid, Userid, BACKUP_REPO_URL_REGEX, IP_V6_REGEX};
+use proxmox_schema::*;
+
+use pbs_api_types::{
+ Authid, BackupNamespace, Userid, BACKUP_REPO_URL, BACKUP_REPO_URL_REGEX, DATASTORE_SCHEMA,
+ IP_V6_REGEX,
+};
+
+pub const REPO_URL_SCHEMA: Schema =
+ StringSchema::new("Repository URL: [[auth-id@]server[:port]:]datastore")
+ .format(&BACKUP_REPO_URL)
+ .max_length(256)
+ .schema();
+
+pub const BACKUP_REPO_SERVER_SCHEMA: Schema =
+ StringSchema::new("Backup server address (hostname or IP). Default: localhost")
+ .format(&api_types::DNS_NAME_OR_IP_FORMAT)
+ .max_length(256)
+ .schema();
+
+pub const BACKUP_REPO_PORT_SCHEMA: Schema = IntegerSchema::new("Backup server port. Default: 8007")
+ .minimum(1)
+ .maximum(65535)
+ .default(8007)
+ .schema();
+
+#[api(
+ properties: {
+ repository: {
+ schema: REPO_URL_SCHEMA,
+ optional: true,
+ },
+ server: {
+ schema: BACKUP_REPO_SERVER_SCHEMA,
+ optional: true,
+ },
+ port: {
+ schema: BACKUP_REPO_PORT_SCHEMA,
+ optional: true,
+ },
+ datastore: {
+ schema: DATASTORE_SCHEMA,
+ optional: true,
+ },
+ "auth-id": {
+ type: Authid,
+ optional: true,
+ },
+ ns: {
+ type: BackupNamespace,
+ optional: true,
+ },
+ },
+)]
+#[derive(Default, Serialize, Deserialize)]
+#[serde(rename_all = "kebab-case")]
+/// Backup repository location, specified either as a repository URL or as individual components
+/// (server, port, datastore, auth-id), plus an optional backup namespace.
+pub struct BackupRepositoryArgs {
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub repository: Option<String>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub server: Option<String>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub port: Option<u16>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub datastore: Option<String>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub auth_id: Option<Authid>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub ns: Option<BackupNamespace>,
+}
+
+impl BackupRepositoryArgs {
+ /// Returns `true` if any atom parameter (server, port, datastore, or auth-id) is set.
+ pub fn has_atoms(&self) -> bool {
+ self.server.is_some()
+ || self.port.is_some()
+ || self.datastore.is_some()
+ || self.auth_id.is_some()
+ }
+
+ /// Merge `self` with `fallback`, using values from `self` where present
+ /// and filling in from `fallback` for fields that are `None`.
+ pub fn merge_from(self, fallback: BackupRepositoryArgs) -> Self {
+ Self {
+ repository: self.repository.or(fallback.repository),
+ server: self.server.or(fallback.server),
+ port: self.port.or(fallback.port),
+ datastore: self.datastore.or(fallback.datastore),
+ auth_id: self.auth_id.or(fallback.auth_id),
+ ns: self.ns.or(fallback.ns),
+ }
+ }
+}
+
+impl TryFrom<BackupRepositoryArgs> for BackupRepository {
+ type Error = anyhow::Error;
+
+ /// Convert explicit CLI arguments into a [`BackupRepository`].
+ ///
+ /// * If `repository` and any atom are both set, returns an error.
+ /// * If atoms are present, builds the repository from them (requires `datastore`).
+ /// * If only `repository` is set, parses the repo URL.
+ /// * If nothing is set, returns an error - callers must fall back to environment variables /
+ /// credentials themselves.
+ fn try_from(args: BackupRepositoryArgs) -> Result<Self, Self::Error> {
+ let has_url = args.repository.is_some();
+ let has_atoms = args.has_atoms();
+
+ if has_url && has_atoms {
+ bail!("--repository and --server/--port/--datastore/--auth-id are mutually exclusive");
+ }
+
+ if has_atoms {
+ let store = args.datastore.ok_or_else(|| {
+ format_err!("--datastore is required when not using --repository")
+ })?;
+ return Ok(BackupRepository::new(
+ args.auth_id,
+ args.server,
+ args.port,
+ store,
+ ));
+ }
+
+ if let Some(url) = args.repository {
+ return url.parse();
+ }
+
+ anyhow::bail!("no repository specified")
+ }
+}
/// Reference remote backup locations
///
@@ -193,4 +325,94 @@ mod tests {
let repo = BackupRepository::new(None, Some("[ff80::1]".into()), None, "s".into());
assert_eq!(repo.host(), "[ff80::1]");
}
+
+ #[test]
+ fn has_atoms() {
+ assert!(!BackupRepositoryArgs::default().has_atoms());
+
+ let with_server = BackupRepositoryArgs {
+ server: Some("host".into()),
+ ..Default::default()
+ };
+ assert!(with_server.has_atoms());
+
+ let repo_only = BackupRepositoryArgs {
+ repository: Some("myhost:mystore".into()),
+ ..Default::default()
+ };
+ assert!(!repo_only.has_atoms());
+ }
+
+ #[test]
+ fn try_from_atoms_only() {
+ let args = BackupRepositoryArgs {
+ server: Some("pbs.local".into()),
+ port: Some(9000),
+ datastore: Some("tank".into()),
+ auth_id: Some("backup@pam".parse().unwrap()),
+ ..Default::default()
+ };
+ let repo = BackupRepository::try_from(args).unwrap();
+ assert_eq!(repo.host(), "pbs.local");
+ assert_eq!(repo.port(), 9000);
+ assert_eq!(repo.store(), "tank");
+ assert_eq!(repo.auth_id().to_string(), "backup@pam");
+ }
+
+ #[test]
+ fn try_from_atoms_datastore_only() {
+ let args = BackupRepositoryArgs {
+ datastore: Some("local".into()),
+ ..Default::default()
+ };
+ let repo = BackupRepository::try_from(args).unwrap();
+ assert_eq!(repo.store(), "local");
+ assert_eq!(repo.host(), "localhost");
+ assert_eq!(repo.port(), 8007);
+ }
+
+ #[test]
+ fn try_from_url_only() {
+ let args = BackupRepositoryArgs {
+ repository: Some("admin@pam@backuphost:8008:mystore".into()),
+ ..Default::default()
+ };
+ let repo = BackupRepository::try_from(args).unwrap();
+ assert_eq!(repo.host(), "backuphost");
+ assert_eq!(repo.port(), 8008);
+ assert_eq!(repo.store(), "mystore");
+ }
+
+ #[test]
+ fn try_from_mutual_exclusion_error() {
+ let args = BackupRepositoryArgs {
+ repository: Some("somehost:mystore".into()),
+ server: Some("otherhost".into()),
+ ..Default::default()
+ };
+ let err = BackupRepository::try_from(args).unwrap_err();
+ assert!(err.to_string().contains("mutually exclusive"), "got: {err}");
+ }
+
+ #[test]
+ fn try_from_nothing_set_error() {
+ let err = BackupRepository::try_from(BackupRepositoryArgs::default()).unwrap_err();
+ assert!(
+ err.to_string().contains("no repository specified"),
+ "got: {err}"
+ );
+ }
+
+ #[test]
+ fn try_from_atoms_without_datastore_error() {
+ let args = BackupRepositoryArgs {
+ server: Some("pbs.local".into()),
+ ..Default::default()
+ };
+ let err = BackupRepository::try_from(args).unwrap_err();
+ assert!(
+ err.to_string().contains("--datastore is required"),
+ "got: {err}"
+ );
+ }
}
diff --git a/pbs-client/src/tools/mod.rs b/pbs-client/src/tools/mod.rs
index 7a496d14c..859946079 100644
--- a/pbs-client/src/tools/mod.rs
+++ b/pbs-client/src/tools/mod.rs
@@ -17,12 +17,14 @@ use proxmox_router::cli::{complete_file_name, shellword_split};
use proxmox_schema::*;
use proxmox_sys::fs::file_get_json;
-use pbs_api_types::{
- Authid, BackupArchiveName, BackupNamespace, RateLimitConfig, UserWithTokens, BACKUP_REPO_URL,
-};
+use pbs_api_types::{Authid, BackupArchiveName, BackupNamespace, RateLimitConfig, UserWithTokens};
use pbs_datastore::BackupManifest;
-use crate::{BackupRepository, HttpClient, HttpClientOptions};
+use crate::{BackupRepository, BackupRepositoryArgs, HttpClient, HttpClientOptions};
+
+// Re-export for backward compatibility; the canonical definition is now in backup_repo alongside
+// BackupRepositoryArgs.
+pub use crate::REPO_URL_SCHEMA;
pub mod key_source;
@@ -30,6 +32,10 @@ const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";
const ENV_VAR_PBS_ENCRYPTION_PASSWORD: &str = "PBS_ENCRYPTION_PASSWORD";
const ENV_VAR_PBS_REPOSITORY: &str = "PBS_REPOSITORY";
+const ENV_VAR_PBS_SERVER: &str = "PBS_SERVER";
+const ENV_VAR_PBS_PORT: &str = "PBS_PORT";
+const ENV_VAR_PBS_DATASTORE: &str = "PBS_DATASTORE";
+const ENV_VAR_PBS_AUTH_ID: &str = "PBS_AUTH_ID";
/// Directory with system [credential]s. See systemd-creds(1).
///
@@ -44,11 +50,6 @@ const CRED_PBS_REPOSITORY: &str = "proxmox-backup-client.repository";
/// Credential name of the fingerprint.
const CRED_PBS_FINGERPRINT: &str = "proxmox-backup-client.fingerprint";
-pub const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
- .format(&BACKUP_REPO_URL)
- .max_length(256)
- .schema();
-
pub const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new("Chunk size in KB. Must be a power of 2.")
.minimum(64)
.maximum(4096)
@@ -233,41 +234,149 @@ pub fn get_fingerprint() -> Option<String> {
.unwrap_or_default()
}
+/// Build [`BackupRepositoryArgs`] from the fields in a JSON Value.
+fn args_from_value(param: &Value) -> BackupRepositoryArgs {
+ BackupRepositoryArgs {
+ repository: param["repository"].as_str().map(String::from),
+ server: param["server"].as_str().map(String::from),
+ port: param["port"].as_u64().map(|p| p as u16),
+ datastore: param["datastore"].as_str().map(String::from),
+ auth_id: param["auth-id"]
+ .as_str()
+ .and_then(|s| s.parse::<Authid>().ok()),
+ ns: None, // namespace is not part of repository resolution
+ }
+}
+
+/// Build [`BackupRepositoryArgs`] from `PBS_*` environment variables.
+fn args_from_env() -> BackupRepositoryArgs {
+ BackupRepositoryArgs {
+ repository: None,
+ server: std::env::var(ENV_VAR_PBS_SERVER).ok(),
+ port: std::env::var(ENV_VAR_PBS_PORT)
+ .ok()
+ .and_then(|p| p.parse::<u16>().ok()),
+ datastore: std::env::var(ENV_VAR_PBS_DATASTORE).ok(),
+ auth_id: std::env::var(ENV_VAR_PBS_AUTH_ID)
+ .ok()
+ .and_then(|s| s.parse::<Authid>().ok()),
+ ns: None,
+ }
+}
+
+/// Remove repository-related keys from a JSON Value and return the parsed [`BackupRepository`].
+///
+/// This is used by commands that forward the remaining parameters to the server API after stripping
+/// the repository fields.
pub fn remove_repository_from_value(param: &mut Value) -> Result<BackupRepository, Error> {
- if let Some(url) = param
+ let map = param
.as_object_mut()
- .ok_or_else(|| format_err!("unable to get repository (parameter is not an object)"))?
- .remove("repository")
- {
- return url
- .as_str()
- .ok_or_else(|| format_err!("invalid repository value (must be a string)"))?
- .parse();
- }
-
- get_default_repository()
- .ok_or_else(|| format_err!("unable to get default repository"))?
- .parse()
+ .ok_or_else(|| format_err!("unable to get repository (parameter is not an object)"))?;
+
+ let to_string = |v: Value| v.as_str().map(String::from);
+
+ let args = BackupRepositoryArgs {
+ repository: map.remove("repository").and_then(to_string),
+ server: map.remove("server").and_then(to_string),
+ port: map
+ .remove("port")
+ .and_then(|v| v.as_u64())
+ .map(|p| p as u16),
+ datastore: map.remove("datastore").and_then(to_string),
+ auth_id: map
+ .remove("auth-id")
+ .and_then(to_string)
+ .map(|s| s.parse::<Authid>())
+ .transpose()?,
+ ns: None, // keep ns in the Value for the API call
+ };
+
+ if args.repository.is_some() && args.has_atoms() {
+ bail!("--repository and --server/--port/--datastore/--auth-id are mutually exclusive");
+ }
+ if args.repository.is_some() {
+ return BackupRepository::try_from(args);
+ }
+ if args.has_atoms() {
+ let env = args_from_env();
+ return BackupRepository::try_from(args.merge_from(env));
+ }
+
+ if let Some(url) = get_default_repository() {
+ return url.parse();
+ }
+ let env = args_from_env();
+ if env.has_atoms() {
+ return BackupRepository::try_from(env);
+ }
+ bail!("repository not passed via CLI options and unable to get (default) repository from environment");
}
+/// Extract a [`BackupRepository`] from CLI parameters.
+///
+/// Resolution:
+/// - `--repository` and CLI atoms are mutually exclusive.
+/// - `--repository` alone is used as-is (env vars ignored).
+/// - CLI atoms are merged with `PBS_*` env atom vars per-field (CLI wins).
+/// - If no CLI args are given, falls back to `PBS_REPOSITORY`, then to `PBS_*` atom env vars, then
+/// errors.
pub fn extract_repository_from_value(param: &Value) -> Result<BackupRepository, Error> {
- let repo_url = param["repository"]
- .as_str()
- .map(String::from)
- .or_else(get_default_repository)
- .ok_or_else(|| format_err!("unable to get (default) repository"))?;
+ let cli = args_from_value(param);
- let repo: BackupRepository = repo_url.parse()?;
+ if cli.repository.is_some() && cli.has_atoms() {
+ bail!("--repository and --server/--port/--datastore/--auth-id are mutually exclusive");
+ }
+ if cli.repository.is_some() {
+ return BackupRepository::try_from(cli);
+ }
+ if cli.has_atoms() {
+ let env = args_from_env();
+ return BackupRepository::try_from(cli.merge_from(env));
+ }
- Ok(repo)
+ // No CLI args at all, try environment.
+ if let Some(url) = get_default_repository() {
+ return url.parse();
+ }
+ let env = args_from_env();
+ if env.has_atoms() {
+ return BackupRepository::try_from(env);
+ }
+ bail!("unable to get (default) repository");
}
+/// Extract a [`BackupRepository`] from a parameter map (used for shell completion callbacks).
pub fn extract_repository_from_map(param: &HashMap<String, String>) -> Option<BackupRepository> {
- param
- .get("repository")
- .map(String::from)
- .or_else(get_default_repository)
- .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
+ let cli = BackupRepositoryArgs {
+ repository: param.get("repository").cloned(),
+ server: param.get("server").cloned(),
+ port: param.get("port").and_then(|p| p.parse().ok()),
+ datastore: param.get("datastore").cloned(),
+ auth_id: param.get("auth-id").and_then(|s| s.parse().ok()),
+ ns: None,
+ };
+
+ if cli.repository.is_some() {
+ return BackupRepository::try_from(cli).ok();
+ }
+ if cli.has_atoms() {
+ let env = args_from_env();
+ return BackupRepository::try_from(cli.merge_from(env)).ok();
+ }
+
+ // Fall back to environment: compound URL, then atoms.
+ if let Some(url) = get_default_repository() {
+ if let Ok(repo) = url.parse() {
+ return Some(repo);
+ }
+ }
+
+ let env = args_from_env();
+ if env.has_atoms() {
+ return BackupRepository::try_from(env).ok();
+ }
+
+ None
}
pub fn connect(repo: &BackupRepository) -> Result<HttpClient, Error> {
@@ -757,3 +866,140 @@ pub fn create_tmp_file() -> std::io::Result<std::fs::File> {
}
})
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use serde_json::json;
+
+ static ENV_MUTEX: std::sync::Mutex<()> = std::sync::Mutex::new(());
+
+ const REPO_ENV_VARS: &[&str] = &[
+ ENV_VAR_PBS_REPOSITORY,
+ ENV_VAR_PBS_SERVER,
+ ENV_VAR_PBS_PORT,
+ ENV_VAR_PBS_DATASTORE,
+ ENV_VAR_PBS_AUTH_ID,
+ ENV_VAR_CREDENTIALS_DIRECTORY,
+ ];
+
+ fn with_cleared_repo_env(f: impl FnOnce()) {
+ let _guard = ENV_MUTEX.lock().unwrap();
+ for k in REPO_ENV_VARS {
+ std::env::remove_var(k);
+ }
+ f();
+ for k in REPO_ENV_VARS {
+ std::env::remove_var(k);
+ }
+ }
+
+ #[test]
+ fn extract_repo_from_atoms() {
+ with_cleared_repo_env(|| {
+ let param = json!({"server": "myhost", "datastore": "mystore"});
+ let repo = extract_repository_from_value(¶m).unwrap();
+ assert_eq!(repo.host(), "myhost");
+ assert_eq!(repo.store(), "mystore");
+ assert_eq!(repo.port(), 8007);
+ });
+ }
+
+ #[test]
+ fn extract_repo_from_url() {
+ with_cleared_repo_env(|| {
+ let param = json!({"repository": "myhost:mystore"});
+ let repo = extract_repository_from_value(¶m).unwrap();
+ assert_eq!(repo.host(), "myhost");
+ assert_eq!(repo.store(), "mystore");
+ });
+ }
+
+ #[test]
+ fn extract_repo_mutual_exclusion_error() {
+ with_cleared_repo_env(|| {
+ let param = json!({"repository": "myhost:mystore", "auth-id": "user@pam"});
+ let err = extract_repository_from_value(¶m).unwrap_err();
+ assert!(err.to_string().contains("mutually exclusive"), "got: {err}");
+ });
+ }
+
+ #[test]
+ fn extract_repo_atoms_without_datastore_error() {
+ with_cleared_repo_env(|| {
+ let param = json!({"server": "myhost"});
+ let err = extract_repository_from_value(¶m).unwrap_err();
+ assert!(
+ err.to_string().contains("--datastore is required"),
+ "got: {err}"
+ );
+ });
+ }
+
+ #[test]
+ fn extract_repo_nothing_provided_error() {
+ with_cleared_repo_env(|| {
+ let err = extract_repository_from_value(&json!({})).unwrap_err();
+ assert!(err.to_string().contains("unable to get"), "got: {err}");
+ });
+ }
+
+ #[test]
+ fn extract_repo_env_fallback() {
+ with_cleared_repo_env(|| {
+ std::env::set_var(ENV_VAR_PBS_SERVER, "envhost");
+ std::env::set_var(ENV_VAR_PBS_DATASTORE, "envstore");
+ let repo = extract_repository_from_value(&json!({})).unwrap();
+ assert_eq!(repo.host(), "envhost");
+ assert_eq!(repo.store(), "envstore");
+ });
+ }
+
+ #[test]
+ fn extract_repo_pbs_repository_env_takes_precedence() {
+ with_cleared_repo_env(|| {
+ std::env::set_var(ENV_VAR_PBS_REPOSITORY, "repohost:repostore");
+ std::env::set_var(ENV_VAR_PBS_SERVER, "envhost");
+ std::env::set_var(ENV_VAR_PBS_DATASTORE, "envstore");
+ let repo = extract_repository_from_value(&json!({})).unwrap();
+ assert_eq!(repo.host(), "repohost");
+ assert_eq!(repo.store(), "repostore");
+ });
+ }
+
+ #[test]
+ fn extract_repo_cli_overrides_env() {
+ with_cleared_repo_env(|| {
+ std::env::set_var(ENV_VAR_PBS_REPOSITORY, "envhost:envstore");
+ let param = json!({"server": "clihost", "datastore": "clistore"});
+ let repo = extract_repository_from_value(¶m).unwrap();
+ assert_eq!(repo.host(), "clihost");
+ assert_eq!(repo.store(), "clistore");
+ });
+ }
+
+ #[test]
+ fn extract_repo_cli_atoms_merge_with_env_atoms() {
+ with_cleared_repo_env(|| {
+ std::env::set_var(ENV_VAR_PBS_SERVER, "envhost");
+ std::env::set_var(ENV_VAR_PBS_DATASTORE, "envstore");
+ let param = json!({"auth-id": "backup@pbs"});
+ let repo = extract_repository_from_value(¶m).unwrap();
+ assert_eq!(repo.host(), "envhost");
+ assert_eq!(repo.store(), "envstore");
+ assert_eq!(repo.auth_id().to_string(), "backup@pbs");
+ });
+ }
+
+ #[test]
+ fn extract_repo_cli_atom_overrides_same_env_atom() {
+ with_cleared_repo_env(|| {
+ std::env::set_var(ENV_VAR_PBS_SERVER, "envhost");
+ std::env::set_var(ENV_VAR_PBS_DATASTORE, "envstore");
+ let param = json!({"server": "clihost"});
+ let repo = extract_repository_from_value(¶m).unwrap();
+ assert_eq!(repo.host(), "clihost");
+ assert_eq!(repo.store(), "envstore");
+ });
+ }
+}
--
2.47.3
^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH v2 3/5] client: migrate commands to flattened repository args
2026-03-30 18:20 [PATCH v2 0/5] client: repository: add individual component parameters Thomas Lamprecht
2026-03-30 18:20 ` [PATCH v2 1/5] client: repository: add tests for BackupRepository parsing Thomas Lamprecht
2026-03-30 18:20 ` [PATCH v2 2/5] client: repository: add individual component parameters Thomas Lamprecht
@ 2026-03-30 18:20 ` Thomas Lamprecht
2026-03-30 18:20 ` [PATCH v2 4/5] docs: document repository component options and env vars Thomas Lamprecht
` (2 subsequent siblings)
5 siblings, 0 replies; 8+ messages in thread
From: Thomas Lamprecht @ 2026-03-30 18:20 UTC (permalink / raw)
To: pbs-devel
Replace the individual repository and ns property definitions in all
client command API schema macros with the flattened repository args
struct. This makes --server, --port, --datastore, --auth-id, and --ns
available on every command that previously only accepted --repository.
For some commands the --ns namespace option doesn't really make sense,
e.g. login, but that is not really different from the pre-existing
datastore component that one could already pass through the repository
URL.
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
---
changes v1 -> v2:
- split out from the single v1 commit; purely mechanical migration
- fixed duplicate ns property in restore command's #[api] block
proxmox-backup-client/src/benchmark.rs | 8 +--
proxmox-backup-client/src/catalog.rs | 26 +++-----
proxmox-backup-client/src/group.rs | 14 ++--
proxmox-backup-client/src/main.rs | 88 +++++++++++---------------
proxmox-backup-client/src/mount.rs | 17 +++--
proxmox-backup-client/src/namespace.rs | 33 +++-------
proxmox-backup-client/src/snapshot.rs | 84 ++++++++----------------
proxmox-backup-client/src/task.rs | 20 +++---
proxmox-file-restore/src/main.rs | 37 +++++------
src/bin/proxmox_backup_debug/diff.rs | 20 +++---
10 files changed, 139 insertions(+), 208 deletions(-)
diff --git a/proxmox-backup-client/src/benchmark.rs b/proxmox-backup-client/src/benchmark.rs
index ad8c60ed9..c113e2ca2 100644
--- a/proxmox-backup-client/src/benchmark.rs
+++ b/proxmox-backup-client/src/benchmark.rs
@@ -22,7 +22,7 @@ use pbs_key_config::{load_and_decrypt_key, KeyDerivationConfig};
use pbs_tools::crypt_config::CryptConfig;
use crate::{
- connect, extract_repository_from_value, record_repository, KEYFILE_SCHEMA, REPO_URL_SCHEMA,
+ connect, extract_repository_from_value, record_repository, BackupRepositoryArgs, KEYFILE_SCHEMA,
};
#[api()]
@@ -105,9 +105,9 @@ static BENCHMARK_RESULT_2020_TOP: BenchmarkResult = BenchmarkResult {
#[api(
input: {
properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
keyfile: {
schema: KEYFILE_SCHEMA,
diff --git a/proxmox-backup-client/src/catalog.rs b/proxmox-backup-client/src/catalog.rs
index b1b22ff24..1426019f7 100644
--- a/proxmox-backup-client/src/catalog.rs
+++ b/proxmox-backup-client/src/catalog.rs
@@ -7,7 +7,7 @@ use serde_json::Value;
use proxmox_router::cli::*;
use proxmox_schema::api;
-use pbs_api_types::{BackupArchiveName, BackupNamespace, CATALOG_NAME};
+use pbs_api_types::{BackupArchiveName, CATALOG_NAME};
use pbs_client::pxar::tools::get_remote_pxar_reader;
use pbs_client::tools::key_source::get_encryption_key_password;
use pbs_client::{BackupReader, RemoteChunkReader};
@@ -20,20 +20,16 @@ use crate::{
complete_backup_snapshot, complete_group_or_snapshot, complete_namespace,
complete_pxar_archive_name, complete_repository, connect, crypto_parameters, decrypt_key,
dir_or_last_from_group, extract_repository_from_value, format_key_source, optional_ns_param,
- record_repository, BackupDir, BufferedDynamicReader, CatalogReader, DynamicIndexReader,
- IndexFile, Shell, KEYFD_SCHEMA, REPO_URL_SCHEMA,
+ record_repository, BackupDir, BackupRepositoryArgs, BufferedDynamicReader, CatalogReader,
+ DynamicIndexReader, IndexFile, Shell, KEYFD_SCHEMA,
};
#[api(
input: {
properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
- },
- ns: {
- type: BackupNamespace,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
snapshot: {
type: String,
@@ -159,10 +155,6 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
#[api(
input: {
properties: {
- ns: {
- type: BackupNamespace,
- optional: true,
- },
"snapshot": {
type: String,
description: "Group/Snapshot path.",
@@ -170,9 +162,9 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
"archive-name": {
type: BackupArchiveName,
},
- "repository": {
- optional: true,
- schema: REPO_URL_SCHEMA,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
"keyfile": {
optional: true,
diff --git a/proxmox-backup-client/src/group.rs b/proxmox-backup-client/src/group.rs
index 2c12db2ee..ec2ea6138 100644
--- a/proxmox-backup-client/src/group.rs
+++ b/proxmox-backup-client/src/group.rs
@@ -6,9 +6,9 @@ use proxmox_schema::api;
use crate::{
complete_backup_group, complete_namespace, complete_repository, merge_group_into,
- REPO_URL_SCHEMA,
+ BackupRepositoryArgs,
};
-use pbs_api_types::{BackupGroup, BackupNamespace};
+use pbs_api_types::BackupGroup;
use pbs_client::tools::{connect, remove_repository_from_value};
pub fn group_mgmt_cli() -> CliCommandMap {
@@ -29,13 +29,9 @@ pub fn group_mgmt_cli() -> CliCommandMap {
type: String,
description: "Backup group",
},
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
- },
- ns: {
- type: BackupNamespace,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
}
}
diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs
index 2ada87bdd..cdcf99f8c 100644
--- a/proxmox-backup-client/src/main.rs
+++ b/proxmox-backup-client/src/main.rs
@@ -27,8 +27,8 @@ use pbs_api_types::{
ArchiveType, Authid, BackupArchiveName, BackupDir, BackupGroup, BackupNamespace, BackupPart,
BackupType, ClientRateLimitConfig, CryptMode, Fingerprint, GroupListItem, PathPattern,
PruneJobOptions, PruneListItem, RateLimitConfig, SnapshotListItem, StorageStatus,
- BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA,
- CATALOG_NAME, ENCRYPTED_KEY_BLOB_NAME, MANIFEST_BLOB_NAME,
+ BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CATALOG_NAME,
+ ENCRYPTED_KEY_BLOB_NAME, MANIFEST_BLOB_NAME,
};
use pbs_client::catalog_shell::Shell;
use pbs_client::pxar::{ErrorHandler as PxarErrorHandler, MetadataArchiveReader, PxarPrevRef};
@@ -41,13 +41,13 @@ use pbs_client::tools::{
crypto_parameters, format_key_source, get_encryption_key_password, KEYFD_SCHEMA,
KEYFILE_SCHEMA, MASTER_PUBKEY_FD_SCHEMA, MASTER_PUBKEY_FILE_SCHEMA,
},
- raise_nofile_limit, CHUNK_SIZE_SCHEMA, REPO_URL_SCHEMA,
+ raise_nofile_limit, CHUNK_SIZE_SCHEMA,
};
use pbs_client::{
delete_ticket_info, parse_backup_specification, view_task_result, BackupDetectionMode,
- BackupReader, BackupRepository, BackupSpecificationType, BackupStats, BackupWriter,
- BackupWriterOptions, ChunkStream, FixedChunkStream, HttpClient, IndexType, InjectionData,
- PxarBackupStream, RemoteChunkReader, UploadOptions, BACKUP_SOURCE_SCHEMA,
+ BackupReader, BackupRepository, BackupRepositoryArgs, BackupSpecificationType, BackupStats,
+ BackupWriter, BackupWriterOptions, ChunkStream, FixedChunkStream, HttpClient, IndexType,
+ InjectionData, PxarBackupStream, RemoteChunkReader, UploadOptions, BACKUP_SOURCE_SCHEMA,
};
use pbs_datastore::catalog::{BackupCatalogWriter, CatalogReader, CatalogWriter};
use pbs_datastore::chunk_store::verify_chunk_size;
@@ -340,13 +340,9 @@ pub fn optional_ns_param(param: &Value) -> Result<BackupNamespace, Error> {
#[api(
input: {
properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
- },
- "ns": {
- type: BackupNamespace,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
"output-format": {
schema: OUTPUT_FORMAT,
@@ -433,18 +429,14 @@ fn merge_group_into(to: &mut serde_json::Map<String, Value>, group: BackupGroup)
#[api(
input: {
properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
group: {
type: String,
description: "Backup group.",
},
- "ns": {
- type: BackupNamespace,
- optional: true,
- },
"new-owner": {
type: Authid,
},
@@ -478,9 +470,9 @@ async fn change_backup_owner(group: String, mut param: Value) -> Result<(), Erro
#[api(
input: {
properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
}
}
@@ -500,9 +492,9 @@ async fn api_login(param: Value) -> Result<Value, Error> {
#[api(
input: {
properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
}
}
@@ -519,9 +511,9 @@ fn api_logout(param: Value) -> Result<Value, Error> {
#[api(
input: {
properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
"output-format": {
schema: OUTPUT_FORMAT,
@@ -572,9 +564,9 @@ async fn api_version(param: Value) -> Result<(), Error> {
#[api(
input: {
properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
"output-format": {
schema: OUTPUT_FORMAT,
@@ -662,9 +654,9 @@ fn spawn_catalog_upload(
schema: BACKUP_SOURCE_SCHEMA,
}
},
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
"include-dev": {
description:
@@ -708,10 +700,6 @@ fn spawn_catalog_upload(
optional: true,
default: false,
},
- "ns": {
- schema: BACKUP_NAMESPACE_SCHEMA,
- optional: true,
- },
"backup-type": {
schema: BACKUP_TYPE_SCHEMA,
optional: true,
@@ -1439,13 +1427,9 @@ async fn dump_image<W: Write>(
#[api(
input: {
properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
- },
- ns: {
- type: BackupNamespace,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
snapshot: {
type: String,
@@ -1837,9 +1821,9 @@ async fn restore(
default: false,
description: "Minimal output - only show removals.",
},
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
},
},
@@ -1929,9 +1913,9 @@ async fn prune(
#[api(
input: {
properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
"output-format": {
schema: OUTPUT_FORMAT,
diff --git a/proxmox-backup-client/src/mount.rs b/proxmox-backup-client/src/mount.rs
index e815c8a9c..91e85dde5 100644
--- a/proxmox-backup-client/src/mount.rs
+++ b/proxmox-backup-client/src/mount.rs
@@ -17,11 +17,12 @@ use proxmox_router::{cli::*, ApiHandler, ApiMethod, RpcEnvironment};
use proxmox_schema::*;
use proxmox_sortable_macro::sortable;
-use pbs_api_types::{ArchiveType, BackupArchiveName, BackupNamespace};
+use pbs_api_types::{ArchiveType, Authid, BackupArchiveName, BackupNamespace, DATASTORE_SCHEMA};
use pbs_client::tools::key_source::{
crypto_parameters, format_key_source, get_encryption_key_password,
};
use pbs_client::{BackupReader, RemoteChunkReader};
+use pbs_client::{BACKUP_REPO_PORT_SCHEMA, BACKUP_REPO_SERVER_SCHEMA, REPO_URL_SCHEMA};
use pbs_datastore::cached_chunk_reader::CachedChunkReader;
use pbs_datastore::index::IndexFile;
use pbs_key_config::decrypt_key;
@@ -32,7 +33,7 @@ use crate::helper;
use crate::{
complete_group_or_snapshot, complete_img_archive_name, complete_namespace,
complete_pxar_archive_name, complete_repository, connect, dir_or_last_from_group,
- extract_repository_from_value, optional_ns_param, record_repository, REPO_URL_SCHEMA,
+ extract_repository_from_value, optional_ns_param, record_repository,
};
#[sortable]
@@ -41,7 +42,6 @@ const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
&ObjectSchema::new(
"Mount pxar archive.",
&sorted!([
- ("ns", true, &BackupNamespace::API_SCHEMA,),
(
"snapshot",
false,
@@ -53,7 +53,12 @@ const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
false,
&StringSchema::new("Target directory path.").schema()
),
+ ("ns", true, &BackupNamespace::API_SCHEMA),
("repository", true, &REPO_URL_SCHEMA),
+ ("server", true, &BACKUP_REPO_SERVER_SCHEMA),
+ ("port", true, &BACKUP_REPO_PORT_SCHEMA),
+ ("datastore", true, &DATASTORE_SCHEMA),
+ ("auth-id", true, &Authid::API_SCHEMA),
(
"keyfile",
true,
@@ -77,14 +82,18 @@ const API_METHOD_MAP: ApiMethod = ApiMethod::new(
"Map a drive image from a VM backup to a local loopback device. Use 'unmap' to undo.
WARNING: Only do this with *trusted* backups!",
&sorted!([
- ("ns", true, &BackupNamespace::API_SCHEMA,),
(
"snapshot",
false,
&StringSchema::new("Group/Snapshot path.").schema()
),
("archive-name", false, &BackupArchiveName::API_SCHEMA),
+ ("ns", true, &BackupNamespace::API_SCHEMA),
("repository", true, &REPO_URL_SCHEMA),
+ ("server", true, &BACKUP_REPO_SERVER_SCHEMA),
+ ("port", true, &BACKUP_REPO_PORT_SCHEMA),
+ ("datastore", true, &DATASTORE_SCHEMA),
+ ("auth-id", true, &Authid::API_SCHEMA),
(
"keyfile",
true,
diff --git a/proxmox-backup-client/src/namespace.rs b/proxmox-backup-client/src/namespace.rs
index 2929e394b..e2928dc93 100644
--- a/proxmox-backup-client/src/namespace.rs
+++ b/proxmox-backup-client/src/namespace.rs
@@ -1,8 +1,7 @@
use anyhow::{bail, Error};
use serde_json::{json, Value};
-use pbs_api_types::BackupNamespace;
-use pbs_client::tools::REPO_URL_SCHEMA;
+use pbs_client::BackupRepositoryArgs;
use proxmox_router::cli::{
format_and_print_result, get_output_format, CliCommand, CliCommandMap, OUTPUT_FORMAT,
@@ -17,13 +16,9 @@ use crate::{
#[api(
input: {
properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
- },
- ns: {
- type: BackupNamespace,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
"max-depth": {
description: "maximum recursion depth",
@@ -84,13 +79,9 @@ async fn list_namespaces(param: Value, max_depth: Option<usize>) -> Result<(), E
#[api(
input: {
properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
- },
- ns: {
- type: BackupNamespace,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
}
},
@@ -124,13 +115,9 @@ async fn create_namespace(param: Value) -> Result<(), Error> {
#[api(
input: {
properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
- },
- ns: {
- type: BackupNamespace,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
"delete-groups": {
description: "Destroys all groups in the hierarchy.",
diff --git a/proxmox-backup-client/src/snapshot.rs b/proxmox-backup-client/src/snapshot.rs
index 500fd2bb8..05f0ec1c7 100644
--- a/proxmox-backup-client/src/snapshot.rs
+++ b/proxmox-backup-client/src/snapshot.rs
@@ -17,8 +17,8 @@ use pbs_tools::json::required_string_param;
use crate::{
api_datastore_list_snapshots, complete_backup_group, complete_backup_snapshot,
complete_namespace, complete_repository, connect, crypto_parameters,
- extract_repository_from_value, optional_ns_param, record_repository, BackupDir, KEYFD_SCHEMA,
- KEYFILE_SCHEMA, REPO_URL_SCHEMA,
+ extract_repository_from_value, optional_ns_param, record_repository, BackupDir,
+ BackupRepositoryArgs, KEYFD_SCHEMA, KEYFILE_SCHEMA,
};
fn snapshot_args(ns: &BackupNamespace, snapshot: &BackupDir) -> Result<Value, Error> {
@@ -32,13 +32,9 @@ fn snapshot_args(ns: &BackupNamespace, snapshot: &BackupDir) -> Result<Value, Er
#[api(
input: {
properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
- },
- ns: {
- type: BackupNamespace,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
group: {
type: String,
@@ -108,13 +104,9 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {
#[api(
input: {
properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
- },
- ns: {
- type: BackupNamespace,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
snapshot: {
type: String,
@@ -161,13 +153,9 @@ async fn list_snapshot_files(param: Value) -> Result<Value, Error> {
#[api(
input: {
properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
- },
- ns: {
- type: BackupNamespace,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
snapshot: {
type: String,
@@ -200,13 +188,9 @@ async fn forget_snapshots(param: Value) -> Result<(), Error> {
#[api(
input: {
properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
- },
- ns: {
- type: BackupNamespace,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
snapshot: {
type: String,
@@ -281,13 +265,9 @@ async fn upload_log(param: Value) -> Result<Value, Error> {
#[api(
input: {
properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
- },
- ns: {
- type: BackupNamespace,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
snapshot: {
type: String,
@@ -338,13 +318,9 @@ async fn show_notes(param: Value) -> Result<Value, Error> {
#[api(
input: {
properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
- },
- ns: {
- type: BackupNamespace,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
snapshot: {
type: String,
@@ -380,13 +356,9 @@ async fn update_notes(param: Value) -> Result<Value, Error> {
#[api(
input: {
properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
- },
- ns: {
- type: BackupNamespace,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
snapshot: {
type: String,
@@ -437,13 +409,9 @@ async fn show_protection(param: Value) -> Result<(), Error> {
#[api(
input: {
properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
- },
- ns: {
- type: BackupNamespace,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
snapshot: {
type: String,
diff --git a/proxmox-backup-client/src/task.rs b/proxmox-backup-client/src/task.rs
index 472db0860..dba50cc85 100644
--- a/proxmox-backup-client/src/task.rs
+++ b/proxmox-backup-client/src/task.rs
@@ -10,14 +10,14 @@ use pbs_tools::json::required_string_param;
use pbs_api_types::UPID;
-use crate::{complete_repository, connect, extract_repository_from_value, REPO_URL_SCHEMA};
+use crate::{complete_repository, connect, extract_repository_from_value, BackupRepositoryArgs};
#[api(
input: {
properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
limit: {
description: "The maximal number of tasks to list.",
@@ -87,9 +87,9 @@ async fn task_list(param: Value) -> Result<Value, Error> {
#[api(
input: {
properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
upid: {
type: UPID,
@@ -112,9 +112,9 @@ async fn task_log(param: Value) -> Result<Value, Error> {
#[api(
input: {
properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
upid: {
type: UPID,
diff --git a/proxmox-file-restore/src/main.rs b/proxmox-file-restore/src/main.rs
index bf6cf9aed..64de94b42 100644
--- a/proxmox-file-restore/src/main.rs
+++ b/proxmox-file-restore/src/main.rs
@@ -31,9 +31,8 @@ use pbs_client::tools::{
crypto_parameters_keep_fd, format_key_source, get_encryption_key_password, KEYFD_SCHEMA,
KEYFILE_SCHEMA,
},
- REPO_URL_SCHEMA,
};
-use pbs_client::{BackupReader, BackupRepository, RemoteChunkReader};
+use pbs_client::{BackupReader, BackupRepository, BackupRepositoryArgs, RemoteChunkReader};
use pbs_datastore::catalog::{ArchiveEntry, CatalogReader, DirEntryAttribute};
use pbs_datastore::dynamic_index::BufferedDynamicReader;
use pbs_datastore::index::IndexFile;
@@ -212,13 +211,9 @@ async fn list_files(
#[api(
input: {
properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
- },
- ns: {
- type: BackupNamespace,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
snapshot: {
type: String,
@@ -272,7 +267,6 @@ async fn list_files(
)]
/// List a directory from a backup snapshot.
async fn list(
- ns: Option<BackupNamespace>,
snapshot: String,
path: String,
base64: bool,
@@ -280,7 +274,11 @@ async fn list(
param: Value,
) -> Result<(), Error> {
let repo = extract_repository_from_value(&param)?;
- let ns = ns.unwrap_or_default();
+ let ns: BackupNamespace = param["ns"]
+ .as_str()
+ .map(|s| s.parse())
+ .transpose()?
+ .unwrap_or_default();
let snapshot: BackupDir = snapshot.parse()?;
let path = parse_path(path, base64)?;
@@ -361,13 +359,9 @@ async fn list(
#[api(
input: {
properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
- },
- ns: {
- type: BackupNamespace,
- optional: true,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
snapshot: {
type: String,
@@ -426,7 +420,6 @@ async fn list(
/// Restore files from a backup snapshot.
#[allow(clippy::too_many_arguments)]
async fn extract(
- ns: Option<BackupNamespace>,
snapshot: String,
path: String,
base64: bool,
@@ -436,7 +429,11 @@ async fn extract(
param: Value,
) -> Result<(), Error> {
let repo = extract_repository_from_value(&param)?;
- let namespace = ns.unwrap_or_default();
+ let namespace: BackupNamespace = param["ns"]
+ .as_str()
+ .map(|s| s.parse())
+ .transpose()?
+ .unwrap_or_default();
let snapshot: BackupDir = snapshot.parse()?;
let orig_path = path;
let path = parse_path(orig_path.clone(), base64)?;
diff --git a/src/bin/proxmox_backup_debug/diff.rs b/src/bin/proxmox_backup_debug/diff.rs
index 116216e51..53d0efd10 100644
--- a/src/bin/proxmox_backup_debug/diff.rs
+++ b/src/bin/proxmox_backup_debug/diff.rs
@@ -19,9 +19,8 @@ use pbs_client::tools::key_source::{
};
use pbs_client::tools::{
complete_archive_name, complete_group_or_snapshot, connect, extract_repository_from_value,
- REPO_URL_SCHEMA,
};
-use pbs_client::{BackupReader, BackupRepository, RemoteChunkReader};
+use pbs_client::{BackupReader, BackupRepository, BackupRepositoryArgs, RemoteChunkReader};
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
use pbs_datastore::index::IndexFile;
use pbs_key_config::decrypt_key;
@@ -57,10 +56,6 @@ pub fn diff_commands() -> CommandLineInterface {
#[api(
input: {
properties: {
- "ns": {
- type: BackupNamespace,
- optional: true,
- },
"prev-snapshot": {
description: "Path for the first snapshot.",
type: String,
@@ -72,9 +67,9 @@ pub fn diff_commands() -> CommandLineInterface {
"archive-name": {
type: BackupArchiveName,
},
- "repository": {
- optional: true,
- schema: REPO_URL_SCHEMA,
+ repo: {
+ type: BackupRepositoryArgs,
+ flatten: true,
},
"keyfile": {
optional: true,
@@ -108,13 +103,16 @@ async fn diff_archive_cmd(
archive_name: BackupArchiveName,
compare_content: bool,
color: Option<ColorMode>,
- ns: Option<BackupNamespace>,
param: Value,
) -> Result<(), Error> {
let repo = extract_repository_from_value(&param)?;
let color = color.unwrap_or_default();
- let namespace = ns.unwrap_or_else(BackupNamespace::root);
+ let namespace: BackupNamespace = param["ns"]
+ .as_str()
+ .map(|s| s.parse())
+ .transpose()?
+ .unwrap_or_default();
let crypto = crypto_parameters(&param)?;
--
2.47.3
^ permalink raw reply [flat|nested] 8+ messages in thread