From: "Fabian Grünbichler" <f.gruenbichler@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [pbs-devel] [PATCH proxmox-backup 03/16] replace Userid with Authid
Date: Wed, 28 Oct 2020 12:36:26 +0100
Message-ID: <20201028113632.814586-6-f.gruenbichler@proxmox.com>
In-Reply-To: <20201028113632.814586-1-f.gruenbichler@proxmox.com>
in most generic places. this is accompanied by a change in
RpcEnvironment to purposefully break existing call sites.
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
Notes:
requires corresponding proxmox patch + dependency bump
some places remain untouched for more involved follow-ups, e.g.
remote.cfg field names, termproxy stuff
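    for reviewers following along, a rough, self-contained sketch of the
    Authid shape this series assumes (placeholder types only, not the actual
    proxmox-backup definitions, which additionally implement parsing, serde
    and API schema support): an auth id is either a plain user or a user
    plus an API token name, which is what the is_token()/user() calls and
    the Authid::from(userid) conversions in the hunks below rely on. this is
    also why ACL entries, backup group owners and UPIDs now carry an Authid
    instead of a Userid.

    // placeholder sketch only, not the real proxmox-backup types
    type Userid = String;

    #[derive(Clone, Debug, PartialEq, Eq, Hash)]
    struct Authid {
        user: Userid,
        tokenname: Option<String>, // None => plain user, Some(_) => API token
    }

    impl Authid {
        fn is_token(&self) -> bool {
            self.tokenname.is_some()
        }

        fn user(&self) -> &Userid {
            &self.user
        }
    }

    impl From<Userid> for Authid {
        fn from(user: Userid) -> Self {
            Authid { user, tokenname: None }
        }
    }

    fn main() {
        // mirrors the call sites that still start out with a Userid
        // (e.g. authenticate_user or termproxy in this patch)
        let auth_id = Authid::from(String::from("user1@pbs"));
        assert!(!auth_id.is_token());
        assert_eq!(auth_id.user(), "user1@pbs");
    }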
src/api2/access.rs | 11 +--
src/api2/access/acl.rs | 19 ++---
src/api2/access/user.rs | 7 +-
src/api2/admin/datastore.rs | 103 +++++++++++++------------
src/api2/admin/sync.rs | 4 +-
src/api2/admin/verify.rs | 4 +-
src/api2/backup.rs | 14 ++--
src/api2/backup/environment.rs | 18 ++---
src/api2/config/datastore.rs | 4 +-
src/api2/config/remote.rs | 4 +-
src/api2/node.rs | 9 ++-
src/api2/node/apt.rs | 6 +-
src/api2/node/disks.rs | 6 +-
src/api2/node/disks/directory.rs | 4 +-
src/api2/node/disks/zfs.rs | 4 +-
src/api2/node/network.rs | 4 +-
src/api2/node/services.rs | 22 +++---
src/api2/node/subscription.rs | 6 +-
src/api2/node/tasks.rs | 30 +++----
src/api2/pull.rs | 24 +++---
src/api2/reader.rs | 10 +--
src/api2/reader/environment.rs | 16 ++--
src/api2/status.rs | 12 +--
src/api2/types/mod.rs | 16 ++--
src/backup/datastore.rs | 16 ++--
src/bin/proxmox-backup-client.rs | 6 +-
src/bin/proxmox-backup-manager.rs | 2 +-
src/bin/proxmox-backup-proxy.rs | 16 ++--
src/bin/proxmox_backup_manager/acl.rs | 2 +-
src/bin/proxmox_backup_manager/user.rs | 4 +-
src/client/pull.rs | 8 +-
src/config/acl.rs | 64 +++++++--------
src/config/cached_user_info.rs | 63 ++++++++++-----
src/config/remote.rs | 2 +-
src/config/user.rs | 69 +++++++++++++++--
src/server/environment.rs | 12 +--
src/server/rest.rs | 38 ++++-----
src/server/upid.rs | 16 ++--
src/server/verify_job.rs | 6 +-
src/server/worker_task.rs | 14 ++--
tests/worker-task-abort.rs | 2 +-
www/config/ACLView.js | 2 +-
www/window/ACLEdit.js | 2 +-
43 files changed, 399 insertions(+), 302 deletions(-)
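    the RpcEnvironment change mentioned above renames the user accessors to
    auth_id accessors, so every call site still using get_user()/set_user()
    fails to compile and has to be converted explicitly. a minimal,
    self-contained sketch of the renamed trait surface and the resulting
    call-site pattern (toy stand-in; the real trait lives in the proxmox
    crate, which is what the dependency bump is for):

    trait RpcEnvironmentSketch {
        fn set_auth_id(&mut self, auth_id: Option<String>);
        fn get_auth_id(&self) -> Option<String>;
    }

    struct CliEnvironmentSketch {
        auth_id: Option<String>,
    }

    impl RpcEnvironmentSketch for CliEnvironmentSketch {
        fn set_auth_id(&mut self, auth_id: Option<String>) {
            self.auth_id = auth_id;
        }

        fn get_auth_id(&self) -> Option<String> {
            self.auth_id.clone()
        }
    }

    fn main() {
        // pattern used by proxmox-backup-manager below: seed the CLI
        // environment with root@pam, handlers then parse it into an Authid
        let mut rpcenv = CliEnvironmentSketch { auth_id: None };
        rpcenv.set_auth_id(Some(String::from("root@pam")));
        assert_eq!(rpcenv.get_auth_id().as_deref(), Some("root@pam"));
    }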
diff --git a/src/api2/access.rs b/src/api2/access.rs
index c302e0c7..d0494c9a 100644
--- a/src/api2/access.rs
+++ b/src/api2/access.rs
@@ -31,7 +31,8 @@ fn authenticate_user(
) -> Result<bool, Error> {
let user_info = CachedUserInfo::new()?;
- if !user_info.is_active_user(&userid) {
+ let auth_id = Authid::from(userid.clone());
+ if !user_info.is_active_auth_id(&auth_id) {
bail!("user account disabled or expired.");
}
@@ -69,8 +70,7 @@ fn authenticate_user(
path_vec.push(part);
}
}
-
- user_info.check_privs(userid, &path_vec, *privilege, false)?;
+ user_info.check_privs(&auth_id, &path_vec, *privilege, false)?;
return Ok(false);
}
}
@@ -213,9 +213,10 @@ fn change_password(
) -> Result<Value, Error> {
let current_user: Userid = rpcenv
- .get_user()
+ .get_auth_id()
.ok_or_else(|| format_err!("unknown user"))?
.parse()?;
+ let current_auth = Authid::from(current_user.clone());
let mut allowed = userid == current_user;
@@ -223,7 +224,7 @@ fn change_password(
if !allowed {
let user_info = CachedUserInfo::new()?;
- let privs = user_info.lookup_privs(&current_user, &[]);
+ let privs = user_info.lookup_privs(&current_auth, &[]);
if (privs & PRIV_PERMISSIONS_MODIFY) != 0 { allowed = true; }
}
diff --git a/src/api2/access/acl.rs b/src/api2/access/acl.rs
index 3282c66e..7211a6be 100644
--- a/src/api2/access/acl.rs
+++ b/src/api2/access/acl.rs
@@ -140,9 +140,9 @@ pub fn read_acl(
optional: true,
schema: ACL_PROPAGATE_SCHEMA,
},
- userid: {
+ auth_id: {
optional: true,
- type: Userid,
+ type: Authid,
},
group: {
optional: true,
@@ -168,7 +168,7 @@ pub fn update_acl(
path: String,
role: String,
propagate: Option<bool>,
- userid: Option<Userid>,
+ auth_id: Option<Authid>,
group: Option<String>,
delete: Option<bool>,
digest: Option<String>,
@@ -190,11 +190,12 @@ pub fn update_acl(
if let Some(ref _group) = group {
bail!("parameter 'group' - groups are currently not supported.");
- } else if let Some(ref userid) = userid {
+ } else if let Some(ref auth_id) = auth_id {
if !delete { // Note: we allow to delete non-existent users
let user_cfg = crate::config::user::cached_config()?;
- if user_cfg.sections.get(&userid.to_string()).is_none() {
- bail!("no such user.");
+ if user_cfg.sections.get(&auth_id.to_string()).is_none() {
+ bail!(format!("no such {}.",
+ if auth_id.is_token() { "API token" } else { "user" }));
}
}
} else {
@@ -205,11 +206,11 @@ pub fn update_acl(
acl::check_acl_path(&path)?;
}
- if let Some(userid) = userid {
+ if let Some(auth_id) = auth_id {
if delete {
- tree.delete_user_role(&path, &userid, &role);
+ tree.delete_user_role(&path, &auth_id, &role);
} else {
- tree.insert_user_role(&path, &userid, &role, propagate);
+ tree.insert_user_role(&path, &auth_id, &role, propagate);
}
} else if let Some(group) = group {
if delete {
diff --git a/src/api2/access/user.rs b/src/api2/access/user.rs
index c041d804..47f8e1d1 100644
--- a/src/api2/access/user.rs
+++ b/src/api2/access/user.rs
@@ -39,10 +39,13 @@ pub fn list_users(
let (config, digest) = user::config()?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ // intentionally user only for now
+ let userid: Userid = rpcenv.get_auth_id().unwrap().parse()?;
+ let auth_id = Authid::from(userid.clone());
+
let user_info = CachedUserInfo::new()?;
- let top_level_privs = user_info.lookup_privs(&userid, &["access", "users"]);
+ let top_level_privs = user_info.lookup_privs(&auth_id, &["access", "users"]);
let top_level_allowed = (top_level_privs & PRIV_SYS_AUDIT) != 0;
let filter_by_privs = |user: &user::User| {
diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index e1a0aacc..8862637d 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -47,11 +47,11 @@ use crate::config::acl::{
fn check_backup_owner(
store: &DataStore,
group: &BackupGroup,
- userid: &Userid,
+ auth_id: &Authid,
) -> Result<(), Error> {
let owner = store.get_owner(group)?;
- if &owner != userid {
- bail!("backup owner check failed ({} != {})", userid, owner);
+ if &owner != auth_id {
+ bail!("backup owner check failed ({} != {})", auth_id, owner);
}
Ok(())
}
@@ -149,9 +149,9 @@ fn list_groups(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let datastore = DataStore::lookup_datastore(&store)?;
@@ -171,7 +171,7 @@ fn list_groups(
let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
let owner = datastore.get_owner(group)?;
- if !list_all && owner != userid {
+ if !list_all && owner != auth_id {
continue;
}
@@ -230,16 +230,16 @@ pub fn list_snapshot_files(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let datastore = DataStore::lookup_datastore(&store)?;
let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
- if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
+ if !allowed { check_backup_owner(&datastore, snapshot.group(), &auth_id)?; }
let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
@@ -282,16 +282,16 @@ fn delete_snapshot(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
let datastore = DataStore::lookup_datastore(&store)?;
let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
- if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
+ if !allowed { check_backup_owner(&datastore, snapshot.group(), &auth_id)?; }
datastore.remove_backup_dir(&snapshot, false)?;
@@ -338,9 +338,9 @@ pub fn list_snapshots (
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let datastore = DataStore::lookup_datastore(&store)?;
@@ -362,7 +362,7 @@ pub fn list_snapshots (
let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
let owner = datastore.get_owner(group)?;
- if !list_all && owner != userid {
+ if !list_all && owner != auth_id {
continue;
}
@@ -568,13 +568,13 @@ pub fn verify(
_ => bail!("parameters do not specify a backup group or snapshot"),
}
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let upid_str = WorkerTask::new_thread(
worker_type,
Some(worker_id.clone()),
- userid,
+ auth_id,
to_stdout,
move |worker| {
let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
@@ -701,9 +701,9 @@ fn prune(
let backup_type = tools::required_string_param(&param, "backup-type")?;
let backup_id = tools::required_string_param(&param, "backup-id")?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let dry_run = param["dry-run"].as_bool().unwrap_or(false);
@@ -712,7 +712,7 @@ fn prune(
let datastore = DataStore::lookup_datastore(&store)?;
let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
- if !allowed { check_backup_owner(&datastore, &group, &userid)?; }
+ if !allowed { check_backup_owner(&datastore, &group, &auth_id)?; }
let prune_options = PruneOptions {
keep_last: param["keep-last"].as_u64(),
@@ -754,7 +754,7 @@ fn prune(
// We use a WorkerTask just to have a task log, but run synchrounously
- let worker = WorkerTask::new("prune", Some(worker_id), Userid::root_userid().clone(), true)?;
+ let worker = WorkerTask::new("prune", Some(worker_id), auth_id.clone(), true)?;
if keep_all {
worker.log("No prune selection - keeping all files.");
@@ -829,6 +829,7 @@ fn start_garbage_collection(
) -> Result<Value, Error> {
let datastore = DataStore::lookup_datastore(&store)?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
println!("Starting garbage collection on store {}", store);
@@ -837,7 +838,7 @@ fn start_garbage_collection(
let upid_str = WorkerTask::new_thread(
"garbage_collection",
Some(store.clone()),
- Userid::root_userid().clone(),
+ auth_id.clone(),
to_stdout,
move |worker| {
worker.log(format!("starting garbage collection on store {}", store));
@@ -907,13 +908,13 @@ fn get_datastore_list(
let (config, _digest) = datastore::config()?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let mut list = Vec::new();
for (store, (_, data)) in &config.sections {
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
if allowed {
let mut entry = json!({ "store": store });
@@ -958,9 +959,9 @@ fn download_file(
let store = tools::required_string_param(&param, "store")?;
let datastore = DataStore::lookup_datastore(store)?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
@@ -971,7 +972,7 @@ fn download_file(
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
- if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+ if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }
println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);
@@ -1031,9 +1032,9 @@ fn download_file_decoded(
let store = tools::required_string_param(&param, "store")?;
let datastore = DataStore::lookup_datastore(store)?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
@@ -1044,7 +1045,7 @@ fn download_file_decoded(
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
- if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+ if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }
let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
for file in files {
@@ -1156,8 +1157,8 @@ fn upload_backup_log(
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
- check_backup_owner(&datastore, backup_dir.group(), &userid)?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+ check_backup_owner(&datastore, backup_dir.group(), &auth_id)?;
let mut path = datastore.base_path();
path.push(backup_dir.relative_path());
@@ -1226,14 +1227,14 @@ fn catalog(
) -> Result<Value, Error> {
let datastore = DataStore::lookup_datastore(&store)?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
- if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+ if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }
let file_name = CATALOG_NAME;
@@ -1397,9 +1398,9 @@ fn pxar_file_download(
let store = tools::required_string_param(&param, "store")?;
let datastore = DataStore::lookup_datastore(&store)?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let filepath = tools::required_string_param(&param, "filepath")?.to_owned();
@@ -1410,7 +1411,7 @@ fn pxar_file_download(
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
- if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+ if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }
let mut components = base64::decode(&filepath)?;
if components.len() > 0 && components[0] == '/' as u8 {
@@ -1576,14 +1577,14 @@ fn get_notes(
) -> Result<String, Error> {
let datastore = DataStore::lookup_datastore(&store)?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
- if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+ if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }
let (manifest, _) = datastore.load_manifest(&backup_dir)?;
@@ -1629,14 +1630,14 @@ fn set_notes(
) -> Result<(), Error> {
let datastore = DataStore::lookup_datastore(&store)?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
- if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+ if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }
datastore.update_manifest(&backup_dir,|manifest| {
manifest.unprotected["notes"] = notes.into();
@@ -1658,7 +1659,7 @@ fn set_notes(
schema: BACKUP_ID_SCHEMA,
},
"new-owner": {
- type: Userid,
+ type: Authid,
},
},
},
@@ -1671,7 +1672,7 @@ fn set_backup_owner(
store: String,
backup_type: String,
backup_id: String,
- new_owner: Userid,
+ new_owner: Authid,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
@@ -1681,8 +1682,14 @@ fn set_backup_owner(
let user_info = CachedUserInfo::new()?;
- if !user_info.is_active_user(&new_owner) {
- bail!("user '{}' is inactive or non-existent", new_owner);
+ if !user_info.is_active_auth_id(&new_owner) {
+ bail!("{} '{}' is inactive or non-existent",
+ if new_owner.is_token() {
+ "API token".to_string()
+ } else {
+ "user".to_string()
+ },
+ new_owner);
}
datastore.set_owner(&backup_group, &new_owner, true)?;
diff --git a/src/api2/admin/sync.rs b/src/api2/admin/sync.rs
index 4a104441..f7032a3a 100644
--- a/src/api2/admin/sync.rs
+++ b/src/api2/admin/sync.rs
@@ -101,11 +101,11 @@ fn run_sync_job(
let (config, _digest) = sync::config()?;
let sync_job: SyncJobConfig = config.lookup("sync", &id)?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let job = Job::new("syncjob", &id)?;
- let upid_str = do_sync_job(job, sync_job, &userid, None)?;
+ let upid_str = do_sync_job(job, sync_job, &auth_id, None)?;
Ok(upid_str)
}
diff --git a/src/api2/admin/verify.rs b/src/api2/admin/verify.rs
index 4bc184d3..02c1eef4 100644
--- a/src/api2/admin/verify.rs
+++ b/src/api2/admin/verify.rs
@@ -101,11 +101,11 @@ fn run_verification_job(
let (config, _digest) = verify::config()?;
let verification_job: VerificationJobConfig = config.lookup("verification", &id)?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let job = Job::new("verificationjob", &id)?;
- let upid_str = do_verification_job(job, verification_job, &userid, None)?;
+ let upid_str = do_verification_job(job, verification_job, &auth_id, None)?;
Ok(upid_str)
}
diff --git a/src/api2/backup.rs b/src/api2/backup.rs
index 8e58efdd..e030d60d 100644
--- a/src/api2/backup.rs
+++ b/src/api2/backup.rs
@@ -59,12 +59,12 @@ async move {
let debug = param["debug"].as_bool().unwrap_or(false);
let benchmark = param["benchmark"].as_bool().unwrap_or(false);
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store = tools::required_string_param(&param, "store")?.to_owned();
let user_info = CachedUserInfo::new()?;
- user_info.check_privs(&userid, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
+ user_info.check_privs(&auth_id, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
let datastore = DataStore::lookup_datastore(&store)?;
@@ -105,12 +105,12 @@ async move {
};
// lock backup group to only allow one backup per group at a time
- let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &userid)?;
+ let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &auth_id)?;
// permission check
- if owner != userid && worker_type != "benchmark" {
+ if owner != auth_id && worker_type != "benchmark" {
// only the owner is allowed to create additional snapshots
- bail!("backup owner check failed ({} != {})", userid, owner);
+ bail!("backup owner check failed ({} != {})", auth_id, owner);
}
let last_backup = {
@@ -153,9 +153,9 @@ async move {
if !is_new { bail!("backup directory already exists."); }
- WorkerTask::spawn(worker_type, Some(worker_id), userid.clone(), true, move |worker| {
+ WorkerTask::spawn(worker_type, Some(worker_id), auth_id.clone(), true, move |worker| {
let mut env = BackupEnvironment::new(
- env_type, userid, worker.clone(), datastore, backup_dir);
+ env_type, auth_id, worker.clone(), datastore, backup_dir);
env.debug = debug;
env.last_backup = last_backup;
diff --git a/src/api2/backup/environment.rs b/src/api2/backup/environment.rs
index 7f0476c3..0fb37807 100644
--- a/src/api2/backup/environment.rs
+++ b/src/api2/backup/environment.rs
@@ -10,7 +10,7 @@ use proxmox::tools::digest_to_hex;
use proxmox::tools::fs::{replace_file, CreateOptions};
use proxmox::api::{RpcEnvironment, RpcEnvironmentType};
-use crate::api2::types::Userid;
+use crate::api2::types::Authid;
use crate::backup::*;
use crate::server::WorkerTask;
use crate::server::formatter::*;
@@ -104,7 +104,7 @@ impl SharedBackupState {
pub struct BackupEnvironment {
env_type: RpcEnvironmentType,
result_attributes: Value,
- user: Userid,
+ auth_id: Authid,
pub debug: bool,
pub formatter: &'static OutputFormatter,
pub worker: Arc<WorkerTask>,
@@ -117,7 +117,7 @@ pub struct BackupEnvironment {
impl BackupEnvironment {
pub fn new(
env_type: RpcEnvironmentType,
- user: Userid,
+ auth_id: Authid,
worker: Arc<WorkerTask>,
datastore: Arc<DataStore>,
backup_dir: BackupDir,
@@ -137,7 +137,7 @@ impl BackupEnvironment {
Self {
result_attributes: json!({}),
env_type,
- user,
+ auth_id,
worker,
datastore,
debug: false,
@@ -518,7 +518,7 @@ impl BackupEnvironment {
WorkerTask::new_thread(
"verify",
Some(worker_id),
- self.user.clone(),
+ self.auth_id.clone(),
false,
move |worker| {
worker.log("Automatically verifying newly added snapshot");
@@ -598,12 +598,12 @@ impl RpcEnvironment for BackupEnvironment {
self.env_type
}
- fn set_user(&mut self, _user: Option<String>) {
- panic!("unable to change user");
+ fn set_auth_id(&mut self, _auth_id: Option<String>) {
+ panic!("unable to change auth_id");
}
- fn get_user(&self) -> Option<String> {
- Some(self.user.to_string())
+ fn get_auth_id(&self) -> Option<String> {
+ Some(self.auth_id.to_string())
}
}
diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
index 4847bdad..1da82593 100644
--- a/src/api2/config/datastore.rs
+++ b/src/api2/config/datastore.rs
@@ -35,14 +35,14 @@ pub fn list_datastores(
let (config, digest) = datastore::config()?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
let list:Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
let filter_by_privs = |store: &DataStoreConfig| {
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store.name]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store.name]);
(user_privs & PRIV_DATASTORE_AUDIT) != 0
};
diff --git a/src/api2/config/remote.rs b/src/api2/config/remote.rs
index d419be2b..dd2777c9 100644
--- a/src/api2/config/remote.rs
+++ b/src/api2/config/remote.rs
@@ -66,7 +66,7 @@ pub fn list_remotes(
default: 8007,
},
userid: {
- type: Userid,
+ type: Authid,
},
password: {
schema: remote::REMOTE_PASSWORD_SCHEMA,
@@ -167,7 +167,7 @@ pub enum DeletableProperty {
},
userid: {
optional: true,
- type: Userid,
+ type: Authid,
},
password: {
optional: true,
diff --git a/src/api2/node.rs b/src/api2/node.rs
index 4689c494..d06a0cb6 100644
--- a/src/api2/node.rs
+++ b/src/api2/node.rs
@@ -91,10 +91,12 @@ async fn termproxy(
cmd: Option<String>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
+ // intentionally user only for now
let userid: Userid = rpcenv
- .get_user()
+ .get_auth_id()
.ok_or_else(|| format_err!("unknown user"))?
.parse()?;
+ let auth_id = Authid::from(userid.clone());
if userid.realm() != "pam" {
bail!("only pam users can use the console");
@@ -137,7 +139,7 @@ async fn termproxy(
let upid = WorkerTask::spawn(
"termproxy",
None,
- userid,
+ auth_id,
false,
move |worker| async move {
// move inside the worker so that it survives and does not close the port
@@ -272,7 +274,8 @@ fn upgrade_to_websocket(
rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
async move {
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ // intentionally user only for now
+ let userid: Userid = rpcenv.get_auth_id().unwrap().parse()?;
let ticket = tools::required_string_param(&param, "vncticket")?;
let port: u16 = tools::required_integer_param(&param, "port")? as u16;
diff --git a/src/api2/node/apt.rs b/src/api2/node/apt.rs
index e8d4094b..ba72d352 100644
--- a/src/api2/node/apt.rs
+++ b/src/api2/node/apt.rs
@@ -12,7 +12,7 @@ use crate::server::WorkerTask;
use crate::tools::http;
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
-use crate::api2::types::{APTUpdateInfo, NODE_SCHEMA, Userid, UPID_SCHEMA};
+use crate::api2::types::{Authid, APTUpdateInfo, NODE_SCHEMA, UPID_SCHEMA};
const_regex! {
VERSION_EPOCH_REGEX = r"^\d+:";
@@ -351,11 +351,11 @@ pub fn apt_update_database(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let quiet = quiet.unwrap_or(API_METHOD_APT_UPDATE_DATABASE_PARAM_DEFAULT_QUIET);
- let upid_str = WorkerTask::new_thread("aptupdate", None, userid, to_stdout, move |worker| {
+ let upid_str = WorkerTask::new_thread("aptupdate", None, auth_id, to_stdout, move |worker| {
if !quiet { worker.log("starting apt-get update") }
// TODO: set proxy /etc/apt/apt.conf.d/76pbsproxy like PVE
diff --git a/src/api2/node/disks.rs b/src/api2/node/disks.rs
index eed9e257..97e04edd 100644
--- a/src/api2/node/disks.rs
+++ b/src/api2/node/disks.rs
@@ -13,7 +13,7 @@ use crate::tools::disks::{
};
use crate::server::WorkerTask;
-use crate::api2::types::{Userid, UPID_SCHEMA, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA};
+use crate::api2::types::{Authid, UPID_SCHEMA, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA};
pub mod directory;
pub mod zfs;
@@ -140,7 +140,7 @@ pub fn initialize_disk(
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let info = get_disk_usage_info(&disk, true)?;
@@ -149,7 +149,7 @@ pub fn initialize_disk(
}
let upid_str = WorkerTask::new_thread(
- "diskinit", Some(disk.clone()), userid, to_stdout, move |worker|
+ "diskinit", Some(disk.clone()), auth_id, to_stdout, move |worker|
{
worker.log(format!("initialize disk {}", disk));
diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs
index 0d9ddeef..2a4780f2 100644
--- a/src/api2/node/disks/directory.rs
+++ b/src/api2/node/disks/directory.rs
@@ -134,7 +134,7 @@ pub fn create_datastore_disk(
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let info = get_disk_usage_info(&disk, true)?;
@@ -143,7 +143,7 @@ pub fn create_datastore_disk(
}
let upid_str = WorkerTask::new_thread(
- "dircreate", Some(name.clone()), userid, to_stdout, move |worker|
+ "dircreate", Some(name.clone()), auth_id, to_stdout, move |worker|
{
worker.log(format!("create datastore '{}' on disk {}", name, disk));
diff --git a/src/api2/node/disks/zfs.rs b/src/api2/node/disks/zfs.rs
index f70b91b6..79094012 100644
--- a/src/api2/node/disks/zfs.rs
+++ b/src/api2/node/disks/zfs.rs
@@ -256,7 +256,7 @@ pub fn create_zpool(
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let add_datastore = add_datastore.unwrap_or(false);
@@ -316,7 +316,7 @@ pub fn create_zpool(
}
let upid_str = WorkerTask::new_thread(
- "zfscreate", Some(name.clone()), userid, to_stdout, move |worker|
+ "zfscreate", Some(name.clone()), auth_id, to_stdout, move |worker|
{
worker.log(format!("create {:?} zpool '{}' on devices '{}'", raidlevel, name, devices_text));
diff --git a/src/api2/node/network.rs b/src/api2/node/network.rs
index efdc8afd..f737684d 100644
--- a/src/api2/node/network.rs
+++ b/src/api2/node/network.rs
@@ -684,9 +684,9 @@ pub async fn reload_network_config(
network::assert_ifupdown2_installed()?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let upid_str = WorkerTask::spawn("srvreload", Some(String::from("networking")), userid, true, |_worker| async {
+ let upid_str = WorkerTask::spawn("srvreload", Some(String::from("networking")), auth_id, true, |_worker| async {
let _ = std::fs::rename(network::NETWORK_INTERFACES_NEW_FILENAME, network::NETWORK_INTERFACES_FILENAME);
diff --git a/src/api2/node/services.rs b/src/api2/node/services.rs
index 849bead7..4c2a17b4 100644
--- a/src/api2/node/services.rs
+++ b/src/api2/node/services.rs
@@ -182,7 +182,7 @@ fn get_service_state(
Ok(json_service_state(&service, status))
}
-fn run_service_command(service: &str, cmd: &str, userid: Userid) -> Result<Value, Error> {
+fn run_service_command(service: &str, cmd: &str, auth_id: Authid) -> Result<Value, Error> {
let workerid = format!("srv{}", &cmd);
@@ -196,7 +196,7 @@ fn run_service_command(service: &str, cmd: &str, userid: Userid) -> Result<Value
let upid = WorkerTask::new_thread(
&workerid,
Some(service.clone()),
- userid,
+ auth_id,
false,
move |_worker| {
@@ -244,11 +244,11 @@ fn start_service(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
log::info!("starting service {}", service);
- run_service_command(&service, "start", userid)
+ run_service_command(&service, "start", auth_id)
}
#[api(
@@ -274,11 +274,11 @@ fn stop_service(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
log::info!("stopping service {}", service);
- run_service_command(&service, "stop", userid)
+ run_service_command(&service, "stop", auth_id)
}
#[api(
@@ -304,15 +304,15 @@ fn restart_service(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
log::info!("re-starting service {}", service);
if &service == "proxmox-backup-proxy" {
// special case, avoid aborting running tasks
- run_service_command(&service, "reload", userid)
+ run_service_command(&service, "reload", auth_id)
} else {
- run_service_command(&service, "restart", userid)
+ run_service_command(&service, "restart", auth_id)
}
}
@@ -339,11 +339,11 @@ fn reload_service(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
log::info!("reloading service {}", service);
- run_service_command(&service, "reload", userid)
+ run_service_command(&service, "reload", auth_id)
}
diff --git a/src/api2/node/subscription.rs b/src/api2/node/subscription.rs
index 0802f6a7..3ce7ce98 100644
--- a/src/api2/node/subscription.rs
+++ b/src/api2/node/subscription.rs
@@ -7,7 +7,7 @@ use crate::tools;
use crate::tools::subscription::{self, SubscriptionStatus, SubscriptionInfo};
use crate::config::acl::{PRIV_SYS_AUDIT,PRIV_SYS_MODIFY};
use crate::config::cached_user_info::CachedUserInfo;
-use crate::api2::types::{NODE_SCHEMA, Userid};
+use crate::api2::types::{NODE_SCHEMA, Authid};
#[api(
input: {
@@ -100,9 +100,9 @@ fn get_subscription(
},
};
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &[]);
+ let user_privs = user_info.lookup_privs(&auth_id, &[]);
if (user_privs & PRIV_SYS_AUDIT) == 0 {
// not enough privileges for full state
diff --git a/src/api2/node/tasks.rs b/src/api2/node/tasks.rs
index 4d16b20a..66af6d11 100644
--- a/src/api2/node/tasks.rs
+++ b/src/api2/node/tasks.rs
@@ -84,11 +84,11 @@ async fn get_task_status(
let upid = extract_upid(&param)?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- if userid != upid.userid {
+ if auth_id != upid.auth_id {
let user_info = CachedUserInfo::new()?;
- user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
+ user_info.check_privs(&auth_id, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
}
let mut result = json!({
@@ -99,7 +99,7 @@ async fn get_task_status(
"starttime": upid.starttime,
"type": upid.worker_type,
"id": upid.worker_id,
- "user": upid.userid,
+ "user": upid.auth_id,
});
if crate::server::worker_is_active(&upid).await? {
@@ -161,11 +161,11 @@ async fn read_task_log(
let upid = extract_upid(&param)?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- if userid != upid.userid {
+ if auth_id != upid.auth_id {
let user_info = CachedUserInfo::new()?;
- user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
+ user_info.check_privs(&auth_id, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
}
let test_status = param["test-status"].as_bool().unwrap_or(false);
@@ -234,11 +234,11 @@ fn stop_task(
let upid = extract_upid(&param)?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- if userid != upid.userid {
+ if auth_id != upid.auth_id {
let user_info = CachedUserInfo::new()?;
- user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_MODIFY, false)?;
+ user_info.check_privs(&auth_id, &["system", "tasks"], PRIV_SYS_MODIFY, false)?;
}
server::abort_worker_async(upid);
@@ -308,9 +308,9 @@ pub fn list_tasks(
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TaskListItem>, Error> {
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &["system", "tasks"]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["system", "tasks"]);
let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;
@@ -326,10 +326,10 @@ pub fn list_tasks(
Err(_) => return None,
};
- if !list_all && info.upid.userid != userid { return None; }
+ if !list_all && info.upid.auth_id != auth_id { return None; }
- if let Some(userid) = &userfilter {
- if !info.upid.userid.as_str().contains(userid) { return None; }
+ if let Some(needle) = &userfilter {
+ if !info.upid.auth_id.to_string().contains(needle) { return None; }
}
if let Some(store) = store {
diff --git a/src/api2/pull.rs b/src/api2/pull.rs
index 441701a5..aef7de4e 100644
--- a/src/api2/pull.rs
+++ b/src/api2/pull.rs
@@ -20,7 +20,7 @@ use crate::config::{
pub fn check_pull_privs(
- userid: &Userid,
+ auth_id: &Authid,
store: &str,
remote: &str,
remote_store: &str,
@@ -29,11 +29,11 @@ pub fn check_pull_privs(
let user_info = CachedUserInfo::new()?;
- user_info.check_privs(userid, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
- user_info.check_privs(userid, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?;
+ user_info.check_privs(auth_id, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
+ user_info.check_privs(auth_id, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?;
if delete {
- user_info.check_privs(userid, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
+ user_info.check_privs(auth_id, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
}
Ok(())
@@ -68,7 +68,7 @@ pub async fn get_pull_parameters(
pub fn do_sync_job(
mut job: Job,
sync_job: SyncJobConfig,
- userid: &Userid,
+ auth_id: &Authid,
schedule: Option<String>,
) -> Result<String, Error> {
@@ -78,7 +78,7 @@ pub fn do_sync_job(
let upid_str = WorkerTask::spawn(
&worker_type,
Some(job.jobname().to_string()),
- userid.clone(),
+ auth_id.clone(),
false,
move |worker| async move {
@@ -98,7 +98,9 @@ pub fn do_sync_job(
worker.log(format!("Sync datastore '{}' from '{}/{}'",
sync_job.store, sync_job.remote, sync_job.remote_store));
- crate::client::pull::pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, Userid::backup_userid().clone()).await?;
+ let backup_auth_id = Authid::backup_auth_id();
+
+ crate::client::pull::pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, backup_auth_id.clone()).await?;
worker.log(format!("sync job '{}' end", &job_id));
@@ -164,19 +166,19 @@ async fn pull (
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let delete = remove_vanished.unwrap_or(true);
- check_pull_privs(&userid, &store, &remote, &remote_store, delete)?;
+ check_pull_privs(&auth_id, &store, &remote, &remote_store, delete)?;
let (client, src_repo, tgt_store) = get_pull_parameters(&store, &remote, &remote_store).await?;
// fixme: set to_stdout to false?
- let upid_str = WorkerTask::spawn("sync", Some(store.clone()), userid.clone(), true, move |worker| async move {
+ let upid_str = WorkerTask::spawn("sync", Some(store.clone()), auth_id.clone(), true, move |worker| async move {
worker.log(format!("sync datastore '{}' start", store));
- let pull_future = pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, userid);
+ let pull_future = pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, auth_id);
let future = select!{
success = pull_future.fuse() => success,
abort = worker.abort_future().map(|_| Err(format_err!("pull aborted"))) => abort,
diff --git a/src/api2/reader.rs b/src/api2/reader.rs
index 0fa9b08e..3eeece52 100644
--- a/src/api2/reader.rs
+++ b/src/api2/reader.rs
@@ -55,11 +55,11 @@ fn upgrade_to_backup_reader_protocol(
async move {
let debug = param["debug"].as_bool().unwrap_or(false);
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store = tools::required_string_param(&param, "store")?.to_owned();
let user_info = CachedUserInfo::new()?;
- let privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let priv_read = privs & PRIV_DATASTORE_READ != 0;
let priv_backup = privs & PRIV_DATASTORE_BACKUP != 0;
@@ -94,7 +94,7 @@ fn upgrade_to_backup_reader_protocol(
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
if !priv_read {
let owner = datastore.get_owner(backup_dir.group())?;
- if owner != userid {
+ if owner != auth_id {
bail!("backup owner check failed!");
}
}
@@ -110,10 +110,10 @@ fn upgrade_to_backup_reader_protocol(
let worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_dir.backup_time());
- WorkerTask::spawn("reader", Some(worker_id), userid.clone(), true, move |worker| {
+ WorkerTask::spawn("reader", Some(worker_id), auth_id.clone(), true, move |worker| {
let mut env = ReaderEnvironment::new(
env_type,
- userid,
+ auth_id,
worker.clone(),
datastore,
backup_dir,
diff --git a/src/api2/reader/environment.rs b/src/api2/reader/environment.rs
index 4c7db36f..e1cc2754 100644
--- a/src/api2/reader/environment.rs
+++ b/src/api2/reader/environment.rs
@@ -5,7 +5,7 @@ use serde_json::{json, Value};
use proxmox::api::{RpcEnvironment, RpcEnvironmentType};
-use crate::api2::types::Userid;
+use crate::api2::types::Authid;
use crate::backup::*;
use crate::server::formatter::*;
use crate::server::WorkerTask;
@@ -17,7 +17,7 @@ use crate::server::WorkerTask;
pub struct ReaderEnvironment {
env_type: RpcEnvironmentType,
result_attributes: Value,
- user: Userid,
+ auth_id: Authid,
pub debug: bool,
pub formatter: &'static OutputFormatter,
pub worker: Arc<WorkerTask>,
@@ -29,7 +29,7 @@ pub struct ReaderEnvironment {
impl ReaderEnvironment {
pub fn new(
env_type: RpcEnvironmentType,
- user: Userid,
+ auth_id: Authid,
worker: Arc<WorkerTask>,
datastore: Arc<DataStore>,
backup_dir: BackupDir,
@@ -39,7 +39,7 @@ impl ReaderEnvironment {
Self {
result_attributes: json!({}),
env_type,
- user,
+ auth_id,
worker,
datastore,
debug: false,
@@ -82,12 +82,12 @@ impl RpcEnvironment for ReaderEnvironment {
self.env_type
}
- fn set_user(&mut self, _user: Option<String>) {
- panic!("unable to change user");
+ fn set_auth_id(&mut self, _auth_id: Option<String>) {
+ panic!("unable to change auth_id");
}
- fn get_user(&self) -> Option<String> {
- Some(self.user.to_string())
+ fn get_auth_id(&self) -> Option<String> {
+ Some(self.auth_id.to_string())
}
}
diff --git a/src/api2/status.rs b/src/api2/status.rs
index 372b4822..02ba1a78 100644
--- a/src/api2/status.rs
+++ b/src/api2/status.rs
@@ -16,9 +16,9 @@ use crate::api2::types::{
DATASTORE_SCHEMA,
RRDMode,
RRDTimeFrameResolution,
+ Authid,
TaskListItem,
TaskStateType,
- Userid,
};
use crate::server;
@@ -87,13 +87,13 @@ fn datastore_status(
let (config, _digest) = datastore::config()?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let mut list = Vec::new();
for (store, (_, _)) in &config.sections {
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
if !allowed {
continue;
@@ -221,9 +221,9 @@ pub fn list_tasks(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TaskListItem>, Error> {
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &["system", "tasks"]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["system", "tasks"]);
let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;
let since = since.unwrap_or_else(|| 0);
@@ -238,7 +238,7 @@ pub fn list_tasks(
.filter_map(|info| {
match info {
Ok(info) => {
- if list_all || info.upid.userid == userid {
+ if list_all || info.upid.auth_id == auth_id {
if let Some(filter) = &typefilter {
if !info.upid.worker_type.contains(filter) {
return None;
diff --git a/src/api2/types/mod.rs b/src/api2/types/mod.rs
index 3f723e32..b1287583 100644
--- a/src/api2/types/mod.rs
+++ b/src/api2/types/mod.rs
@@ -376,7 +376,7 @@ pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name
},
},
owner: {
- type: Userid,
+ type: Authid,
optional: true,
},
},
@@ -394,7 +394,7 @@ pub struct GroupListItem {
pub files: Vec<String>,
/// The owner of group
#[serde(skip_serializing_if="Option::is_none")]
- pub owner: Option<Userid>,
+ pub owner: Option<Authid>,
}
#[api()]
@@ -452,7 +452,7 @@ pub struct SnapshotVerifyState {
},
},
owner: {
- type: Userid,
+ type: Authid,
optional: true,
},
},
@@ -477,7 +477,7 @@ pub struct SnapshotListItem {
pub size: Option<u64>,
/// The owner of the snapshots group
#[serde(skip_serializing_if="Option::is_none")]
- pub owner: Option<Userid>,
+ pub owner: Option<Authid>,
}
#[api(
@@ -627,7 +627,7 @@ pub struct StorageStatus {
#[api(
properties: {
upid: { schema: UPID_SCHEMA },
- user: { type: Userid },
+ userid: { type: Authid },
},
)]
#[derive(Serialize, Deserialize)]
@@ -646,8 +646,8 @@ pub struct TaskListItem {
pub worker_type: String,
/// Worker ID (arbitrary ASCII string)
pub worker_id: Option<String>,
- /// The user who started the task
- pub user: Userid,
+ /// The authenticated entity who started the task
+ pub userid: Authid,
/// The task end time (Epoch)
#[serde(skip_serializing_if="Option::is_none")]
pub endtime: Option<i64>,
@@ -670,7 +670,7 @@ impl From<crate::server::TaskListInfo> for TaskListItem {
starttime: info.upid.starttime,
worker_type: info.upid.worker_type,
worker_id: info.upid.worker_id,
- user: info.upid.userid,
+ userid: info.upid.auth_id,
endtime,
status,
}
diff --git a/src/backup/datastore.rs b/src/backup/datastore.rs
index 485a10ff..b0b04c00 100644
--- a/src/backup/datastore.rs
+++ b/src/backup/datastore.rs
@@ -23,7 +23,7 @@ use crate::task::TaskState;
use crate::tools;
use crate::tools::format::HumanByte;
use crate::tools::fs::{lock_dir_noblock, DirLockGuard};
-use crate::api2::types::{GarbageCollectionStatus, Userid};
+use crate::api2::types::{Authid, GarbageCollectionStatus};
use crate::server::UPID;
lazy_static! {
@@ -276,8 +276,8 @@ impl DataStore {
/// Returns the backup owner.
///
- /// The backup owner is the user who first created the backup group.
- pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<Userid, Error> {
+ /// The backup owner is the entity who first created the backup group.
+ pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<Authid, Error> {
let mut full_path = self.base_path();
full_path.push(backup_group.group_path());
full_path.push("owner");
@@ -289,7 +289,7 @@ impl DataStore {
pub fn set_owner(
&self,
backup_group: &BackupGroup,
- userid: &Userid,
+ auth_id: &Authid,
force: bool,
) -> Result<(), Error> {
let mut path = self.base_path();
@@ -309,7 +309,7 @@ impl DataStore {
let mut file = open_options.open(&path)
.map_err(|err| format_err!("unable to create owner file {:?} - {}", path, err))?;
- writeln!(file, "{}", userid)
+ writeln!(file, "{}", auth_id)
.map_err(|err| format_err!("unable to write owner file {:?} - {}", path, err))?;
Ok(())
@@ -324,8 +324,8 @@ impl DataStore {
pub fn create_locked_backup_group(
&self,
backup_group: &BackupGroup,
- userid: &Userid,
- ) -> Result<(Userid, DirLockGuard), Error> {
+ auth_id: &Authid,
+ ) -> Result<(Authid, DirLockGuard), Error> {
// create intermediate path first:
let base_path = self.base_path();
@@ -339,7 +339,7 @@ impl DataStore {
match std::fs::create_dir(&full_path) {
Ok(_) => {
let guard = lock_dir_noblock(&full_path, "backup group", "another backup is already running")?;
- self.set_owner(backup_group, userid, false)?;
+ self.set_owner(backup_group, auth_id, false)?;
let owner = self.get_owner(backup_group)?; // just to be sure
Ok((owner, guard))
}
diff --git a/src/bin/proxmox-backup-client.rs b/src/bin/proxmox-backup-client.rs
index 4bb05bd1..8c68ffd2 100644
--- a/src/bin/proxmox-backup-client.rs
+++ b/src/bin/proxmox-backup-client.rs
@@ -36,7 +36,7 @@ use proxmox_backup::api2::types::*;
use proxmox_backup::api2::version;
use proxmox_backup::client::*;
use proxmox_backup::pxar::catalog::*;
-use proxmox_backup::config::user::complete_user_name;
+use proxmox_backup::config::user::complete_userid;
use proxmox_backup::backup::{
archive_type,
decrypt_key,
@@ -425,7 +425,7 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {
description: "Backup group.",
},
"new-owner": {
- type: Userid,
+ type: Authid,
},
}
}
@@ -2010,7 +2010,7 @@ fn main() {
let change_owner_cmd_def = CliCommand::new(&API_METHOD_CHANGE_BACKUP_OWNER)
.arg_param(&["group", "new-owner"])
.completion_cb("group", complete_backup_group)
- .completion_cb("new-owner", complete_user_name)
+ .completion_cb("new-owner", complete_userid)
.completion_cb("repository", complete_repository);
let cmd_def = CliCommandMap::new()
diff --git a/src/bin/proxmox-backup-manager.rs b/src/bin/proxmox-backup-manager.rs
index f0dbea93..f13e55ee 100644
--- a/src/bin/proxmox-backup-manager.rs
+++ b/src/bin/proxmox-backup-manager.rs
@@ -388,7 +388,7 @@ fn main() {
let mut rpcenv = CliEnvironment::new();
- rpcenv.set_user(Some(String::from("root@pam")));
+ rpcenv.set_auth_id(Some(String::from("root@pam")));
proxmox_backup::tools::runtime::main(run_async_cli_command(cmd_def, rpcenv));
}
diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs
index f2612e1f..b8c22af3 100644
--- a/src/bin/proxmox-backup-proxy.rs
+++ b/src/bin/proxmox-backup-proxy.rs
@@ -30,7 +30,7 @@ use proxmox_backup::{
};
-use proxmox_backup::api2::types::Userid;
+use proxmox_backup::api2::types::{Authid, Userid};
use proxmox_backup::configdir;
use proxmox_backup::buildcfg;
use proxmox_backup::server;
@@ -334,7 +334,7 @@ async fn schedule_datastore_garbage_collection() {
if let Err(err) = WorkerTask::new_thread(
worker_type,
Some(store.clone()),
- Userid::backup_userid().clone(),
+ Authid::backup_auth_id().clone(),
false,
move |worker| {
job.start(&worker.upid().to_string())?;
@@ -463,7 +463,7 @@ async fn schedule_datastore_prune() {
if let Err(err) = WorkerTask::new_thread(
worker_type,
Some(store.clone()),
- Userid::backup_userid().clone(),
+ Authid::backup_auth_id().clone(),
false,
move |worker| {
@@ -579,9 +579,9 @@ async fn schedule_datastore_sync_jobs() {
Err(_) => continue, // could not get lock
};
- let userid = Userid::backup_userid().clone();
+ let auth_id = Authid::backup_auth_id();
- if let Err(err) = do_sync_job(job, job_config, &userid, Some(event_str)) {
+ if let Err(err) = do_sync_job(job, job_config, &auth_id, Some(event_str)) {
eprintln!("unable to start datastore sync job {} - {}", &job_id, err);
}
}
@@ -642,8 +642,8 @@ async fn schedule_datastore_verify_jobs() {
Ok(job) => job,
Err(_) => continue, // could not get lock
};
- let userid = Userid::backup_userid().clone();
- if let Err(err) = do_verification_job(job, job_config, &userid, Some(event_str)) {
+ let auth_id = Authid::backup_auth_id();
+ if let Err(err) = do_verification_job(job, job_config, &auth_id, Some(event_str)) {
eprintln!("unable to start datastore verification job {} - {}", &job_id, err);
}
}
@@ -704,7 +704,7 @@ async fn schedule_task_log_rotate() {
if let Err(err) = WorkerTask::new_thread(
worker_type,
Some(job_id.to_string()),
- Userid::backup_userid().clone(),
+ Authid::backup_auth_id().clone(),
false,
move |worker| {
job.start(&worker.upid().to_string())?;
diff --git a/src/bin/proxmox_backup_manager/acl.rs b/src/bin/proxmox_backup_manager/acl.rs
index bc2e8f7a..3fbb3bcb 100644
--- a/src/bin/proxmox_backup_manager/acl.rs
+++ b/src/bin/proxmox_backup_manager/acl.rs
@@ -60,7 +60,7 @@ pub fn acl_commands() -> CommandLineInterface {
"update",
CliCommand::new(&api2::access::acl::API_METHOD_UPDATE_ACL)
.arg_param(&["path", "role"])
- .completion_cb("userid", config::user::complete_user_name)
+ .completion_cb("userid", config::user::complete_userid)
.completion_cb("path", config::datastore::complete_acl_path)
);
diff --git a/src/bin/proxmox_backup_manager/user.rs b/src/bin/proxmox_backup_manager/user.rs
index 80dbcb1b..af05e9b5 100644
--- a/src/bin/proxmox_backup_manager/user.rs
+++ b/src/bin/proxmox_backup_manager/user.rs
@@ -62,13 +62,13 @@ pub fn user_commands() -> CommandLineInterface {
"update",
CliCommand::new(&api2::access::user::API_METHOD_UPDATE_USER)
.arg_param(&["userid"])
- .completion_cb("userid", config::user::complete_user_name)
+ .completion_cb("userid", config::user::complete_userid)
)
.insert(
"remove",
CliCommand::new(&api2::access::user::API_METHOD_DELETE_USER)
.arg_param(&["userid"])
- .completion_cb("userid", config::user::complete_user_name)
+ .completion_cb("userid", config::user::complete_userid)
);
cmd_def.into()
diff --git a/src/client/pull.rs b/src/client/pull.rs
index a1d5ea48..7cb8d8e1 100644
--- a/src/client/pull.rs
+++ b/src/client/pull.rs
@@ -491,7 +491,7 @@ pub async fn pull_store(
src_repo: &BackupRepository,
tgt_store: Arc<DataStore>,
delete: bool,
- userid: Userid,
+ auth_id: Authid,
) -> Result<(), Error> {
// explicit create shared lock to prevent GC on newly created chunks
@@ -524,11 +524,11 @@ pub async fn pull_store(
for (groups_done, item) in list.into_iter().enumerate() {
let group = BackupGroup::new(&item.backup_type, &item.backup_id);
- let (owner, _lock_guard) = tgt_store.create_locked_backup_group(&group, &userid)?;
+ let (owner, _lock_guard) = tgt_store.create_locked_backup_group(&group, &auth_id)?;
// permission check
- if userid != owner { // only the owner is allowed to create additional snapshots
+ if auth_id != owner { // only the owner is allowed to create additional snapshots
worker.log(format!("sync group {}/{} failed - owner check failed ({} != {})",
- item.backup_type, item.backup_id, userid, owner));
+ item.backup_type, item.backup_id, auth_id, owner));
errors = true; // do not stop here, instead continue
} else if let Err(err) = pull_group(
diff --git a/src/config/acl.rs b/src/config/acl.rs
index 39f9d030..12b5a851 100644
--- a/src/config/acl.rs
+++ b/src/config/acl.rs
@@ -15,7 +15,7 @@ use proxmox::tools::{fs::replace_file, fs::CreateOptions};
use proxmox::constnamedbitmap;
use proxmox::api::{api, schema::*};
-use crate::api2::types::Userid;
+use crate::api2::types::{Authid,Userid};
// define Privilege bitfield
@@ -231,7 +231,7 @@ pub struct AclTree {
}
pub struct AclTreeNode {
- pub users: HashMap<Userid, HashMap<String, bool>>,
+ pub users: HashMap<Authid, HashMap<String, bool>>,
pub groups: HashMap<String, HashMap<String, bool>>,
pub children: BTreeMap<String, AclTreeNode>,
}
@@ -246,21 +246,21 @@ impl AclTreeNode {
}
}
- pub fn extract_roles(&self, user: &Userid, all: bool) -> HashSet<String> {
- let user_roles = self.extract_user_roles(user, all);
+ pub fn extract_roles(&self, auth_id: &Authid, all: bool) -> HashSet<String> {
+ let user_roles = self.extract_user_roles(auth_id, all);
if !user_roles.is_empty() {
// user privs always override group privs
return user_roles
};
- self.extract_group_roles(user, all)
+ self.extract_group_roles(auth_id.user(), all)
}
- pub fn extract_user_roles(&self, user: &Userid, all: bool) -> HashSet<String> {
+ pub fn extract_user_roles(&self, auth_id: &Authid, all: bool) -> HashSet<String> {
let mut set = HashSet::new();
- let roles = match self.users.get(user) {
+ let roles = match self.users.get(auth_id) {
Some(m) => m,
None => return set,
};
@@ -312,8 +312,8 @@ impl AclTreeNode {
roles.remove(role);
}
- pub fn delete_user_role(&mut self, userid: &Userid, role: &str) {
- let roles = match self.users.get_mut(userid) {
+ pub fn delete_user_role(&mut self, auth_id: &Authid, role: &str) {
+ let roles = match self.users.get_mut(auth_id) {
Some(r) => r,
None => return,
};
@@ -331,8 +331,8 @@ impl AclTreeNode {
}
}
- pub fn insert_user_role(&mut self, user: Userid, role: String, propagate: bool) {
- let map = self.users.entry(user).or_insert_with(|| HashMap::new());
+ pub fn insert_user_role(&mut self, auth_id: Authid, role: String, propagate: bool) {
+ let map = self.users.entry(auth_id).or_insert_with(|| HashMap::new());
if role == ROLE_NAME_NO_ACCESS {
map.clear();
map.insert(role, propagate);
@@ -383,13 +383,13 @@ impl AclTree {
node.delete_group_role(group, role);
}
- pub fn delete_user_role(&mut self, path: &str, userid: &Userid, role: &str) {
+ pub fn delete_user_role(&mut self, path: &str, auth_id: &Authid, role: &str) {
let path = split_acl_path(path);
let node = match self.get_node(&path) {
Some(n) => n,
None => return,
};
- node.delete_user_role(userid, role);
+ node.delete_user_role(auth_id, role);
}
pub fn insert_group_role(&mut self, path: &str, group: &str, role: &str, propagate: bool) {
@@ -398,10 +398,10 @@ impl AclTree {
node.insert_group_role(group.to_string(), role.to_string(), propagate);
}
- pub fn insert_user_role(&mut self, path: &str, user: &Userid, role: &str, propagate: bool) {
+ pub fn insert_user_role(&mut self, path: &str, auth_id: &Authid, role: &str, propagate: bool) {
let path = split_acl_path(path);
let node = self.get_or_insert_node(&path);
- node.insert_user_role(user.to_owned(), role.to_string(), propagate);
+ node.insert_user_role(auth_id.to_owned(), role.to_string(), propagate);
}
fn write_node_config(
@@ -413,18 +413,18 @@ impl AclTree {
let mut role_ug_map0 = HashMap::new();
let mut role_ug_map1 = HashMap::new();
- for (user, roles) in &node.users {
+ for (auth_id, roles) in &node.users {
// no need to save, because root is always 'Administrator'
- if user == "root@pam" { continue; }
+ if !auth_id.is_token() && auth_id.user() == "root@pam" { continue; }
for (role, propagate) in roles {
let role = role.as_str();
- let user = user.to_string();
+ let auth_id = auth_id.to_string();
if *propagate {
role_ug_map1.entry(role).or_insert_with(|| BTreeSet::new())
- .insert(user);
+ .insert(auth_id);
} else {
role_ug_map0.entry(role).or_insert_with(|| BTreeSet::new())
- .insert(user);
+ .insert(auth_id);
}
}
}
@@ -576,10 +576,10 @@ impl AclTree {
Ok(tree)
}
- pub fn roles(&self, userid: &Userid, path: &[&str]) -> HashSet<String> {
+ pub fn roles(&self, auth_id: &Authid, path: &[&str]) -> HashSet<String> {
let mut node = &self.root;
- let mut role_set = node.extract_roles(userid, path.is_empty());
+ let mut role_set = node.extract_roles(auth_id, path.is_empty());
for (pos, comp) in path.iter().enumerate() {
let last_comp = (pos + 1) == path.len();
@@ -587,7 +587,7 @@ impl AclTree {
Some(n) => n,
None => return role_set, // path not found
};
- let new_set = node.extract_roles(userid, last_comp);
+ let new_set = node.extract_roles(auth_id, last_comp);
if !new_set.is_empty() {
// overwrite previous settings
role_set = new_set;
@@ -675,22 +675,22 @@ mod test {
use anyhow::{Error};
use super::AclTree;
- use crate::api2::types::Userid;
+ use crate::api2::types::Authid;
fn check_roles(
tree: &AclTree,
- user: &Userid,
+ auth_id: &Authid,
path: &str,
expected_roles: &str,
) {
let path_vec = super::split_acl_path(path);
- let mut roles = tree.roles(user, &path_vec)
+ let mut roles = tree.roles(auth_id, &path_vec)
.iter().map(|v| v.clone()).collect::<Vec<String>>();
roles.sort();
let roles = roles.join(",");
- assert_eq!(roles, expected_roles, "\nat check_roles for '{}' on '{}'", user, path);
+ assert_eq!(roles, expected_roles, "\nat check_roles for '{}' on '{}'", auth_id, path);
}
#[test]
@@ -721,13 +721,13 @@ acl:1:/storage:user1@pbs:Admin
acl:1:/storage/store1:user1@pbs:DatastoreBackup
acl:1:/storage/store2:user2@pbs:DatastoreBackup
"###)?;
- let user1: Userid = "user1@pbs".parse()?;
+ let user1: Authid = "user1@pbs".parse()?;
check_roles(&tree, &user1, "/", "");
check_roles(&tree, &user1, "/storage", "Admin");
check_roles(&tree, &user1, "/storage/store1", "DatastoreBackup");
check_roles(&tree, &user1, "/storage/store2", "Admin");
- let user2: Userid = "user2@pbs".parse()?;
+ let user2: Authid = "user2@pbs".parse()?;
check_roles(&tree, &user2, "/", "");
check_roles(&tree, &user2, "/storage", "");
check_roles(&tree, &user2, "/storage/store1", "");
@@ -744,7 +744,7 @@ acl:1:/:user1@pbs:Admin
acl:1:/storage:user1@pbs:NoAccess
acl:1:/storage/store1:user1@pbs:DatastoreBackup
"###)?;
- let user1: Userid = "user1@pbs".parse()?;
+ let user1: Authid = "user1@pbs".parse()?;
check_roles(&tree, &user1, "/", "Admin");
check_roles(&tree, &user1, "/storage", "NoAccess");
check_roles(&tree, &user1, "/storage/store1", "DatastoreBackup");
@@ -770,7 +770,7 @@ acl:1:/storage/store1:user1@pbs:DatastoreBackup
let mut tree = AclTree::new();
- let user1: Userid = "user1@pbs".parse()?;
+ let user1: Authid = "user1@pbs".parse()?;
tree.insert_user_role("/", &user1, "Admin", true);
tree.insert_user_role("/", &user1, "Audit", true);
@@ -794,7 +794,7 @@ acl:1:/storage/store1:user1@pbs:DatastoreBackup
let mut tree = AclTree::new();
- let user1: Userid = "user1@pbs".parse()?;
+ let user1: Authid = "user1@pbs".parse()?;
tree.insert_user_role("/storage", &user1, "NoAccess", true);
diff --git a/src/config/cached_user_info.rs b/src/config/cached_user_info.rs
index cf9c534d..57d53aac 100644
--- a/src/config/cached_user_info.rs
+++ b/src/config/cached_user_info.rs
@@ -9,10 +9,10 @@ use lazy_static::lazy_static;
use proxmox::api::UserInformation;
use super::acl::{AclTree, ROLE_NAMES, ROLE_ADMIN};
-use super::user::User;
-use crate::api2::types::Userid;
+use super::user::{ApiToken, User};
+use crate::api2::types::{Authid, Userid};
-/// Cache User/Group/Acl configuration data for fast permission tests
+/// Cache User/Group/Token/Acl configuration data for fast permission tests
pub struct CachedUserInfo {
user_cfg: Arc<SectionConfigData>,
acl_tree: Arc<AclTree>,
@@ -57,8 +57,10 @@ impl CachedUserInfo {
Ok(config)
}
- /// Test if a user account is enabled and not expired
- pub fn is_active_user(&self, userid: &Userid) -> bool {
+ /// Test if an authentication id is enabled and not expired
+ pub fn is_active_auth_id(&self, auth_id: &Authid) -> bool {
+ let userid = auth_id.user();
+
if let Ok(info) = self.user_cfg.lookup::<User>("user", userid.as_str()) {
if !info.enable.unwrap_or(true) {
return false;
@@ -68,24 +70,41 @@ impl CachedUserInfo {
return false;
}
}
- return true;
} else {
return false;
}
+
+ if auth_id.is_token() {
+ if let Ok(info) = self.user_cfg.lookup::<ApiToken>("token", &auth_id.to_string()) {
+ if !info.enable.unwrap_or(true) {
+ return false;
+ }
+ if let Some(expire) = info.expire {
+ if expire > 0 && expire <= now() {
+ return false;
+ }
+ }
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ return true;
}
pub fn check_privs(
&self,
- userid: &Userid,
+ auth_id: &Authid,
path: &[&str],
required_privs: u64,
partial: bool,
) -> Result<(), Error> {
- let user_privs = self.lookup_privs(&userid, path);
+ let privs = self.lookup_privs(&auth_id, path);
let allowed = if partial {
- (user_privs & required_privs) != 0
+ (privs & required_privs) != 0
} else {
- (user_privs & required_privs) == required_privs
+ (privs & required_privs) == required_privs
};
if !allowed {
// printing the path doesn't leaks any information as long as we
@@ -95,27 +114,33 @@ impl CachedUserInfo {
Ok(())
}
- pub fn is_superuser(&self, userid: &Userid) -> bool {
- userid == "root@pam"
+ pub fn is_superuser(&self, auth_id: &Authid) -> bool {
+ !auth_id.is_token() && auth_id.user() == "root@pam"
}
pub fn is_group_member(&self, _userid: &Userid, _group: &str) -> bool {
false
}
- pub fn lookup_privs(&self, userid: &Userid, path: &[&str]) -> u64 {
-
- if self.is_superuser(userid) {
+ pub fn lookup_privs(&self, auth_id: &Authid, path: &[&str]) -> u64 {
+ if self.is_superuser(auth_id) {
return ROLE_ADMIN;
}
- let roles = self.acl_tree.roles(userid, path);
+ let roles = self.acl_tree.roles(auth_id, path);
let mut privs: u64 = 0;
for role in roles {
if let Some((role_privs, _)) = ROLE_NAMES.get(role.as_str()) {
privs |= role_privs;
}
}
+
+ if auth_id.is_token() {
+ // limit privs to that of owning user
+ let user_auth_id = Authid::from(auth_id.user().clone());
+ privs &= self.lookup_privs(&user_auth_id, path);
+ }
+
privs
}
}
@@ -129,9 +154,9 @@ impl UserInformation for CachedUserInfo {
false
}
- fn lookup_privs(&self, userid: &str, path: &[&str]) -> u64 {
- match userid.parse::<Userid>() {
- Ok(userid) => Self::lookup_privs(self, &userid, path),
+ fn lookup_privs(&self, auth_id: &str, path: &[&str]) -> u64 {
+ match auth_id.parse::<Authid>() {
+ Ok(auth_id) => Self::lookup_privs(self, &auth_id, path),
Err(_) => 0,
}
}
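[Not part of the patch: a hedged illustration of the clamping introduced in lookup_privs() above, assuming CachedUserInfo::new() as used elsewhere in the tree and an example /datastore/store1 ACL path.]

    // illustrative only: a token's effective privileges are ANDed with the
    // owning user's privileges on the same path
    use anyhow::Error;
    use proxmox_backup::api2::types::Authid;
    use proxmox_backup::config::cached_user_info::CachedUserInfo;

    fn clamp_sketch() -> Result<(), Error> {
        let user_info = CachedUserInfo::new()?;

        let owner: Authid = "user1@pbs".parse()?;
        let token: Authid = "user1@pbs!sync".parse()?;   // assumed token id

        let owner_privs = user_info.lookup_privs(&owner, &["datastore", "store1"]);
        let token_privs = user_info.lookup_privs(&token, &["datastore", "store1"]);

        // must hold for any path, per the hunk above
        assert_eq!(token_privs & !owner_privs, 0);
        Ok(())
    }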
diff --git a/src/config/remote.rs b/src/config/remote.rs
index 9e597342..7ad653ac 100644
--- a/src/config/remote.rs
+++ b/src/config/remote.rs
@@ -45,7 +45,7 @@ pub const REMOTE_PASSWORD_SCHEMA: Schema = StringSchema::new("Password or auth t
type: u16,
},
userid: {
- type: Userid,
+ type: Authid,
},
password: {
schema: REMOTE_PASSWORD_SCHEMA,
diff --git a/src/config/user.rs b/src/config/user.rs
index b72fa40b..78571daa 100644
--- a/src/config/user.rs
+++ b/src/config/user.rs
@@ -52,6 +52,36 @@ pub const EMAIL_SCHEMA: Schema = StringSchema::new("E-Mail Address.")
.max_length(64)
.schema();
+#[api(
+ properties: {
+ tokenid: {
+ schema: PROXMOX_TOKEN_ID_SCHEMA,
+ },
+ comment: {
+ optional: true,
+ schema: SINGLE_LINE_COMMENT_SCHEMA,
+ },
+ enable: {
+ optional: true,
+ schema: ENABLE_USER_SCHEMA,
+ },
+ expire: {
+ optional: true,
+ schema: EXPIRE_USER_SCHEMA,
+ },
+ }
+)]
+#[derive(Serialize,Deserialize)]
+/// ApiToken properties.
+pub struct ApiToken {
+ pub tokenid: Authid,
+ #[serde(skip_serializing_if="Option::is_none")]
+ pub comment: Option<String>,
+ #[serde(skip_serializing_if="Option::is_none")]
+ pub enable: Option<bool>,
+ #[serde(skip_serializing_if="Option::is_none")]
+ pub expire: Option<i64>,
+}
#[api(
properties: {
@@ -103,15 +133,21 @@ pub struct User {
}
fn init() -> SectionConfig {
- let obj_schema = match User::API_SCHEMA {
- Schema::Object(ref obj_schema) => obj_schema,
+ let mut config = SectionConfig::new(&Authid::API_SCHEMA);
+
+ let user_schema = match User::API_SCHEMA {
+ Schema::Object(ref user_schema) => user_schema,
_ => unreachable!(),
};
+ let user_plugin = SectionConfigPlugin::new("user".to_string(), Some("userid".to_string()), user_schema);
+ config.register_plugin(user_plugin);
- let plugin = SectionConfigPlugin::new("user".to_string(), Some("userid".to_string()), obj_schema);
- let mut config = SectionConfig::new(&Userid::API_SCHEMA);
-
- config.register_plugin(plugin);
+ let token_schema = match ApiToken::API_SCHEMA {
+ Schema::Object(ref token_schema) => token_schema,
+ _ => unreachable!(),
+ };
+ let token_plugin = SectionConfigPlugin::new("token".to_string(), Some("tokenid".to_string()), token_schema);
+ config.register_plugin(token_plugin);
config
}
@@ -206,9 +242,26 @@ pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
}
// shell completion helper
-pub fn complete_user_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
+pub fn complete_userid(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
match config() {
- Ok((data, _digest)) => data.sections.iter().map(|(id, _)| id.to_string()).collect(),
+ Ok((data, _digest)) => {
+ data.sections.iter()
+ .filter_map(|(id, (section_type, _))| {
+ if section_type == "user" {
+ Some(id.to_string())
+ } else {
+ None
+ }
+ }).collect()
+ },
Err(_) => return vec![],
}
}
+
+// shell completion helper
+pub fn complete_authid(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
+ match config() {
+ Ok((data, _digest)) => data.sections.iter().map(|(id, _)| id.to_string()).collect(),
+ Err(_) => vec![],
+ }
+}
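[Not part of the patch: a sketch of how the two section types registered above can be read back from user.cfg; the ids are assumed examples and the lookup() calls follow the pattern used in cached_user_info.rs.]

    // illustrative only: user.cfg now carries "user" and "token" sections
    use anyhow::Error;
    use proxmox_backup::config::user::{self, ApiToken, User};

    fn user_cfg_sketch() -> Result<(), Error> {
        let (data, _digest) = user::config()?;

        let user: User = data.lookup("user", "user1@pbs")?;            // assumed id
        let token: ApiToken = data.lookup("token", "user1@pbs!sync")?; // assumed id

        println!("user enabled: {:?}", user.enable);
        println!("token expires: {:?}", token.expire);
        Ok(())
    }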
diff --git a/src/server/environment.rs b/src/server/environment.rs
index 5fbff307..2577c379 100644
--- a/src/server/environment.rs
+++ b/src/server/environment.rs
@@ -6,7 +6,7 @@ use proxmox::api::{RpcEnvironment, RpcEnvironmentType};
pub struct RestEnvironment {
env_type: RpcEnvironmentType,
result_attributes: Value,
- user: Option<String>,
+ auth_id: Option<String>,
client_ip: Option<std::net::SocketAddr>,
}
@@ -14,7 +14,7 @@ impl RestEnvironment {
pub fn new(env_type: RpcEnvironmentType) -> Self {
Self {
result_attributes: json!({}),
- user: None,
+ auth_id: None,
client_ip: None,
env_type,
}
@@ -35,12 +35,12 @@ impl RpcEnvironment for RestEnvironment {
self.env_type
}
- fn set_user(&mut self, user: Option<String>) {
- self.user = user;
+ fn set_auth_id(&mut self, auth_id: Option<String>) {
+ self.auth_id = auth_id;
}
- fn get_user(&self) -> Option<String> {
- self.user.clone()
+ fn get_auth_id(&self) -> Option<String> {
+ self.auth_id.clone()
}
fn set_client_ip(&mut self, client_ip: Option<std::net::SocketAddr>) {
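[Not part of the patch: the call-site pattern the renamed RpcEnvironment methods are meant to enforce, assuming the accompanying proxmox-side rename; the helper name is made up for illustration.]

    // illustrative only: read the authenticated id (user or token) and parse it
    use anyhow::{format_err, Error};
    use proxmox::api::RpcEnvironment;
    use proxmox_backup::api2::types::Authid;

    fn current_auth_id(rpcenv: &dyn RpcEnvironment) -> Result<Authid, Error> {
        let auth_id = rpcenv
            .get_auth_id()
            .ok_or_else(|| format_err!("no authenticated entity"))?;
        Ok(auth_id.parse()?)
    }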
diff --git a/src/server/rest.rs b/src/server/rest.rs
index c650a3aa..2b835c4a 100644
--- a/src/server/rest.rs
+++ b/src/server/rest.rs
@@ -42,7 +42,7 @@ use super::formatter::*;
use super::ApiConfig;
use crate::auth_helpers::*;
-use crate::api2::types::Userid;
+use crate::api2::types::{Authid, Userid};
use crate::tools;
use crate::tools::FileLogger;
use crate::tools::ticket::Ticket;
@@ -138,9 +138,9 @@ fn log_response(
log::error!("{} {}: {} {}: [client {}] {}", method.as_str(), path, status.as_str(), reason, peer, message);
}
if let Some(logfile) = logfile {
- let user = match resp.extensions().get::<Userid>() {
- Some(userid) => userid.as_str(),
- None => "-",
+ let auth_id = match resp.extensions().get::<Authid>() {
+ Some(auth_id) => auth_id.to_string(),
+ None => "-".to_string(),
};
let now = proxmox::tools::time::epoch_i64();
// time format which apache/nginx use (by default), copied from pve-http-server
@@ -153,7 +153,7 @@ fn log_response(
.log(format!(
"{} - {} [{}] \"{} {}\" {} {} {}",
peer.ip(),
- user,
+ auth_id,
datetime,
method.as_str(),
path,
@@ -441,7 +441,7 @@ fn get_index(
.unwrap();
if let Some(userid) = userid {
- resp.extensions_mut().insert(userid);
+ resp.extensions_mut().insert(Authid::from((userid, None)));
}
resp
@@ -555,14 +555,15 @@ fn check_auth(
ticket: &Option<String>,
csrf_token: &Option<String>,
user_info: &CachedUserInfo,
-) -> Result<Userid, Error> {
+) -> Result<Authid, Error> {
let ticket_lifetime = tools::ticket::TICKET_LIFETIME;
let ticket = ticket.as_ref().map(String::as_str);
let userid: Userid = Ticket::parse(&ticket.ok_or_else(|| format_err!("missing ticket"))?)?
.verify_with_time_frame(public_auth_key(), "PBS", None, -300..ticket_lifetime)?;
- if !user_info.is_active_user(&userid) {
+ let auth_id = Authid::from(userid.clone());
+ if !user_info.is_active_auth_id(&auth_id) {
bail!("user account disabled or expired.");
}
@@ -574,7 +575,7 @@ fn check_auth(
}
}
- Ok(userid)
+ Ok(Authid::from(userid))
}
async fn handle_request(
@@ -632,7 +633,7 @@ async fn handle_request(
if auth_required {
let (ticket, csrf_token, _) = extract_auth_data(&parts.headers);
match check_auth(&method, &ticket, &csrf_token, &user_info) {
- Ok(userid) => rpcenv.set_user(Some(userid.to_string())),
+ Ok(authid) => rpcenv.set_auth_id(Some(authid.to_string())),
Err(err) => {
// always delay unauthorized calls by 3 seconds (from start of request)
let err = http_err!(UNAUTHORIZED, "authentication failed - {}", err);
@@ -648,8 +649,8 @@ async fn handle_request(
return Ok((formatter.format_error)(err));
}
Some(api_method) => {
- let user = rpcenv.get_user();
- if !check_api_permission(api_method.access.permission, user.as_deref(), &uri_param, user_info.as_ref()) {
+ let auth_id = rpcenv.get_auth_id();
+ if !check_api_permission(api_method.access.permission, auth_id.as_deref(), &uri_param, user_info.as_ref()) {
let err = http_err!(FORBIDDEN, "permission check failed");
tokio::time::delay_until(Instant::from_std(access_forbidden_time)).await;
return Ok((formatter.format_error)(err));
@@ -666,9 +667,9 @@ async fn handle_request(
Err(err) => (formatter.format_error)(err),
};
- if let Some(user) = user {
- let userid: Userid = user.parse()?;
- response.extensions_mut().insert(userid);
+ if let Some(auth_id) = auth_id {
+ let auth_id: Authid = auth_id.parse()?;
+ response.extensions_mut().insert(auth_id);
}
return Ok(response);
@@ -687,9 +688,10 @@ async fn handle_request(
let (ticket, csrf_token, language) = extract_auth_data(&parts.headers);
if ticket != None {
match check_auth(&method, &ticket, &csrf_token, &user_info) {
- Ok(userid) => {
- let new_csrf_token = assemble_csrf_prevention_token(csrf_secret(), &userid);
- return Ok(get_index(Some(userid), Some(new_csrf_token), language, &api, parts));
+ Ok(auth_id) => {
+ let userid = auth_id.user();
+ let new_csrf_token = assemble_csrf_prevention_token(csrf_secret(), userid);
+ return Ok(get_index(Some(userid.clone()), Some(new_csrf_token), language, &api, parts));
}
_ => {
tokio::time::delay_until(Instant::from_std(delay_unauth_time)).await;
diff --git a/src/server/upid.rs b/src/server/upid.rs
index df027b07..3ca7cff2 100644
--- a/src/server/upid.rs
+++ b/src/server/upid.rs
@@ -6,7 +6,7 @@ use proxmox::api::schema::{ApiStringFormat, Schema, StringSchema};
use proxmox::const_regex;
use proxmox::sys::linux::procfs;
-use crate::api2::types::Userid;
+use crate::api2::types::Authid;
/// Unique Process/Task Identifier
///
@@ -34,8 +34,8 @@ pub struct UPID {
pub worker_type: String,
/// Worker ID (arbitrary ASCII string)
pub worker_id: Option<String>,
- /// The user who started the task
- pub userid: Userid,
+ /// The authenticated entity who started the task
+ pub auth_id: Authid,
/// The node name.
pub node: String,
}
@@ -47,7 +47,7 @@ const_regex! {
pub PROXMOX_UPID_REGEX = concat!(
r"^UPID:(?P<node>[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?):(?P<pid>[0-9A-Fa-f]{8}):",
r"(?P<pstart>[0-9A-Fa-f]{8,9}):(?P<task_id>[0-9A-Fa-f]{8,16}):(?P<starttime>[0-9A-Fa-f]{8}):",
- r"(?P<wtype>[^:\s]+):(?P<wid>[^:\s]*):(?P<userid>[^:\s]+):$"
+ r"(?P<wtype>[^:\s]+):(?P<wid>[^:\s]*):(?P<authid>[^:\s]+):$"
);
}
@@ -65,7 +65,7 @@ impl UPID {
pub fn new(
worker_type: &str,
worker_id: Option<String>,
- userid: Userid,
+ auth_id: Authid,
) -> Result<Self, Error> {
let pid = unsafe { libc::getpid() };
@@ -87,7 +87,7 @@ impl UPID {
task_id,
worker_type: worker_type.to_owned(),
worker_id,
- userid,
+ auth_id,
node: proxmox::tools::nodename().to_owned(),
})
}
@@ -122,7 +122,7 @@ impl std::str::FromStr for UPID {
task_id: usize::from_str_radix(&cap["task_id"], 16).unwrap(),
worker_type: cap["wtype"].to_string(),
worker_id,
- userid: cap["userid"].parse()?,
+ auth_id: cap["authid"].parse()?,
node: cap["node"].to_string(),
})
} else {
@@ -146,6 +146,6 @@ impl std::fmt::Display for UPID {
// more that 8 characters for pstart
write!(f, "UPID:{}:{:08X}:{:08X}:{:08X}:{:08X}:{}:{}:{}:",
- self.node, self.pid, self.pstart, self.task_id, self.starttime, self.worker_type, wid, self.userid)
+ self.node, self.pid, self.pstart, self.task_id, self.starttime, self.worker_type, wid, self.auth_id)
}
}
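[Not part of the patch: a sketch of the UPID round-trip with the renamed auth_id field; node, pid and timestamps in the rendered string vary, and the token id is an assumed example.]

    // illustrative only: the former userid column now carries the full auth id
    use anyhow::Error;
    use proxmox_backup::api2::types::Authid;
    use proxmox_backup::server::UPID;

    fn upid_sketch() -> Result<(), Error> {
        let auth_id: Authid = "user1@pbs!sync".parse()?;  // assumed token id
        let upid = UPID::new("garbage_collection", None, auth_id)?;

        // renders roughly as:
        //   UPID:<node>:<pid>:<pstart>:<task_id>:<starttime>:garbage_collection::user1@pbs!sync:
        let text = upid.to_string();

        // and round-trips through the updated PROXMOX_UPID_REGEX
        let parsed: UPID = text.parse()?;
        assert_eq!(parsed.auth_id.to_string(), "user1@pbs!sync");
        Ok(())
    }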
diff --git a/src/server/verify_job.rs b/src/server/verify_job.rs
index 064fb2b7..169e1687 100644
--- a/src/server/verify_job.rs
+++ b/src/server/verify_job.rs
@@ -20,7 +20,7 @@ use crate::{
pub fn do_verification_job(
mut job: Job,
verification_job: VerificationJobConfig,
- userid: &Userid,
+ auth_id: &Authid,
schedule: Option<String>,
) -> Result<String, Error> {
let datastore = DataStore::lookup_datastore(&verification_job.store)?;
@@ -46,14 +46,14 @@ pub fn do_verification_job(
})
}
- let email = crate::server::lookup_user_email(userid);
+ let email = crate::server::lookup_user_email(auth_id.user());
let job_id = job.jobname().to_string();
let worker_type = job.jobtype().to_string();
let upid_str = WorkerTask::new_thread(
&worker_type,
Some(job.jobname().to_string()),
- userid.clone(),
+ auth_id.clone(),
false,
move |worker| {
job.start(&worker.upid().to_string())?;
diff --git a/src/server/worker_task.rs b/src/server/worker_task.rs
index 8ef0fde7..d811fdd9 100644
--- a/src/server/worker_task.rs
+++ b/src/server/worker_task.rs
@@ -21,7 +21,7 @@ use super::UPID;
use crate::tools::logrotate::{LogRotate, LogRotateFiles};
use crate::tools::{FileLogger, FileLogOptions};
-use crate::api2::types::Userid;
+use crate::api2::types::Authid;
macro_rules! PROXMOX_BACKUP_VAR_RUN_DIR_M { () => ("/run/proxmox-backup") }
macro_rules! PROXMOX_BACKUP_LOG_DIR_M { () => ("/var/log/proxmox-backup") }
@@ -644,10 +644,10 @@ impl Drop for WorkerTask {
impl WorkerTask {
- pub fn new(worker_type: &str, worker_id: Option<String>, userid: Userid, to_stdout: bool) -> Result<Arc<Self>, Error> {
+ pub fn new(worker_type: &str, worker_id: Option<String>, auth_id: Authid, to_stdout: bool) -> Result<Arc<Self>, Error> {
println!("register worker");
- let upid = UPID::new(worker_type, worker_id, userid)?;
+ let upid = UPID::new(worker_type, worker_id, auth_id)?;
let task_id = upid.task_id;
let mut path = std::path::PathBuf::from(PROXMOX_BACKUP_TASK_DIR);
@@ -699,14 +699,14 @@ impl WorkerTask {
pub fn spawn<F, T>(
worker_type: &str,
worker_id: Option<String>,
- userid: Userid,
+ auth_id: Authid,
to_stdout: bool,
f: F,
) -> Result<String, Error>
where F: Send + 'static + FnOnce(Arc<WorkerTask>) -> T,
T: Send + 'static + Future<Output = Result<(), Error>>,
{
- let worker = WorkerTask::new(worker_type, worker_id, userid, to_stdout)?;
+ let worker = WorkerTask::new(worker_type, worker_id, auth_id, to_stdout)?;
let upid_str = worker.upid.to_string();
let f = f(worker.clone());
tokio::spawn(async move {
@@ -721,7 +721,7 @@ impl WorkerTask {
pub fn new_thread<F>(
worker_type: &str,
worker_id: Option<String>,
- userid: Userid,
+ auth_id: Authid,
to_stdout: bool,
f: F,
) -> Result<String, Error>
@@ -729,7 +729,7 @@ impl WorkerTask {
{
println!("register worker thread");
- let worker = WorkerTask::new(worker_type, worker_id, userid, to_stdout)?;
+ let worker = WorkerTask::new(worker_type, worker_id, auth_id, to_stdout)?;
let upid_str = worker.upid.to_string();
let _child = std::thread::Builder::new().name(upid_str.clone()).spawn(move || {
diff --git a/tests/worker-task-abort.rs b/tests/worker-task-abort.rs
index 360d17da..3cb41e32 100644
--- a/tests/worker-task-abort.rs
+++ b/tests/worker-task-abort.rs
@@ -57,7 +57,7 @@ fn worker_task_abort() -> Result<(), Error> {
let res = server::WorkerTask::new_thread(
"garbage_collection",
None,
- proxmox_backup::api2::types::Userid::root_userid().clone(),
+ proxmox_backup::api2::types::Authid::root_auth_id().clone(),
true,
move |worker| {
println!("WORKER {}", worker);
diff --git a/www/config/ACLView.js b/www/config/ACLView.js
index f02d8de5..d552b029 100644
--- a/www/config/ACLView.js
+++ b/www/config/ACLView.js
@@ -53,7 +53,7 @@ Ext.define('PBS.config.ACLView', {
'delete': 1,
path: rec.data.path,
role: rec.data.roleid,
- userid: rec.data.ugid,
+ auth_id: rec.data.ugid,
},
callback: function() {
me.reload();
diff --git a/www/window/ACLEdit.js b/www/window/ACLEdit.js
index e33f1f36..ffeb9e81 100644
--- a/www/window/ACLEdit.js
+++ b/www/window/ACLEdit.js
@@ -40,7 +40,7 @@ Ext.define('PBS.window.ACLEdit', {
{
xtype: 'pbsUserSelector',
fieldLabel: gettext('User'),
- name: 'userid',
+ name: 'auth_id',
allowBlank: false,
},
{
--
2.20.1
Thread overview: 25+ messages
2020-10-28 11:36 [pbs-devel] [PATCH proxmox-backup 00/16] API tokens Fabian Grünbichler
2020-10-28 11:36 ` [pbs-devel] [PATCH proxmox-widget-toolkit] add PermissionView Fabian Grünbichler
2020-10-28 16:18 ` [pbs-devel] applied: " Thomas Lamprecht
2020-10-28 11:36 ` [pbs-devel] [PATCH proxmox-backup 01/16] api: add Authid as wrapper around Userid Fabian Grünbichler
2020-10-28 11:36 ` [pbs-devel] [PATCH proxmox] rpcenv: rename user to auth_id Fabian Grünbichler
2020-10-28 11:36 ` [pbs-devel] [PATCH proxmox-backup 02/16] config: add token.shadow file Fabian Grünbichler
2020-10-28 11:36 ` Fabian Grünbichler [this message]
2020-10-28 11:36 ` [pbs-devel] [PATCH proxmox-backup 04/16] REST: extract and handle API tokens Fabian Grünbichler
2020-10-28 11:36 ` [pbs-devel] [PATCH proxmox-backup 05/16] api: add API token endpoints Fabian Grünbichler
2020-10-28 11:36 ` [pbs-devel] [PATCH proxmox-backup 06/16] api: allow listing users + tokens Fabian Grünbichler
2020-10-28 11:36 ` [pbs-devel] [PATCH proxmox-backup 07/16] api: add permissions endpoint Fabian Grünbichler
2020-10-28 11:36 ` [pbs-devel] [PATCH proxmox-backup 08/16] client/remote: allow using ApiToken + secret Fabian Grünbichler
2020-10-28 11:36 ` [pbs-devel] [PATCH proxmox-backup 09/16] owner checks: handle backups owned by API tokens Fabian Grünbichler
2020-10-28 11:37 ` [pbs-devel] [PATCH proxmox-backup 10/16] tasks: allow unpriv users to read their tokens' tasks Fabian Grünbichler
2020-10-28 11:37 ` [pbs-devel] [PATCH proxmox-backup 11/16] manager: add token commands Fabian Grünbichler
2020-10-28 11:37 ` [pbs-devel] [PATCH proxmox-backup 12/16] manager: add user permissions command Fabian Grünbichler
2020-10-28 11:37 ` [pbs-devel] [PATCH proxmox-backup 13/16] gui: add permissions button to user view Fabian Grünbichler
2020-10-28 11:37 ` [pbs-devel] [PATCH proxmox-backup 14/16] gui: add API token UI Fabian Grünbichler
2020-10-28 11:37 ` [pbs-devel] [PATCH proxmox-backup 15/16] acls: allow viewing/editing user's token ACLs Fabian Grünbichler
2020-10-28 11:37 ` [pbs-devel] [PATCH proxmox-backup 16/16] gui: add API " Fabian Grünbichler
2020-10-29 14:23 ` [pbs-devel] applied: [PATCH proxmox-backup 00/16] API tokens Wolfgang Bumiller
2020-10-29 19:50 ` [pbs-devel] " Thomas Lamprecht
2020-10-30 8:03 ` Fabian Grünbichler
2020-10-30 8:48 ` Thomas Lamprecht
2020-10-30 9:55 ` Fabian Grünbichler