Date: Tue, 14 Feb 2023 15:33:20 +0100
From: Fabian Grünbichler
To: Proxmox Backup Server development discussion
Subject: Re: [pbs-devel] [PATCH proxmox-backup 1/4] api2: make remote for sync-jobs optional
Message-Id: <1676378795.mvvh7qehki.astroid@yuna.none>
In-Reply-To: <20230213154555.49610-2-h.laimer@proxmox.com>
References: <20230213154555.49610-1-h.laimer@proxmox.com> <20230213154555.49610-2-h.laimer@proxmox.com>

On February 13, 2023 4:45 pm, Hannes Laimer wrote:
> ... and update places where it is used.
> A SyncJob not having a remote means it is pulling
> from a local datastore.

high level: I wonder whether we really need this for sync jobs, or whether
just having it for pull (or as a new API/CLI endpoint copy/move?) would be
enough as a start? is there a use case for scheduled local syncing?
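For context, the semantics this patch introduces boil down to roughly the
following (a minimal, untested sketch with stand-in types, not the actual
PBS structs): an unset remote turns the job's source into another datastore
on the same host.

    // sketch only: a sync job source is either a configured remote or,
    // with `remote` unset, a local datastore
    struct SyncJobConfig {
        store: String,          // target datastore
        remote: Option<String>, // None => pull from a local datastore
        remote_store: String,   // source datastore (on the remote, or local)
    }

    fn describe_source(job: &SyncJobConfig) -> String {
        match &job.remote {
            Some(remote) => format!("remote '{}/{}'", remote, job.remote_store),
            None => format!("local datastore '{}'", job.remote_store),
        }
    }

    fn main() {
        let local_job = SyncJobConfig {
            store: "localstore0".to_string(),
            remote: None,
            remote_store: "localstore1".to_string(),
        };
        // prints: syncing 'localstore0' from local datastore 'localstore1'
        println!("syncing '{}' from {}", local_job.store, describe_source(&local_job));
    }

Whether that distinction belongs in the scheduled sync-job config at all, or
only in the one-shot pull path, is what the question above is aimed at.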

> Signed-off-by: Hannes Laimer
> ---
>  pbs-api-types/src/jobs.rs         |  4 +-
>  src/api2/config/remote.rs         |  2 +-
>  src/api2/config/sync.rs           | 41 ++++++++++------
>  src/api2/node/tasks.rs            |  4 +-
>  src/api2/pull.rs                  | 78 +++++++++++++++++++++++--------
>  src/server/email_notifications.rs | 16 +++----
>  6 files changed, 101 insertions(+), 44 deletions(-)
>
> diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs
> index cf7618c4..68db6cb8 100644
> --- a/pbs-api-types/src/jobs.rs
> +++ b/pbs-api-types/src/jobs.rs
> @@ -462,6 +462,7 @@ pub const GROUP_FILTER_LIST_SCHEMA: Schema =
>          },
>          remote: {
>              schema: REMOTE_ID_SCHEMA,
> +            optional: true,
>          },
>          "remote-store": {
>              schema: DATASTORE_SCHEMA,
> @@ -506,7 +507,8 @@ pub struct SyncJobConfig {
>      pub ns: Option<BackupNamespace>,
>      #[serde(skip_serializing_if = "Option::is_none")]
>      pub owner: Option<Authid>,
> -    pub remote: String,
> +    #[serde(skip_serializing_if = "Option::is_none")]
> +    pub remote: Option<String>,
>      pub remote_store: String,
>      #[serde(skip_serializing_if = "Option::is_none")]
>      pub remote_ns: Option<BackupNamespace>,
> diff --git a/src/api2/config/remote.rs b/src/api2/config/remote.rs
> index 2f02d121..aa74bdc0 100644
> --- a/src/api2/config/remote.rs
> +++ b/src/api2/config/remote.rs
> @@ -268,7 +268,7 @@ pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error>
>
>      let job_list: Vec<SyncJobConfig> = sync_jobs.convert_to_typed_array("sync")?;
>      for job in job_list {
> -        if job.remote == name {
> +        if job.remote.map_or(false, |id| id == name) {
>              param_bail!(
>                  "name",
>                  "remote '{}' is used by sync job '{}' (datastore '{}')",
> diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs
> index bd7373df..4c5d06e2 100644
> --- a/src/api2/config/sync.rs
> +++ b/src/api2/config/sync.rs
> @@ -8,8 +8,8 @@ use proxmox_schema::{api, param_bail};
>
>  use pbs_api_types::{
>      Authid, SyncJobConfig, SyncJobConfigUpdater, JOB_ID_SCHEMA, PRIV_DATASTORE_AUDIT,
> -    PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_AUDIT,
> -    PRIV_REMOTE_READ, PROXMOX_CONFIG_DIGEST_SCHEMA,
> +    PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ,
> +    PRIV_REMOTE_AUDIT, PRIV_REMOTE_READ, PROXMOX_CONFIG_DIGEST_SCHEMA,
>  };
>  use pbs_config::sync;
>
> @@ -25,8 +25,13 @@ pub fn check_sync_job_read_access(
>          return false;
>      }
>
> -    let remote_privs = user_info.lookup_privs(auth_id, &["remote", &job.remote]);
> -    remote_privs & PRIV_REMOTE_AUDIT != 0
> +    if let Some(remote) = &job.remote {
> +        let remote_privs = user_info.lookup_privs(auth_id, &["remote", remote]);
> +        remote_privs & PRIV_REMOTE_AUDIT != 0
> +    } else {
> +        let source_ds_privs = user_info.lookup_privs(auth_id, &["datastore", &job.remote_store]);
> +        source_ds_privs & PRIV_DATASTORE_AUDIT != 0
> +    }
>  }
>
>  /// checks whether user can run the corresponding pull job
> @@ -63,8 +68,13 @@ pub fn check_sync_job_modify_access(
>          return false;
>      }
>
> -    let remote_privs = user_info.lookup_privs(auth_id, &["remote", &job.remote, &job.remote_store]);
> -    remote_privs & PRIV_REMOTE_READ != 0
> +    if let Some(remote) = &job.remote {
> +        let remote_privs = user_info.lookup_privs(auth_id, &["remote", remote, &job.remote_store]);
> +        remote_privs & PRIV_REMOTE_READ != 0
> +    } else {
> +        let source_ds_privs = user_info.lookup_privs(auth_id, &["datastore", &job.remote_store]);
> +        source_ds_privs & PRIV_DATASTORE_READ != 0
> +    }
>  }
>
>  #[api(
> @@ -191,6 +201,8 @@ pub fn read_sync_job(id: String, rpcenv: &mut dyn RpcEnvironment) -> Result<SyncJobConfig, Error> {
>  #[serde(rename_all = "kebab-case")]
>  /// Deletable property name
>  pub enum DeletableProperty {
> +    /// Delete the remote property(-> meaning local).
> +    Remote,
>      /// Delete the owner property.
>      Owner,
>      /// Delete the comment property.
> @@ -273,6 +285,9 @@ pub fn update_sync_job(
>      if let Some(delete) = delete {
>          for delete_prop in delete {
>              match delete_prop {
> +                DeletableProperty::Remote => {
> +                    data.remote = None;
> +                }
>                  DeletableProperty::Owner => {
>                      data.owner = None;
>                  }
> @@ -329,7 +344,7 @@ pub fn update_sync_job(
>          data.ns = Some(ns);
>      }
>      if let Some(remote) = update.remote {
> -        data.remote = remote;
> +        data.remote = Some(remote);
>      }
>      if let Some(remote_store) = update.remote_store {
>          data.remote_store = remote_store;
> @@ -495,7 +510,7 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
>
>      let mut job = SyncJobConfig {
>          id: "regular".to_string(),
> -        remote: "remote0".to_string(),
> +        remote: Some("remote0".to_string()),
>          remote_store: "remotestore1".to_string(),
>          remote_ns: None,
>          store: "localstore0".to_string(),
> @@ -529,11 +544,11 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
>      assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job));
>
>      // reading without proper read permissions on local end must fail
> -    job.remote = "remote1".to_string();
> +    job.remote = Some("remote1".to_string());
>      assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job));
>
>      // reading without proper read permissions on remote end must fail
> -    job.remote = "remote0".to_string();
> +    job.remote = Some("remote0".to_string());
>      job.store = "localstore1".to_string();
>      assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job));
>
> @@ -546,10 +561,10 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
>      ));
>
>      // writing without proper write permissions on local end must fail
> -    job.remote = "remote1".to_string();
> +    job.remote = Some("remote1".to_string());
>
>      // writing without proper write permissions on remote end must fail
> -    job.remote = "remote0".to_string();
> +    job.remote = Some("remote0".to_string());
>      job.store = "localstore1".to_string();
>      assert!(!check_sync_job_modify_access(
>          &user_info,
> @@ -558,7 +573,7 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
>      ));
>
>      // reset remote to one where users have access
> -    job.remote = "remote1".to_string();
> +    job.remote = Some("remote1".to_string());
>
>      // user with read permission can only read, but not modify/run
>      assert!(check_sync_job_read_access(&user_info, &read_auth_id, &job));
> diff --git a/src/api2/node/tasks.rs b/src/api2/node/tasks.rs
> index d386f805..780cb6d1 100644
> --- a/src/api2/node/tasks.rs
> +++ b/src/api2/node/tasks.rs
> @@ -75,14 +75,14 @@ fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) -> Result<(), Error> {
>                  let local_store = captures.get(3);
>                  let local_ns = captures.get(4).map(|m| m.as_str());
>
> -                if let (Some(remote), Some(remote_store), Some(local_store)) =
> +                if let (remote, Some(remote_store), Some(local_store)) =
>                      (remote, remote_store, local_store)
>                  {
>                      return check_pull_privs(
>                          auth_id,
>                          local_store.as_str(),
>                          local_ns,
> -                        remote.as_str(),
> +                        remote.map(|remote| remote.as_str()),
>                          remote_store.as_str(),
>                          false,
>                      );
> diff --git a/src/api2/pull.rs b/src/api2/pull.rs
> index b2473ec8..c4255254 100644
> --- a/src/api2/pull.rs
> +++ b/src/api2/pull.rs
> @@ -9,7 +9,8 @@ use proxmox_sys::task_log;
>  use pbs_api_types::{
>      Authid, BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA,
>      GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_BACKUP,
> -    PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA,
> +    PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_REMOTE_READ, REMOTE_ID_SCHEMA,
> +    REMOVE_VANISHED_BACKUPS_SCHEMA,
>  };
>  use pbs_config::CachedUserInfo;
>  use proxmox_rest_server::WorkerTask;
> @@ -21,7 +22,7 @@ pub fn check_pull_privs(
>      auth_id: &Authid,
>      store: &str,
>      ns: Option<&str>,
> -    remote: &str,
> +    remote: Option<&str>,
>      remote_store: &str,
>      delete: bool,
>  ) -> Result<(), Error> {
> @@ -38,12 +39,22 @@ pub fn check_pull_privs(
>          PRIV_DATASTORE_BACKUP,
>          false,
>      )?;
> -    user_info.check_privs(
> -        auth_id,
> -        &["remote", remote, remote_store],
> -        PRIV_REMOTE_READ,
> -        false,
> -    )?;
> +
> +    if let Some(remote) = remote {
> +        user_info.check_privs(
> +            auth_id,
> +            &["remote", remote, remote_store],
> +            PRIV_REMOTE_READ,
> +            false,
> +        )?;
> +    } else {
> +        user_info.check_privs(
> +            auth_id,
> +            &["datastore", remote_store],
> +            PRIV_DATASTORE_READ,
> +            false,
> +        )?;
> +    }
>
>      if delete {
>          user_info.check_privs(
> @@ -64,7 +75,7 @@ impl TryFrom<&SyncJobConfig> for PullParameters {
>          PullParameters::new(
>              &sync_job.store,
>              sync_job.ns.clone().unwrap_or_default(),
> -            &sync_job.remote,
> +            sync_job.remote.clone().as_deref(),
>              &sync_job.remote_store,
>              sync_job.remote_ns.clone().unwrap_or_default(),
>              sync_job
> @@ -89,7 +100,7 @@ pub fn do_sync_job(
>  ) -> Result<String, Error> {
>      let job_id = format!(
>          "{}:{}:{}:{}:{}",
> -        sync_job.remote,
> +        sync_job.remote.clone().unwrap_or("localhost".to_string()),
>          sync_job.remote_store,
>          sync_job.store,
>          sync_job.ns.clone().unwrap_or_default(),
> @@ -112,7 +123,6 @@ pub fn do_sync_job(
>
>          let worker_future = async move {
>              let pull_params = PullParameters::try_from(&sync_job)?;
> -            let client = pull_params.client().await?;
>
>              task_log!(worker, "Starting datastore sync job '{}'", job_id);
>              if let Some(event_str) = schedule {
> @@ -122,11 +132,35 @@ pub fn do_sync_job(
>                  worker,
>                  "sync datastore '{}' from '{}/{}'",
>                  sync_job.store,
> -                sync_job.remote,
> +                sync_job.remote.clone().unwrap_or("local".to_string()),
>                  sync_job.remote_store,
>              );
>
> -            pull_store(&worker, &client, pull_params).await?;
> +            if sync_job.remote.is_some() {
> +                let client = pull_params.client().await?;
> +                pull_store(&worker, Some(&client), pull_params).await?;
> +            } else {
> +                match (sync_job.ns, sync_job.remote_ns) {
> +                    (Some(target_ns), Some(source_ns))
> +                        if target_ns.path().starts_with(source_ns.path())
> +                            && sync_job.store == sync_job.remote_store =>
> +                    {
> +                        task_log!(
> +                            worker,
> +                            "Can't sync namespace into one of its sub-namespaces, skipping"
> +                        );
> +                    }
> +                    (_, None) if sync_job.store == sync_job.remote_store => {
> +                        task_log!(
> +                            worker,
> +                            "Can't sync root namespace into same datastore, skipping"
> +                        );
> +                    }
> +                    _ => {
> +                        pull_store(&worker, None, pull_params).await?;
> +                    }
> +                }
> +            }
>
>              task_log!(worker, "sync job '{}' end", &job_id);
>
> @@ -178,6 +212,7 @@ pub fn do_sync_job(
>          },
>          remote: {
>              schema: REMOTE_ID_SCHEMA,
> +            optional: true,
>          },
>          "remote-store": {
>              schema: DATASTORE_SCHEMA,
> @@ -218,7 +253,7 @@ The delete flag additionally requires the Datastore.Prune privilege on '/datastore/{store}'.
>  async fn pull(
>      store: String,
>      ns: Option<BackupNamespace>,
> -    remote: String,
> +    remote: Option<String>,
>      remote_store: String,
>      remote_ns: Option<BackupNamespace>,
>      remove_vanished: Option<bool>,
> @@ -241,7 +276,7 @@ async fn pull(
>          &auth_id,
>          &store,
>          ns_str.as_deref(),
> -        &remote,
> +        remote.as_deref(),
>          &remote_store,
>          delete,
>      )?;
> @@ -249,7 +284,7 @@ async fn pull(
>      let pull_params = PullParameters::new(
>          &store,
>          ns,
> -        &remote,
> +        remote.as_deref(),
>          &remote_store,
>          remote_ns.unwrap_or_default(),
>          auth_id.clone(),
> @@ -258,7 +293,12 @@ async fn pull(
>          group_filter,
>          limit,
>      )?;
> -    let client = pull_params.client().await?;
> +
> +    let client = if remote.is_some() {
> +        Some(pull_params.client().await?)
> +    } else {
> +        None
> +    };
>
>      // fixme: set to_stdout to false?
>      // FIXME: add namespace to worker id?
> @@ -272,11 +312,11 @@ async fn pull(
>              worker,
>              "pull datastore '{}' from '{}/{}'",
>              store,
> -            remote,
> +            remote.as_deref().unwrap_or("localhost"),
>              remote_store,
>          );
>
> -        let pull_future = pull_store(&worker, &client, pull_params);
> +        let pull_future = pull_store(&worker, client.as_ref(), pull_params);
>          (select! {
>              success = pull_future.fuse() => success,
>              abort = worker.abort_future().map(|_| Err(format_err!("pull aborted"))) => abort,
> diff --git a/src/server/email_notifications.rs b/src/server/email_notifications.rs
> index b3298cf9..31a46b0f 100644
> --- a/src/server/email_notifications.rs
> +++ b/src/server/email_notifications.rs
> @@ -486,15 +486,15 @@ pub fn send_sync_status(
>          }
>      };
>
> +    let source_str = if let Some(remote) = job.remote.clone() {
> +        format!("Sync remote '{}'", remote)
> +    } else {
> +        format!("Sync local")
> +    };
> +
>      let subject = match result {
> -        Ok(()) => format!(
> -            "Sync remote '{}' datastore '{}' successful",
> -            job.remote, job.remote_store,
> -        ),
> -        Err(_) => format!(
> -            "Sync remote '{}' datastore '{}' failed",
> -            job.remote, job.remote_store,
> -        ),
> +        Ok(()) => format!("{} datastore '{}' successful", source_str, job.remote_store,),
> +        Err(_) => format!("{} datastore '{}' failed", source_str, job.remote_store,),
>      };
>
>      send_job_status_mail(email, &subject, &text)?;
> --
> 2.30.2
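one more note on the permission side, mostly to spell out what the patch
does: with a remote configured, the source check stays on the /remote ACL
path (PRIV_REMOTE_READ); without one, it moves to the local source datastore
(PRIV_DATASTORE_READ). A simplified sketch of the resulting ACL path
selection (stand-in helper and made-up names, not the actual
check_pull_privs() code):

    // sketch: which ACL object path the source-side privilege check
    // in check_pull_privs() ends up looking at
    fn source_acl_path<'a>(remote: Option<&'a str>, remote_store: &'a str) -> Vec<&'a str> {
        match remote {
            // remote source: PRIV_REMOTE_READ on /remote/{remote}/{remote_store}
            Some(remote) => vec!["remote", remote, remote_store],
            // local source: PRIV_DATASTORE_READ on /datastore/{remote_store}
            None => vec!["datastore", remote_store],
        }
    }

    fn main() {
        assert_eq!(
            source_acl_path(Some("remote0"), "store1"),
            ["remote", "remote0", "store1"]
        );
        assert_eq!(source_acl_path(None, "store1"), ["datastore", "store1"]);
    }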