From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <l.wagner@proxmox.com>
Received: from firstgate.proxmox.com (firstgate.proxmox.com [212.224.123.68])
 (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)
 key-exchange X25519 server-signature RSA-PSS (2048 bits))
 (No client certificate requested)
 by lists.proxmox.com (Postfix) with ESMTPS id 6AB7DD57C
 for <pbs-devel@lists.proxmox.com>; Thu, 21 Sep 2023 13:07:03 +0200 (CEST)
Received: from firstgate.proxmox.com (localhost [127.0.0.1])
 by firstgate.proxmox.com (Proxmox) with ESMTP id 3BC741EA6E
 for <pbs-devel@lists.proxmox.com>; Thu, 21 Sep 2023 13:06:33 +0200 (CEST)
Received: from proxmox-new.maurer-it.com (proxmox-new.maurer-it.com
 [94.136.29.106])
 (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)
 key-exchange X25519 server-signature RSA-PSS (2048 bits) server-digest SHA256)
 (No client certificate requested)
 by firstgate.proxmox.com (Proxmox) with ESMTPS
 for <pbs-devel@lists.proxmox.com>; Thu, 21 Sep 2023 13:06:31 +0200 (CEST)
Received: from proxmox-new.maurer-it.com (localhost.localdomain [127.0.0.1])
 by proxmox-new.maurer-it.com (Proxmox) with ESMTP id 2AA924863F
 for <pbs-devel@lists.proxmox.com>; Thu, 21 Sep 2023 13:06:31 +0200 (CEST)
Message-ID: <e9d57be2-f7b1-431c-8f95-8ace0e01b1d4@proxmox.com>
Date: Thu, 21 Sep 2023 13:06:30 +0200
MIME-Version: 1.0
User-Agent: Mozilla Thunderbird
From: Lukas Wagner <l.wagner@proxmox.com>
To: Proxmox Backup Server development discussion
 <pbs-devel@lists.proxmox.com>, Hannes Laimer <h.laimer@proxmox.com>
References: <20230808121344.199500-1-h.laimer@proxmox.com>
 <20230808121344.199500-2-h.laimer@proxmox.com>
Content-Language: de-AT, en-US
In-Reply-To: <20230808121344.199500-2-h.laimer@proxmox.com>
Content-Type: text/plain; charset=UTF-8; format=flowed
Content-Transfer-Encoding: 8bit
X-SPAM-LEVEL: Spam detection results:  0
 AWL -0.031 Adjusted score from AWL reputation of From: address
 BAYES_00                 -1.9 Bayes spam probability is 0 to 1%
 DMARC_MISSING             0.1 Missing DMARC policy
 KAM_DMARC_STATUS 0.01 Test Rule for DKIM or SPF Failure with Strict Alignment
 SPF_HELO_NONE           0.001 SPF: HELO does not publish an SPF Record
 SPF_PASS               -0.001 SPF: sender matches SPF record
 URIBL_BLOCKED 0.001 ADMINISTRATOR NOTICE: The query to URIBL was blocked. See
 http://wiki.apache.org/spamassassin/DnsBlocklists#dnsbl-block for more
 information. [pull.rs]
Subject: Re: [pbs-devel] [PATCH proxmox-backup v3 1/6] api2: make Remote for
 SyncJob optional
X-BeenThere: pbs-devel@lists.proxmox.com
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: Proxmox Backup Server development discussion
 <pbs-devel.lists.proxmox.com>
List-Unsubscribe: <https://lists.proxmox.com/cgi-bin/mailman/options/pbs-devel>, 
 <mailto:pbs-devel-request@lists.proxmox.com?subject=unsubscribe>
List-Archive: <http://lists.proxmox.com/pipermail/pbs-devel/>
List-Post: <mailto:pbs-devel@lists.proxmox.com>
List-Help: <mailto:pbs-devel-request@lists.proxmox.com?subject=help>
List-Subscribe: <https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel>, 
 <mailto:pbs-devel-request@lists.proxmox.com?subject=subscribe>
X-List-Received-Date: Thu, 21 Sep 2023 11:07:03 -0000

Comments inline:

On 8/8/23 14:13, Hannes Laimer wrote:
> diff --git a/src/api2/pull.rs b/src/api2/pull.rs
> index daeba7cf..664ecce5 100644
> --- a/src/api2/pull.rs
> +++ b/src/api2/pull.rs
> @@ -8,7 +8,7 @@ use proxmox_sys::task_log;
>   
>   use pbs_api_types::{
>       Authid, BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA,
> -    GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_BACKUP,
> +    GROUP_FILTER_LIST_SCHEMA, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_BACKUP,
>       PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA,
>       TRANSFER_LAST_SCHEMA,
>   };
> @@ -22,7 +22,7 @@ pub fn check_pull_privs(
>       auth_id: &Authid,
>       store: &str,
>       ns: Option<&str>,
> -    remote: &str,
> +    remote: Option<&str>,
>       remote_store: &str,
>       delete: bool,
>   ) -> Result<(), Error> {
> @@ -39,12 +39,22 @@ pub fn check_pull_privs(
>           PRIV_DATASTORE_BACKUP,
>           false,
>       )?;
> -    user_info.check_privs(
> -        auth_id,
> -        &["remote", remote, remote_store],
> -        PRIV_REMOTE_READ,
> -        false,
> -    )?;
> +
> +    if let Some(remote) = remote {
> +        user_info.check_privs(
> +            auth_id,
> +            &["remote", remote, remote_store],
> +            PRIV_REMOTE_READ,
> +            false,
> +        )?;
> +    } else {
> +        user_info.check_privs(
> +            auth_id,
> +            &["datastore", remote_store],
> +            PRIV_DATASTORE_BACKUP,
> +            false,
> +        )?;
> +    }
>   
>       if delete {
>           user_info.check_privs(
> @@ -65,7 +75,7 @@ impl TryFrom<&SyncJobConfig> for PullParameters {
>           PullParameters::new(
>               &sync_job.store,
>               sync_job.ns.clone().unwrap_or_default(),
> -            &sync_job.remote,
> +            sync_job.remote.as_deref().unwrap_or("local"),
>               &sync_job.remote_store,
>               sync_job.remote_ns.clone().unwrap_or_default(),
>               sync_job
> @@ -91,7 +101,7 @@ pub fn do_sync_job(
>   ) -> Result<String, Error> {
>       let job_id = format!(
>           "{}:{}:{}:{}:{}",
> -        sync_job.remote,
> +        sync_job.remote.clone().unwrap_or("localhost".to_string()),
>           sync_job.remote_store,
>           sync_job.store,
>           sync_job.ns.clone().unwrap_or_default(),
> @@ -124,11 +134,28 @@ pub fn do_sync_job(
>                       worker,
>                       "sync datastore '{}' from '{}/{}'",
>                       sync_job.store,
> -                    sync_job.remote,
> +                    sync_job.remote.clone().unwrap_or("local".to_string()),
>                       sync_job.remote_store,
>                   );
>   
> -                pull_store(&worker, &client, pull_params).await?;
> +                if sync_job.remote.is_some() {
> +                    pull_store(&worker, &client, pull_params).await?;
> +                } else {
> +                    if let (Some(target_ns), Some(source_ns)) = (sync_job.ns, sync_job.remote_ns) {
> +                        if target_ns.path().starts_with(source_ns.path())
> +                            && sync_job.store == sync_job.remote_store
> +                            && sync_job.max_depth.map_or(true, |sync_depth| {
> +                            target_ns.depth() + sync_depth > MAX_NAMESPACE_DEPTH
> +                        }) {
> +                            task_log!(
> +                                worker,
> +                                "Can't sync namespace into one of its sub-namespaces, would exceed maximum namespace depth, skipping"
> +                            );
> +                        }
> +                    } else {
> +                        pull_store(&worker, &client, pull_params).await?;
> +                    }
> +                }
>   
>                   task_log!(worker, "sync job '{}' end", &job_id);
>   
> @@ -180,6 +207,7 @@ pub fn do_sync_job(
>               },
>               remote: {
>                   schema: REMOTE_ID_SCHEMA,
> +                optional: true,
>               },
>               "remote-store": {
>                   schema: DATASTORE_SCHEMA,
> @@ -224,7 +252,7 @@ The delete flag additionally requires the Datastore.Prune privilege on '/datasto
>   async fn pull(
>       store: String,
>       ns: Option<BackupNamespace>,
> -    remote: String,
> +    remote: Option<String>,
>       remote_store: String,
>       remote_ns: Option<BackupNamespace>,
>       remove_vanished: Option<bool>,
> @@ -248,7 +276,7 @@ async fn pull(
>           &auth_id,
>           &store,
>           ns_str.as_deref(),
> -        &remote,
> +        remote.as_deref(),
>           &remote_store,
>           delete,
>       )?;
> @@ -256,7 +284,7 @@ async fn pull(
>       let pull_params = PullParameters::new(
>           &store,
>           ns,
> -        &remote,
> +        remote.as_deref().unwrap_or("local"),
>           &remote_store,
>           remote_ns.unwrap_or_default(),
>           auth_id.clone(),
> @@ -280,7 +308,7 @@ async fn pull(
>                   worker,
>                   "pull datastore '{}' from '{}/{}'",
>                   store,
> -                remote,
> +                remote.as_deref().unwrap_or("localhost"),
>                   remote_store,
>               );
>   
> @@ -299,4 +327,4 @@ async fn pull(
>       Ok(upid_str)
>   }
>   
> -pub const ROUTER: Router = Router::new().post(&API_METHOD_PULL);
> +pub const ROUTER: Router = Router::new().post(&API_METHOD_PULL);
> \ No newline at end of file

You stripped the newline at the end of file (and added it back a few 
patches later)

> diff --git a/src/server/email_notifications.rs b/src/server/email_notifications.rs
> index ea1476d7..18881782 100644
> --- a/src/server/email_notifications.rs
> +++ b/src/server/email_notifications.rs
> @@ -484,15 +484,17 @@ pub fn send_sync_status(
>           }
>       };
>   
> +    let tmp_src_string;
> +    let source_str = if let Some(remote) = &job.remote {
> +        tmp_src_string = format!("Sync remote '{}'", remote);
> +        &tmp_src_string
> +    } else {
> +        "Sync local"
> +    };
> +

nit: considering that this is not in a hot loop or anything, I'd
just do a

let source_str = if let Some(...) {
   format!(...)
} else {
   "Sync local".into()
};

instead of using the temporary variable. Looks far nicer and I guess we 
can spare a couple extra µs when sending the notification email. ;)
No hard feelings tho.

-- 
- Lukas