From: Stefan Hanreich <s.hanreich@proxmox.com>
To: pbs-devel@lists.proxmox.com
Date: Wed, 11 Jan 2023 15:52:09 +0100
Message-Id: <20230111145210.516392-2-s.hanreich@proxmox.com>
In-Reply-To: <20230111145210.516392-1-s.hanreich@proxmox.com>
References: <20230111145210.516392-1-s.hanreich@proxmox.com>
Subject: [pbs-devel] [PATCH proxmox-backup v2 1/2] partial fix #3701: sync/pull: add transfer-last parameter

Specifying the transfer-last parameter limits the number of backups
that get synced via the pull command or a sync job: only the N latest
backups of each group get pulled/synced, all older backups are
skipped.

This is particularly useful when the sync target has less disk space
than the source, so that syncing every backup from the source would
not fit on the target. Additionally, it can be used to limit the
amount of data transferred, reducing network load.

Signed-off-by: Stefan Hanreich <s.hanreich@proxmox.com>
---
I had to make slight adjustments to Wolfgang's proposed condition,
since it did not work in cases where transfer-last was greater than
the total number of available backups. Nevertheless, the condition
should now be a lot less obtuse and easier to read.
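To illustrate the intended semantics, here is a minimal, self-contained
sketch of the resulting selection logic (the function snapshots_to_pull
and the string-based snapshot list are made up for illustration; the
patch applies the same condition to the snapshot list inside
pull_group, which is sorted oldest to newest):

    use std::cmp::min;

    /// Given a snapshot list sorted oldest to newest, keep only the
    /// newest `transfer_last` entries (or everything if unset).
    fn snapshots_to_pull(snapshots: &[&str], transfer_last: Option<usize>) -> Vec<String> {
        let total_amount = snapshots.len();
        // Clamp so that transfer-last > total_amount behaves gracefully.
        let transfer_amount = min(transfer_last.unwrap_or(total_amount), total_amount);

        snapshots
            .iter()
            .enumerate()
            // Same condition as in pull_group: skip everything before
            // the last `transfer_amount` entries.
            .filter(|(pos, _)| *pos >= total_amount - transfer_amount)
            .map(|(_, snapshot)| snapshot.to_string())
            .collect()
    }

    fn main() {
        let list = ["2023-01-01", "2023-01-02", "2023-01-03"];
        // keep only the two newest snapshots
        assert_eq!(snapshots_to_pull(&list, Some(2)), ["2023-01-02", "2023-01-03"]);
        // transfer-last exceeding the list length keeps everything
        assert_eq!(snapshots_to_pull(&list, Some(5)), list);
        // no transfer-last keeps everything
        assert_eq!(snapshots_to_pull(&list, None), list);
    }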
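With the patch applied, limiting a manually triggered pull to the two
newest snapshots per group would look something like this (the remote
and datastore names are placeholders):

    # pull only the two newest snapshots of every group from pbs1
    proxmox-backup-manager pull pbs1 remotestore1 localstore1 --transfer-last 2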
 pbs-api-types/src/jobs.rs         | 11 +++++++++++
 src/api2/config/sync.rs           |  9 +++++++++
 src/api2/pull.rs                  | 10 +++++++++-
 src/bin/proxmox-backup-manager.rs | 11 ++++++++++-
 src/server/pull.rs                | 17 ++++++++++++++++-
 5 files changed, 55 insertions(+), 3 deletions(-)

diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs
index cf7618c4..b9f57719 100644
--- a/pbs-api-types/src/jobs.rs
+++ b/pbs-api-types/src/jobs.rs
@@ -444,6 +444,11 @@ pub const GROUP_FILTER_SCHEMA: Schema = StringSchema::new(
 pub const GROUP_FILTER_LIST_SCHEMA: Schema =
     ArraySchema::new("List of group filters.", &GROUP_FILTER_SCHEMA).schema();
 
+pub const TRANSFER_LAST_SCHEMA: Schema =
+    IntegerSchema::new("The maximum amount of snapshots to be transferred (per group).")
+        .minimum(1)
+        .schema();
+
 #[api(
     properties: {
         id: {
@@ -493,6 +498,10 @@ pub const GROUP_FILTER_LIST_SCHEMA: Schema =
             schema: GROUP_FILTER_LIST_SCHEMA,
             optional: true,
         },
+        "transfer-last": {
+            schema: TRANSFER_LAST_SCHEMA,
+            optional: true,
+        },
     }
 )]
 #[derive(Serialize, Deserialize, Clone, Updater, PartialEq)]
@@ -522,6 +531,8 @@ pub struct SyncJobConfig {
     pub group_filter: Option<Vec<GroupFilter>>,
     #[serde(flatten)]
     pub limit: RateLimitConfig,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub transfer_last: Option<usize>,
 }
 
 impl SyncJobConfig {
diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs
index bd7373df..01e5f2ce 100644
--- a/src/api2/config/sync.rs
+++ b/src/api2/config/sync.rs
@@ -215,6 +215,8 @@ pub enum DeletableProperty {
     RemoteNs,
     /// Delete the max_depth property,
     MaxDepth,
+    /// Delete the transfer_last property,
+    TransferLast,
 }
 
 #[api(
@@ -309,6 +311,9 @@ pub fn update_sync_job(
                 DeletableProperty::MaxDepth => {
                     data.max_depth = None;
                 }
+                DeletableProperty::TransferLast => {
+                    data.transfer_last = None;
+                }
             }
         }
     }
@@ -343,6 +348,9 @@ pub fn update_sync_job(
     if let Some(group_filter) = update.group_filter {
         data.group_filter = Some(group_filter);
     }
+    if let Some(transfer_last) = update.transfer_last {
+        data.transfer_last = Some(transfer_last);
+    }
 
     if update.limit.rate_in.is_some() {
         data.limit.rate_in = update.limit.rate_in;
@@ -507,6 +515,7 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
         group_filter: None,
         schedule: None,
         limit: pbs_api_types::RateLimitConfig::default(), // no limit
+        transfer_last: None,
     };
 
     // should work without ACLs
diff --git a/src/api2/pull.rs b/src/api2/pull.rs
index b2473ec8..daeba7cf 100644
--- a/src/api2/pull.rs
+++ b/src/api2/pull.rs
@@ -10,6 +10,7 @@ use pbs_api_types::{
     Authid, BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA,
     GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_BACKUP,
     PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA,
+    TRANSFER_LAST_SCHEMA,
 };
 use pbs_config::CachedUserInfo;
 use proxmox_rest_server::WorkerTask;
@@ -76,6 +77,7 @@ impl TryFrom<&SyncJobConfig> for PullParameters {
             sync_job.max_depth,
             sync_job.group_filter.clone(),
             sync_job.limit.clone(),
+            sync_job.transfer_last,
         )
     }
 }
@@ -201,7 +203,11 @@ pub fn do_sync_job(
             limit: {
                 type: RateLimitConfig,
                 flatten: true,
-            }
+            },
+            "transfer-last": {
+                schema: TRANSFER_LAST_SCHEMA,
+                optional: true,
+            },
         },
     },
     access: {
@@ -225,6 +231,7 @@ async fn pull(
     max_depth: Option<usize>,
     group_filter: Option<Vec<GroupFilter>>,
     limit: RateLimitConfig,
+    transfer_last: Option<usize>,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
@@ -257,6 +264,7 @@ async fn pull(
         max_depth,
         group_filter,
         limit,
+        transfer_last,
     )?;
 
     let client = pull_params.client().await?;
diff --git a/src/bin/proxmox-backup-manager.rs b/src/bin/proxmox-backup-manager.rs
index 06330c78..9ea5830c 100644
--- a/src/bin/proxmox-backup-manager.rs
+++ b/src/bin/proxmox-backup-manager.rs
@@ -13,7 +13,7 @@ use pbs_api_types::percent_encoding::percent_encode_component;
 use pbs_api_types::{
     BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA,
     GROUP_FILTER_LIST_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, NS_MAX_DEPTH_SCHEMA,
-    REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, UPID_SCHEMA,
+    REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, TRANSFER_LAST_SCHEMA, UPID_SCHEMA,
     VERIFICATION_OUTDATED_AFTER_SCHEMA,
 };
 use pbs_client::{display_task_log, view_task_result};
@@ -272,6 +272,10 @@ fn task_mgmt_cli() -> CommandLineInterface {
                 schema: OUTPUT_FORMAT,
                 optional: true,
             },
+            "transfer-last": {
+                schema: TRANSFER_LAST_SCHEMA,
+                optional: true,
+            },
         }
     }
 )]
@@ -287,6 +291,7 @@ async fn pull_datastore(
     max_depth: Option<usize>,
     group_filter: Option<Vec<GroupFilter>>,
     limit: RateLimitConfig,
+    transfer_last: Option<usize>,
     param: Value,
 ) -> Result<Value, Error> {
     let output_format = get_output_format(&param);
@@ -319,6 +324,10 @@ async fn pull_datastore(
         args["remove-vanished"] = Value::from(remove_vanished);
     }
 
+    if transfer_last.is_some() {
+        args["transfer-last"] = json!(transfer_last)
+    }
+
     let mut limit_json = json!(limit);
     let limit_map = limit_json
         .as_object_mut()
diff --git a/src/server/pull.rs b/src/server/pull.rs
index 65eedf2c..81f4faf3 100644
--- a/src/server/pull.rs
+++ b/src/server/pull.rs
@@ -1,5 +1,6 @@
 //! Sync datastore from remote server
 
+use std::cmp::min;
 use std::collections::{HashMap, HashSet};
 use std::io::{Seek, SeekFrom};
 use std::sync::atomic::{AtomicUsize, Ordering};
@@ -59,6 +60,8 @@ pub(crate) struct PullParameters {
     group_filter: Option<Vec<GroupFilter>>,
     /// Rate limits for all transfers from `remote`
     limit: RateLimitConfig,
+    /// How many snapshots should be transferred at most (taking the newest N snapshots)
+    transfer_last: Option<usize>,
 }
 
 impl PullParameters {
@@ -78,6 +81,7 @@ impl PullParameters {
         max_depth: Option<usize>,
         group_filter: Option<Vec<GroupFilter>>,
         limit: RateLimitConfig,
+        transfer_last: Option<usize>,
     ) -> Result<Self, Error> {
         let store = DataStore::lookup_datastore(store, Some(Operation::Write))?;
 
@@ -109,6 +113,7 @@ impl PullParameters {
             max_depth,
             group_filter,
             limit,
+            transfer_last,
         })
     }
 
@@ -573,7 +578,7 @@ impl std::fmt::Display for SkipInfo {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         write!(
             f,
-            "skipped: {} snapshot(s) ({}) older than the newest local snapshot",
+            "skipped: {} snapshot(s) ({}) - older than the newest local snapshot or excluded by transfer-last",
             self.count,
             self.affected().map_err(|_| std::fmt::Error)?
         )
@@ -646,6 +651,11 @@ async fn pull_group(
         count: 0,
     };
 
+    let total_amount = list.len();
+
+    let mut transfer_amount = params.transfer_last.unwrap_or(total_amount);
+    transfer_amount = min(transfer_amount, total_amount);
+
     for (pos, item) in list.into_iter().enumerate() {
         let snapshot = item.backup;
 
@@ -668,6 +678,11 @@ async fn pull_group(
             }
         }
 
+        if pos < (total_amount - transfer_amount) {
+            skip_info.update(snapshot.time);
+            continue;
+        }
+
         // get updated auth_info (new tickets)
         let auth_info = client.login().await?;
-- 
2.30.2