* [pbs-devel] [PATCH v4 proxmox-backup 1/3] partial fix #3701: sync job: pull: add transfer-last parameter
2023-04-18 14:59 [pbs-devel] [PATCH v4 proxmox-backup 0/3] add transfer-last parameter to pull/sync job Stefan Hanreich
@ 2023-04-18 14:59 ` Stefan Hanreich
2023-04-18 14:59 ` [pbs-devel] [PATCH v4 proxmox-backup 2/3] sync job: pull: improve log output Stefan Hanreich
` (2 subsequent siblings)
3 siblings, 0 replies; 5+ messages in thread
From: Stefan Hanreich @ 2023-04-18 14:59 UTC (permalink / raw)
To: pbs-devel
Specifying the transfer-last parameter limits the number of backups
that get synced via the pull command/sync job. The parameter specifies
how many of the N latest backups should get pulled/synced. All other
backups will get skipped.
This is particularly useful in situations where the sync target has
less disk space than the source. Syncing all backups from the source
is not possible if there is not enough disk space on the target.
Additionally this can be used for limiting the amount of data
transferred, reducing load on the network.
The newest backup will always get re-synced, regardless of the setting
of the transfer-last parameter.
Signed-off-by: Stefan Hanreich <s.hanreich@proxmox.com>
---
pbs-api-types/src/jobs.rs | 11 +++++++++++
src/api2/config/sync.rs | 9 +++++++++
src/api2/pull.rs | 10 +++++++++-
src/bin/proxmox-backup-manager.rs | 11 ++++++++++-
src/server/pull.rs | 24 +++++++++++++++++++-----
5 files changed, 58 insertions(+), 7 deletions(-)
diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs
index cf7618c4..23e19b7b 100644
--- a/pbs-api-types/src/jobs.rs
+++ b/pbs-api-types/src/jobs.rs
@@ -444,6 +444,11 @@ pub const GROUP_FILTER_SCHEMA: Schema = StringSchema::new(
pub const GROUP_FILTER_LIST_SCHEMA: Schema =
ArraySchema::new("List of group filters.", &GROUP_FILTER_SCHEMA).schema();
+pub const TRANSFER_LAST_SCHEMA: Schema =
+ IntegerSchema::new("Limit transfer to last N snapshots (per group), skipping others")
+ .minimum(1)
+ .schema();
+
#[api(
properties: {
id: {
@@ -493,6 +498,10 @@ pub const GROUP_FILTER_LIST_SCHEMA: Schema =
schema: GROUP_FILTER_LIST_SCHEMA,
optional: true,
},
+ "transfer-last": {
+ schema: TRANSFER_LAST_SCHEMA,
+ optional: true,
+ },
}
)]
#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)]
@@ -522,6 +531,8 @@ pub struct SyncJobConfig {
pub group_filter: Option<Vec<GroupFilter>>,
#[serde(flatten)]
pub limit: RateLimitConfig,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub transfer_last: Option<usize>,
}
impl SyncJobConfig {
diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs
index bd7373df..01e5f2ce 100644
--- a/src/api2/config/sync.rs
+++ b/src/api2/config/sync.rs
@@ -215,6 +215,8 @@ pub enum DeletableProperty {
RemoteNs,
/// Delete the max_depth property,
MaxDepth,
+ /// Delete the transfer_last property,
+ TransferLast,
}
#[api(
@@ -309,6 +311,9 @@ pub fn update_sync_job(
DeletableProperty::MaxDepth => {
data.max_depth = None;
}
+ DeletableProperty::TransferLast => {
+ data.transfer_last = None;
+ }
}
}
}
@@ -343,6 +348,9 @@ pub fn update_sync_job(
if let Some(group_filter) = update.group_filter {
data.group_filter = Some(group_filter);
}
+ if let Some(transfer_last) = update.transfer_last {
+ data.transfer_last = Some(transfer_last);
+ }
if update.limit.rate_in.is_some() {
data.limit.rate_in = update.limit.rate_in;
@@ -507,6 +515,7 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
group_filter: None,
schedule: None,
limit: pbs_api_types::RateLimitConfig::default(), // no limit
+ transfer_last: None,
};
// should work without ACLs
diff --git a/src/api2/pull.rs b/src/api2/pull.rs
index b2473ec8..daeba7cf 100644
--- a/src/api2/pull.rs
+++ b/src/api2/pull.rs
@@ -10,6 +10,7 @@ use pbs_api_types::{
Authid, BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA,
GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_BACKUP,
PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA,
+ TRANSFER_LAST_SCHEMA,
};
use pbs_config::CachedUserInfo;
use proxmox_rest_server::WorkerTask;
@@ -76,6 +77,7 @@ impl TryFrom<&SyncJobConfig> for PullParameters {
sync_job.max_depth,
sync_job.group_filter.clone(),
sync_job.limit.clone(),
+ sync_job.transfer_last,
)
}
}
@@ -201,7 +203,11 @@ pub fn do_sync_job(
limit: {
type: RateLimitConfig,
flatten: true,
- }
+ },
+ "transfer-last": {
+ schema: TRANSFER_LAST_SCHEMA,
+ optional: true,
+ },
},
},
access: {
@@ -225,6 +231,7 @@ async fn pull(
max_depth: Option<usize>,
group_filter: Option<Vec<GroupFilter>>,
limit: RateLimitConfig,
+ transfer_last: Option<usize>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
@@ -257,6 +264,7 @@ async fn pull(
max_depth,
group_filter,
limit,
+ transfer_last,
)?;
let client = pull_params.client().await?;
diff --git a/src/bin/proxmox-backup-manager.rs b/src/bin/proxmox-backup-manager.rs
index 740fdc49..b4cb6cb3 100644
--- a/src/bin/proxmox-backup-manager.rs
+++ b/src/bin/proxmox-backup-manager.rs
@@ -13,7 +13,7 @@ use pbs_api_types::percent_encoding::percent_encode_component;
use pbs_api_types::{
BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA,
GROUP_FILTER_LIST_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, NS_MAX_DEPTH_SCHEMA,
- REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, UPID_SCHEMA,
+ REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, TRANSFER_LAST_SCHEMA, UPID_SCHEMA,
VERIFICATION_OUTDATED_AFTER_SCHEMA,
};
use pbs_client::{display_task_log, view_task_result};
@@ -272,6 +272,10 @@ fn task_mgmt_cli() -> CommandLineInterface {
schema: OUTPUT_FORMAT,
optional: true,
},
+ "transfer-last": {
+ schema: TRANSFER_LAST_SCHEMA,
+ optional: true,
+ },
}
}
)]
@@ -287,6 +291,7 @@ async fn pull_datastore(
max_depth: Option<usize>,
group_filter: Option<Vec<GroupFilter>>,
limit: RateLimitConfig,
+ transfer_last: Option<usize>,
param: Value,
) -> Result<Value, Error> {
let output_format = get_output_format(&param);
@@ -319,6 +324,10 @@ async fn pull_datastore(
args["remove-vanished"] = Value::from(remove_vanished);
}
+ if transfer_last.is_some() {
+ args["transfer-last"] = json!(transfer_last)
+ }
+
let mut limit_json = json!(limit);
let limit_map = limit_json
.as_object_mut()
diff --git a/src/server/pull.rs b/src/server/pull.rs
index 65eedf2c..0219d47e 100644
--- a/src/server/pull.rs
+++ b/src/server/pull.rs
@@ -59,6 +59,8 @@ pub(crate) struct PullParameters {
group_filter: Option<Vec<GroupFilter>>,
/// Rate limits for all transfers from `remote`
limit: RateLimitConfig,
+ /// How many snapshots should be transferred at most (taking the newest N snapshots)
+ transfer_last: Option<usize>,
}
impl PullParameters {
@@ -78,6 +80,7 @@ impl PullParameters {
max_depth: Option<usize>,
group_filter: Option<Vec<GroupFilter>>,
limit: RateLimitConfig,
+ transfer_last: Option<usize>,
) -> Result<Self, Error> {
let store = DataStore::lookup_datastore(store, Some(Operation::Write))?;
@@ -109,6 +112,7 @@ impl PullParameters {
max_depth,
group_filter,
limit,
+ transfer_last,
})
}
@@ -632,6 +636,7 @@ async fn pull_group(
let fingerprint = client.fingerprint();
let last_sync = params.store.last_successful_backup(&target_ns, group)?;
+ let last_sync_time = last_sync.unwrap_or(i64::MIN);
let mut remote_snapshots = std::collections::HashSet::new();
@@ -646,6 +651,13 @@ async fn pull_group(
count: 0,
};
+ let total_amount = list.len();
+
+ let cutoff = params
+ .transfer_last
+ .map(|count| total_amount.saturating_sub(count))
+ .unwrap_or_default();
+
for (pos, item) in list.into_iter().enumerate() {
let snapshot = item.backup;
@@ -661,11 +673,13 @@ async fn pull_group(
remote_snapshots.insert(snapshot.time);
- if let Some(last_sync_time) = last_sync {
- if last_sync_time > snapshot.time {
- skip_info.update(snapshot.time);
- continue;
- }
+ if last_sync_time > snapshot.time {
+ skip_info.update(snapshot.time);
+ continue;
+ }
+
+ if pos < cutoff && last_sync_time != snapshot.time {
+ continue;
}
// get updated auth_info (new tickets)
--
2.30.2
^ permalink raw reply [flat|nested] 5+ messages in thread
* [pbs-devel] [PATCH v4 proxmox-backup 2/3] sync job: pull: improve log output
2023-04-18 14:59 [pbs-devel] [PATCH v4 proxmox-backup 0/3] add transfer-last parameter to pull/sync job Stefan Hanreich
2023-04-18 14:59 ` [pbs-devel] [PATCH v4 proxmox-backup 1/3] partial fix #3701: sync job: pull: add transfer-last parameter Stefan Hanreich
@ 2023-04-18 14:59 ` Stefan Hanreich
2023-04-18 14:59 ` [pbs-devel] [PATCH v4 proxmox-backup 3/3] ui: sync job: add transfer-last parameter Stefan Hanreich
2023-04-25 8:06 ` [pbs-devel] applied-series: [PATCH v4 proxmox-backup 0/3] add transfer-last parameter to pull/sync job Fabian Grünbichler
3 siblings, 0 replies; 5+ messages in thread
From: Stefan Hanreich @ 2023-04-18 14:59 UTC (permalink / raw)
To: pbs-devel
Adding an opening line for every group makes parsing the log easier.
We can also remove the 're-sync [...] done' line, because the next
line should be a progress line anyway.
The new output for the sync job/pull logs looks as follows:
- skipped already synced (happens in most jobs, except for first run)
- re-sync of last synced snapshot (if it still exists on source)
- skipped because of transfer-last (if set and skips something)
- sync of new snapshots (if they exist)
Suggested-By: Fabian Grünbichler <f.gruenbichler@proxmox.com>
Signed-off-by: Stefan Hanreich <s.hanreich@proxmox.com>
---
src/server/pull.rs | 54 +++++++++++++++++++++++++++++++++++-----------
1 file changed, 41 insertions(+), 13 deletions(-)
diff --git a/src/server/pull.rs b/src/server/pull.rs
index 0219d47e..e50037ed 100644
--- a/src/server/pull.rs
+++ b/src/server/pull.rs
@@ -535,19 +535,39 @@ async fn pull_snapshot_from(
} else {
task_log!(worker, "re-sync snapshot {}", snapshot.dir());
pull_snapshot(worker, reader, snapshot, downloaded_chunks).await?;
- task_log!(worker, "re-sync snapshot {} done", snapshot.dir());
}
Ok(())
}
+enum SkipReason {
+ AlreadySynced,
+ TransferLast,
+}
+
struct SkipInfo {
oldest: i64,
newest: i64,
count: u64,
+ skip_reason: SkipReason,
}
impl SkipInfo {
+ fn new(skip_reason: SkipReason) -> Self {
+ SkipInfo {
+ oldest: i64::MAX,
+ newest: i64::MIN,
+ count: 0,
+ skip_reason,
+ }
+ }
+
+ fn reset(&mut self) {
+ self.count = 0;
+ self.oldest = i64::MAX;
+ self.newest = i64::MIN;
+ }
+
fn update(&mut self, backup_time: i64) {
self.count += 1;
@@ -575,11 +595,17 @@ impl SkipInfo {
impl std::fmt::Display for SkipInfo {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ let reason_string = match self.skip_reason {
+ SkipReason::AlreadySynced => "older than the newest local snapshot",
+ SkipReason::TransferLast => "due to transfer-last",
+ };
+
write!(
f,
- "skipped: {} snapshot(s) ({}) older than the newest local snapshot",
+ "skipped: {} snapshot(s) ({}) - {}",
self.count,
- self.affected().map_err(|_| std::fmt::Error)?
+ self.affected().map_err(|_| std::fmt::Error)?,
+ reason_string
)
}
}
@@ -610,6 +636,8 @@ async fn pull_group(
remote_ns: BackupNamespace,
progress: &mut StoreProgress,
) -> Result<(), Error> {
+ task_log!(worker, "sync group {}", group);
+
let path = format!(
"api2/json/admin/datastore/{}/snapshots",
params.source.store()
@@ -645,11 +673,8 @@ async fn pull_group(
progress.group_snapshots = list.len() as u64;
- let mut skip_info = SkipInfo {
- oldest: i64::MAX,
- newest: i64::MIN,
- count: 0,
- };
+ let mut already_synced_skip_info = SkipInfo::new(SkipReason::AlreadySynced);
+ let mut transfer_last_skip_info = SkipInfo::new(SkipReason::TransferLast);
let total_amount = list.len();
@@ -674,12 +699,19 @@ async fn pull_group(
remote_snapshots.insert(snapshot.time);
if last_sync_time > snapshot.time {
- skip_info.update(snapshot.time);
+ already_synced_skip_info.update(snapshot.time);
continue;
+ } else if already_synced_skip_info.count > 0 {
+ task_log!(worker, "{}", already_synced_skip_info);
+ already_synced_skip_info.reset();
}
if pos < cutoff && last_sync_time != snapshot.time {
+ transfer_last_skip_info.update(snapshot.time);
continue;
+ } else if transfer_last_skip_info.count > 0 {
+ task_log!(worker, "{}", transfer_last_skip_info);
+ transfer_last_skip_info.reset();
}
// get updated auth_info (new tickets)
@@ -739,10 +771,6 @@ async fn pull_group(
}
}
- if skip_info.count > 0 {
- task_log!(worker, "{}", skip_info);
- }
-
Ok(())
}
--
2.30.2
^ permalink raw reply [flat|nested] 5+ messages in thread
* [pbs-devel] [PATCH v4 proxmox-backup 3/3] ui: sync job: add transfer-last parameter
2023-04-18 14:59 [pbs-devel] [PATCH v4 proxmox-backup 0/3] add transfer-last parameter to pull/sync job Stefan Hanreich
2023-04-18 14:59 ` [pbs-devel] [PATCH v4 proxmox-backup 1/3] partial fix #3701: sync job: pull: add transfer-last parameter Stefan Hanreich
2023-04-18 14:59 ` [pbs-devel] [PATCH v4 proxmox-backup 2/3] sync job: pull: improve log output Stefan Hanreich
@ 2023-04-18 14:59 ` Stefan Hanreich
2023-04-25 8:06 ` [pbs-devel] applied-series: [PATCH v4 proxmox-backup 0/3] add transfer-last parameter to pull/sync job Fabian Grünbichler
3 siblings, 0 replies; 5+ messages in thread
From: Stefan Hanreich @ 2023-04-18 14:59 UTC (permalink / raw)
To: pbs-devel
Signed-off-by: Stefan Hanreich <s.hanreich@proxmox.com>
---
www/config/SyncView.js | 9 ++++++++-
www/window/SyncJobEdit.js | 13 +++++++++++++
2 files changed, 21 insertions(+), 1 deletion(-)
diff --git a/www/config/SyncView.js b/www/config/SyncView.js
index a90e9a70..bf9072cb 100644
--- a/www/config/SyncView.js
+++ b/www/config/SyncView.js
@@ -3,7 +3,7 @@ Ext.define('pbs-sync-jobs-status', {
fields: [
'id', 'owner', 'remote', 'remote-store', 'remote-ns', 'store', 'ns',
'schedule', 'group-filter', 'next-run', 'last-run-upid', 'last-run-state',
- 'last-run-endtime',
+ 'last-run-endtime', 'transfer-last',
{
name: 'duration',
calculate: function(data) {
@@ -241,6 +241,13 @@ Ext.define('PBS.config.SyncJobView', {
renderer: v => v ? Ext.String.htmlEncode(v) : gettext('All'),
width: 80,
},
+ {
+ header: gettext('Transfer Last'),
+ dataIndex: 'transfer-last',
+ flex: 1,
+ sortable: true,
+ hidden: true,
+ },
{
header: gettext('Schedule'),
dataIndex: 'schedule',
diff --git a/www/window/SyncJobEdit.js b/www/window/SyncJobEdit.js
index 948ad5da..48a0c7a9 100644
--- a/www/window/SyncJobEdit.js
+++ b/www/window/SyncJobEdit.js
@@ -232,6 +232,19 @@ Ext.define('PBS.window.SyncJobEdit', {
editable: '{isCreate}',
},
},
+ {
+ fieldLabel: gettext('Transfer Last'),
+ xtype: 'pbsPruneKeepInput',
+ name: 'transfer-last',
+ emptyText: gettext('all'),
+ autoEl: {
+ tag: 'div',
+ 'data-qtip': gettext('The maximum amount of snapshots to be transferred (per group)'),
+ },
+ cbind: {
+ deleteEmpty: '{!isCreate}',
+ },
+ },
],
},
{
--
2.30.2
^ permalink raw reply [flat|nested] 5+ messages in thread