From: Fabian Grünbichler
To: pbs-devel@lists.proxmox.com
Date: Fri, 11 Sep 2020 14:34:38 +0200
Message-Id: <20200911123439.1876094-6-f.gruenbichler@proxmox.com>
X-Mailer: git-send-email 2.20.1
In-Reply-To: <20200911123439.1876094-1-f.gruenbichler@proxmox.com>
References: <20200911123439.1876094-1-f.gruenbichler@proxmox.com>
Subject: [pbs-devel] [PATCH proxmox-backup 5/6] BackupDir: make constructor fallible

since converting from an i64 epoch timestamp to a DateTime is not always
possible. previously, passing an invalid backup-time from client to server (or
vice versa) panicked the corresponding tokio task. now we get a proper error
message including the invalid timestamp.

Signed-off-by: Fabian Grünbichler
---

Notes:
    needs a corresponding patch in proxmox-backup-qemu.

    maybe we also want to provide constructors that take a DateTime: there are
    a few callers that already have one and convert it to an i64 to create a
    BackupDir, which immediately converts it back to a DateTime again.
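For context, a standalone sketch (illustration only, not part of this patch;
the helper name parse_backup_time is made up): chrono's Utc.timestamp() panics
on an out-of-range i64 and takes the calling (tokio) task down with it, while
Utc.timestamp_opt() returns a LocalResult that can be turned into a regular
Error, which is what the new new_with_group() does.

    use anyhow::{bail, Error};
    use chrono::{DateTime, LocalResult, TimeZone, Utc};

    // hypothetical helper mirroring the fallible conversion in new_with_group():
    // convert an i64 epoch timestamp without panicking on invalid input
    fn parse_backup_time(timestamp: i64) -> Result<DateTime<Utc>, Error> {
        match Utc.timestamp_opt(timestamp, 0) {
            LocalResult::Single(time) => Ok(time),
            _ => bail!("can't create BackupDir with invalid backup time {}", timestamp),
        }
    }

    fn main() {
        // Utc.timestamp(i64::MAX, 0) would panic here; the fallible variant
        // yields an error message containing the invalid timestamp instead
        match parse_backup_time(i64::MAX) {
            Ok(time) => println!("valid backup time: {}", time),
            Err(err) => eprintln!("rejected: {}", err),
        }
    }
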
 src/api2/admin/datastore.rs      | 20 +++++++++---------
 src/api2/backup.rs               |  2 +-
 src/api2/reader.rs               |  2 +-
 src/backup/backup_info.rs        | 36 +++++++++++++++++++-------------
 src/bin/proxmox-backup-client.rs | 13 ++++++------
 src/client/pull.rs               |  2 +-
 6 files changed, 41 insertions(+), 34 deletions(-)

diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index 85c84df4..be2796db 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -230,7 +230,7 @@ pub fn list_snapshot_files(

     let datastore = DataStore::lookup_datastore(&store)?;

-    let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
+    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;

     let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
     if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
@@ -280,7 +280,7 @@ fn delete_snapshot(
     let user_info = CachedUserInfo::new()?;
     let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

-    let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
+    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;

     let datastore = DataStore::lookup_datastore(&store)?;

@@ -490,7 +490,7 @@ pub fn verify(
     match (backup_type, backup_id, backup_time) {
         (Some(backup_type), Some(backup_id), Some(backup_time)) => {
             worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
-            let dir = BackupDir::new(backup_type, backup_id, backup_time);
+            let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
             backup_dir = Some(dir);
         }
         (Some(backup_type), Some(backup_id), None) => {
@@ -897,7 +897,7 @@ fn download_file(
         let backup_id = tools::required_string_param(&param, "backup-id")?;
         let backup_time = tools::required_integer_param(&param, "backup-time")?;

-        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

         let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
         if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
@@ -970,7 +970,7 @@ fn download_file_decoded(
         let backup_id = tools::required_string_param(&param, "backup-id")?;
         let backup_time = tools::required_integer_param(&param, "backup-time")?;

-        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

         let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
         if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
@@ -1083,7 +1083,7 @@ fn upload_backup_log(
     let backup_id = tools::required_string_param(&param, "backup-id")?;
     let backup_time = tools::required_integer_param(&param, "backup-time")?;

-    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

     let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     check_backup_owner(&datastore, backup_dir.group(), &userid)?;
@@ -1159,7 +1159,7 @@ fn catalog(
     let user_info = CachedUserInfo::new()?;
     let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

-    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

     let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
     if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
@@ -1276,7 +1276,7 @@ fn pxar_file_download(
         let backup_id = tools::required_string_param(&param, "backup-id")?;
         let backup_time = tools::required_integer_param(&param, "backup-time")?;

-        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

         let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
         if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
@@ -1417,7 +1417,7 @@ fn get_notes(
     let user_info = CachedUserInfo::new()?;
     let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

-    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

     let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
     if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
@@ -1470,7 +1470,7 @@ fn set_notes(
     let user_info = CachedUserInfo::new()?;
     let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

-    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

     let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
     if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
diff --git a/src/api2/backup.rs b/src/api2/backup.rs
index c00f9be8..9420b146 100644
--- a/src/api2/backup.rs
+++ b/src/api2/backup.rs
@@ -114,7 +114,7 @@ async move {
         }

         let last_backup = BackupInfo::last_backup(&datastore.base_path(), &backup_group, true).unwrap_or(None);
-        let backup_dir = BackupDir::new_with_group(backup_group.clone(), backup_time);
+        let backup_dir = BackupDir::new_with_group(backup_group.clone(), backup_time)?;

         let _last_guard = if let Some(last) = &last_backup {
             if backup_dir.backup_time() <= last.backup_dir.backup_time() {
diff --git a/src/api2/reader.rs b/src/api2/reader.rs
index cf82af06..5252d2e9 100644
--- a/src/api2/reader.rs
+++ b/src/api2/reader.rs
@@ -83,7 +83,7 @@ fn upgrade_to_backup_reader_protocol(

         let env_type = rpcenv.env_type();

-        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

         let path = datastore.base_path();
         //let files = BackupInfo::list_files(&path, &backup_dir)?;
diff --git a/src/backup/backup_info.rs b/src/backup/backup_info.rs
index 4dcf897a..023625f5 100644
--- a/src/backup/backup_info.rs
+++ b/src/backup/backup_info.rs
@@ -2,9 +2,10 @@ use crate::tools;

 use anyhow::{bail, format_err, Error};
 use regex::Regex;
+use std::convert::TryFrom;
 use std::os::unix::io::RawFd;

-use chrono::{DateTime, TimeZone, SecondsFormat, Utc};
+use chrono::{DateTime, LocalResult, TimeZone, SecondsFormat, Utc};

 use std::path::{PathBuf, Path};
 use lazy_static::lazy_static;
@@ -106,7 +107,7 @@ impl BackupGroup {
                 if file_type != nix::dir::Type::Directory { return Ok(()); }

                 let dt = backup_time.parse::<DateTime<Utc>>()?;
-                let backup_dir = BackupDir::new(self.backup_type.clone(), self.backup_id.clone(), dt.timestamp());
+                let backup_dir = BackupDir::new(self.backup_type.clone(), self.backup_id.clone(), dt.timestamp())?;
                 let files = list_backup_files(l2_fd, backup_time)?;

                 list.push(BackupInfo { backup_dir, files });
@@ -208,19 +209,22 @@ pub struct BackupDir {

 impl BackupDir {

-    pub fn new<T, U>(backup_type: T, backup_id: U, timestamp: i64) -> Self
+    pub fn new<T, U>(backup_type: T, backup_id: U, timestamp: i64) -> Result<Self, Error>
     where
         T: Into<String>,
         U: Into<String>,
     {
-        // Note: makes sure that nanoseconds is 0
-        Self {
-            group: BackupGroup::new(backup_type.into(), backup_id.into()),
-            backup_time: Utc.timestamp(timestamp, 0),
-        }
+        let group = BackupGroup::new(backup_type.into(), backup_id.into());
+        BackupDir::new_with_group(group, timestamp)
     }
-    pub fn new_with_group(group: BackupGroup, timestamp: i64) -> Self {
-        Self { group, backup_time: Utc.timestamp(timestamp, 0) }
+
+    pub fn new_with_group(group: BackupGroup, timestamp: i64) -> Result<Self, Error> {
+        let backup_time = match Utc.timestamp_opt(timestamp, 0) {
+            LocalResult::Single(time) => time,
+            _ => bail!("can't create BackupDir with invalid backup time {}", timestamp),
+        };
+
+        Ok(Self { group, backup_time })
     }

     pub fn group(&self) -> &BackupGroup {
@@ -257,7 +261,7 @@ impl std::str::FromStr for BackupDir {

         let group = BackupGroup::new(cap.get(1).unwrap().as_str(), cap.get(2).unwrap().as_str());
         let backup_time = cap.get(3).unwrap().as_str().parse::<DateTime<Utc>>()?;

-        Ok(BackupDir::from((group, backup_time.timestamp())))
+        BackupDir::try_from((group, backup_time.timestamp()))
     }
 }
@@ -270,9 +274,11 @@ impl std::fmt::Display for BackupDir {
     }
 }

-impl From<(BackupGroup, i64)> for BackupDir {
-    fn from((group, timestamp): (BackupGroup, i64)) -> Self {
-        Self { group, backup_time: Utc.timestamp(timestamp, 0) }
+impl TryFrom<(BackupGroup, i64)> for BackupDir {
+    type Error = Error;
+
+    fn try_from((group, timestamp): (BackupGroup, i64)) -> Result<Self, Error> {
+        BackupDir::new_with_group(group, timestamp)
     }
 }

@@ -334,7 +340,7 @@ impl BackupInfo {
             if file_type != nix::dir::Type::Directory { return Ok(()); }

             let dt = backup_time.parse::<DateTime<Utc>>()?;
-            let backup_dir = BackupDir::new(backup_type, backup_id, dt.timestamp());
+            let backup_dir = BackupDir::new(backup_type, backup_id, dt.timestamp())?;

             let files = list_backup_files(l2_fd, backup_time)?;
diff --git a/src/bin/proxmox-backup-client.rs b/src/bin/proxmox-backup-client.rs
index 19f8ccda..aea715aa 100644
--- a/src/bin/proxmox-backup-client.rs
+++ b/src/bin/proxmox-backup-client.rs
@@ -376,7 +376,7 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {

     let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
         let item: GroupListItem = serde_json::from_value(record.to_owned())?;
-        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup);
+        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup)?;
         Ok(snapshot.relative_path().to_str().unwrap().to_owned())
     };
@@ -447,7 +447,7 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {

     let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
         let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
-        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
+        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
         Ok(snapshot.relative_path().to_str().unwrap().to_owned())
     };
@@ -1046,7 +1046,7 @@ async fn create_backup(
         None
     };

-    let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
+    let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp())?;
     let mut manifest = BackupManifest::new(snapshot);

     let mut catalog = None;
@@ -1571,7 +1571,7 @@ async fn prune_async(mut param: Value) -> Result<Value, Error> {

     let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
         let item: PruneListItem = serde_json::from_value(record.to_owned())?;
-        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
+        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
         Ok(snapshot.relative_path().to_str().unwrap().to_owned())
     };
@@ -1763,8 +1763,9 @@ async fn complete_backup_snapshot_do(param: &HashMap) -> Vec