From: "Fabian Grünbichler" <f.gruenbichler@proxmox.com>
To: pbs-devel@lists.proxmox.com
Subject: [pbs-devel] [PATCH proxmox-backup 5/6] BackupDir: make constructor fallible
Date: Fri, 11 Sep 2020 14:34:38 +0200
Message-ID: <20200911123439.1876094-6-f.gruenbichler@proxmox.com>
In-Reply-To: <20200911123439.1876094-1-f.gruenbichler@proxmox.com>
since converting from an i64 epoch timestamp to a DateTime is not always
possible. previously, passing an invalid backup-time from client to server
(or vice versa) panicked the corresponding tokio task. now we get a proper
error message that includes the invalid timestamp.
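for illustration (i64::MAX just stands in for any out-of-range backup-time),
the difference between chrono's panicking and checked conversion:

    Utc.timestamp(i64::MAX, 0)     // panics on out-of-range seconds
    Utc.timestamp_opt(i64::MAX, 0) // returns LocalResult::None, which the
                                   // constructor can turn into an Error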
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
Notes:
needs corresponding patch in proxmox-backup-qemu.
maybe we also want to provide constructors that take a DateTime:
there are a few callers that already have one and convert it to an
i64 to create a BackupDir, which immediately converts it back to a
DateTime again (see the sketch below).
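a rough sketch of what that could look like (illustrative only, not part
of this series; it just forwards to the fallible i64 constructor):

    impl BackupDir {
        pub fn new_with_datetime(group: BackupGroup, backup_time: DateTime<Utc>) -> Result<Self, Error> {
            // go through the same fallible path, which also truncates
            // the timestamp to whole seconds (nanoseconds = 0)
            Self::new_with_group(group, backup_time.timestamp())
        }
    }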
src/api2/admin/datastore.rs | 20 +++++++++---------
src/api2/backup.rs | 2 +-
src/api2/reader.rs | 2 +-
src/backup/backup_info.rs | 36 +++++++++++++++++++-------------
src/bin/proxmox-backup-client.rs | 13 ++++++------
src/client/pull.rs | 2 +-
6 files changed, 41 insertions(+), 34 deletions(-)
diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index 85c84df4..be2796db 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -230,7 +230,7 @@ pub fn list_snapshot_files(
let datastore = DataStore::lookup_datastore(&store)?;
- let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
+ let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
@@ -280,7 +280,7 @@ fn delete_snapshot(
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
- let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
+ let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
let datastore = DataStore::lookup_datastore(&store)?;
@@ -490,7 +490,7 @@ pub fn verify(
match (backup_type, backup_id, backup_time) {
(Some(backup_type), Some(backup_id), Some(backup_time)) => {
worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
- let dir = BackupDir::new(backup_type, backup_id, backup_time);
+ let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
backup_dir = Some(dir);
}
(Some(backup_type), Some(backup_id), None) => {
@@ -897,7 +897,7 @@ fn download_file(
let backup_id = tools::required_string_param(&param, "backup-id")?;
let backup_time = tools::required_integer_param(&param, "backup-time")?;
- let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+ let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
@@ -970,7 +970,7 @@ fn download_file_decoded(
let backup_id = tools::required_string_param(&param, "backup-id")?;
let backup_time = tools::required_integer_param(&param, "backup-time")?;
- let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+ let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
@@ -1083,7 +1083,7 @@ fn upload_backup_log(
let backup_id = tools::required_string_param(&param, "backup-id")?;
let backup_time = tools::required_integer_param(&param, "backup-time")?;
- let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+ let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let userid: Userid = rpcenv.get_user().unwrap().parse()?;
check_backup_owner(&datastore, backup_dir.group(), &userid)?;
@@ -1159,7 +1159,7 @@ fn catalog(
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
- let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+ let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
@@ -1276,7 +1276,7 @@ fn pxar_file_download(
let backup_id = tools::required_string_param(&param, "backup-id")?;
let backup_time = tools::required_integer_param(&param, "backup-time")?;
- let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+ let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
@@ -1417,7 +1417,7 @@ fn get_notes(
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
- let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+ let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
@@ -1470,7 +1470,7 @@ fn set_notes(
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
- let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+ let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
diff --git a/src/api2/backup.rs b/src/api2/backup.rs
index c00f9be8..9420b146 100644
--- a/src/api2/backup.rs
+++ b/src/api2/backup.rs
@@ -114,7 +114,7 @@ async move {
}
let last_backup = BackupInfo::last_backup(&datastore.base_path(), &backup_group, true).unwrap_or(None);
- let backup_dir = BackupDir::new_with_group(backup_group.clone(), backup_time);
+ let backup_dir = BackupDir::new_with_group(backup_group.clone(), backup_time)?;
let _last_guard = if let Some(last) = &last_backup {
if backup_dir.backup_time() <= last.backup_dir.backup_time() {
diff --git a/src/api2/reader.rs b/src/api2/reader.rs
index cf82af06..5252d2e9 100644
--- a/src/api2/reader.rs
+++ b/src/api2/reader.rs
@@ -83,7 +83,7 @@ fn upgrade_to_backup_reader_protocol(
let env_type = rpcenv.env_type();
- let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+ let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let path = datastore.base_path();
//let files = BackupInfo::list_files(&path, &backup_dir)?;
diff --git a/src/backup/backup_info.rs b/src/backup/backup_info.rs
index 4dcf897a..023625f5 100644
--- a/src/backup/backup_info.rs
+++ b/src/backup/backup_info.rs
@@ -2,9 +2,10 @@ use crate::tools;
use anyhow::{bail, format_err, Error};
use regex::Regex;
+use std::convert::TryFrom;
use std::os::unix::io::RawFd;
-use chrono::{DateTime, TimeZone, SecondsFormat, Utc};
+use chrono::{DateTime, LocalResult, TimeZone, SecondsFormat, Utc};
use std::path::{PathBuf, Path};
use lazy_static::lazy_static;
@@ -106,7 +107,7 @@ impl BackupGroup {
if file_type != nix::dir::Type::Directory { return Ok(()); }
let dt = backup_time.parse::<DateTime<Utc>>()?;
- let backup_dir = BackupDir::new(self.backup_type.clone(), self.backup_id.clone(), dt.timestamp());
+ let backup_dir = BackupDir::new(self.backup_type.clone(), self.backup_id.clone(), dt.timestamp())?;
let files = list_backup_files(l2_fd, backup_time)?;
list.push(BackupInfo { backup_dir, files });
@@ -208,19 +209,22 @@ pub struct BackupDir {
impl BackupDir {
- pub fn new<T, U>(backup_type: T, backup_id: U, timestamp: i64) -> Self
+ pub fn new<T, U>(backup_type: T, backup_id: U, timestamp: i64) -> Result<Self, Error>
where
T: Into<String>,
U: Into<String>,
{
- // Note: makes sure that nanoseconds is 0
- Self {
- group: BackupGroup::new(backup_type.into(), backup_id.into()),
- backup_time: Utc.timestamp(timestamp, 0),
- }
+ let group = BackupGroup::new(backup_type.into(), backup_id.into());
+ BackupDir::new_with_group(group, timestamp)
}
- pub fn new_with_group(group: BackupGroup, timestamp: i64) -> Self {
- Self { group, backup_time: Utc.timestamp(timestamp, 0) }
+
+ pub fn new_with_group(group: BackupGroup, timestamp: i64) -> Result<Self, Error> {
+ let backup_time = match Utc.timestamp_opt(timestamp, 0) {
+ LocalResult::Single(time) => time,
+ _ => bail!("can't create BackupDir with invalid backup time {}", timestamp),
+ };
+
+ Ok(Self { group, backup_time })
}
pub fn group(&self) -> &BackupGroup {
@@ -257,7 +261,7 @@ impl std::str::FromStr for BackupDir {
let group = BackupGroup::new(cap.get(1).unwrap().as_str(), cap.get(2).unwrap().as_str());
let backup_time = cap.get(3).unwrap().as_str().parse::<DateTime<Utc>>()?;
- Ok(BackupDir::from((group, backup_time.timestamp())))
+ BackupDir::try_from((group, backup_time.timestamp()))
}
}
@@ -270,9 +274,11 @@ impl std::fmt::Display for BackupDir {
}
}
-impl From<(BackupGroup, i64)> for BackupDir {
- fn from((group, timestamp): (BackupGroup, i64)) -> Self {
- Self { group, backup_time: Utc.timestamp(timestamp, 0) }
+impl TryFrom<(BackupGroup, i64)> for BackupDir {
+ type Error = Error;
+
+ fn try_from((group, timestamp): (BackupGroup, i64)) -> Result<Self, Error> {
+ BackupDir::new_with_group(group, timestamp)
}
}
@@ -334,7 +340,7 @@ impl BackupInfo {
if file_type != nix::dir::Type::Directory { return Ok(()); }
let dt = backup_time.parse::<DateTime<Utc>>()?;
- let backup_dir = BackupDir::new(backup_type, backup_id, dt.timestamp());
+ let backup_dir = BackupDir::new(backup_type, backup_id, dt.timestamp())?;
let files = list_backup_files(l2_fd, backup_time)?;
diff --git a/src/bin/proxmox-backup-client.rs b/src/bin/proxmox-backup-client.rs
index 19f8ccda..aea715aa 100644
--- a/src/bin/proxmox-backup-client.rs
+++ b/src/bin/proxmox-backup-client.rs
@@ -376,7 +376,7 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {
let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
let item: GroupListItem = serde_json::from_value(record.to_owned())?;
- let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup);
+ let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup)?;
Ok(snapshot.relative_path().to_str().unwrap().to_owned())
};
@@ -447,7 +447,7 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {
let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
- let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
+ let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
Ok(snapshot.relative_path().to_str().unwrap().to_owned())
};
@@ -1046,7 +1046,7 @@ async fn create_backup(
None
};
- let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
+ let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp())?;
let mut manifest = BackupManifest::new(snapshot);
let mut catalog = None;
@@ -1571,7 +1571,7 @@ async fn prune_async(mut param: Value) -> Result<Value, Error> {
let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
let item: PruneListItem = serde_json::from_value(record.to_owned())?;
- let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
+ let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
Ok(snapshot.relative_path().to_str().unwrap().to_owned())
};
@@ -1763,8 +1763,9 @@ async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<Str
if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
(item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
{
- let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
- result.push(snapshot.relative_path().to_str().unwrap().to_owned());
+ if let Ok(snapshot) = BackupDir::new(backup_type, backup_id, backup_time) {
+ result.push(snapshot.relative_path().to_str().unwrap().to_owned());
+ }
}
}
}
diff --git a/src/client/pull.rs b/src/client/pull.rs
index 2428051a..ab7e9891 100644
--- a/src/client/pull.rs
+++ b/src/client/pull.rs
@@ -347,7 +347,7 @@ pub async fn pull_group(
let mut remote_snapshots = std::collections::HashSet::new();
for item in list {
- let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
+ let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
// in-progress backups can't be synced
if let None = item.size {
--
2.20.1
Thread overview: 11+ messages
2020-09-11 12:34 [pbs-devel] [PATCH proxmox-backup(-qemu) 0/6] improve timestamp handling Fabian Grünbichler
2020-09-11 12:34 ` [pbs-devel] [PATCH proxmox-backup 1/6] catalog dump: preserve original mtime Fabian Grünbichler
2020-09-11 12:34 ` [pbs-devel] [PATCH proxmox-backup 2/6] don't truncate DateTime nanoseconds Fabian Grünbichler
2020-09-11 12:34 ` [pbs-devel] [PATCH proxmox-backup 3/6] use non-panicky timestamp_opt where appropriate Fabian Grünbichler
2020-09-11 12:34 ` [pbs-devel] [PATCH proxmox-backup 4/6] handle invalid mtime when formating entries Fabian Grünbichler
2020-09-11 12:34 ` Fabian Grünbichler [this message]
2020-09-11 14:12 ` [pbs-devel] [PATCH proxmox-backup 5/6] BackupDir: make constructor fallible Dietmar Maurer
2020-09-11 14:28 ` Dietmar Maurer
2020-09-11 12:34 ` [pbs-devel] [PATCH proxmox-backup-qemu 6/6] update to new BackupDir constructor Fabian Grünbichler
2020-09-11 14:15 ` [pbs-devel] applied: " Dietmar Maurer
2020-09-11 14:00 ` [pbs-devel] applied: [PATCH proxmox-backup(-qemu) 0/6] improve timestamp handling Dietmar Maurer