From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from firstgate.proxmox.com (firstgate.proxmox.com [IPv6:2a01:7e0:0:424::9]) by lore.proxmox.com (Postfix) with ESMTPS id 97BF51FF168 for ; Mon, 14 Oct 2024 15:42:50 +0200 (CEST) Received: from firstgate.proxmox.com (localhost [127.0.0.1]) by firstgate.proxmox.com (Proxmox) with ESMTP id 11CA43621B; Mon, 14 Oct 2024 15:43:22 +0200 (CEST) Date: Mon, 14 Oct 2024 15:42:44 +0200 From: Fabian =?iso-8859-1?q?Gr=FCnbichler?= To: Proxmox Backup Server development discussion References: <20240904141155.350454-1-h.laimer@proxmox.com> <20240904141155.350454-8-h.laimer@proxmox.com> In-Reply-To: <20240904141155.350454-8-h.laimer@proxmox.com> MIME-Version: 1.0 User-Agent: astroid/0.16.0 (https://github.com/astroidmail/astroid) Message-Id: <1728907855.3hw8kydnft.astroid@yuna.none> X-SPAM-LEVEL: Spam detection results: 0 AWL 0.049 Adjusted score from AWL reputation of From: address BAYES_00 -1.9 Bayes spam probability is 0 to 1% DMARC_MISSING 0.1 Missing DMARC policy KAM_DMARC_STATUS 0.01 Test Rule for DKIM or SPF Failure with Strict Alignment SPF_HELO_NONE 0.001 SPF: HELO does not publish an SPF Record SPF_PASS -0.001 SPF: sender matches SPF record Subject: Re: [pbs-devel] [PATCH proxmox-backup v12 07/26] api: admin: add (un)mount endpoint for removable datastores X-BeenThere: pbs-devel@lists.proxmox.com X-Mailman-Version: 2.1.29 Precedence: list List-Id: Proxmox Backup Server development discussion List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Reply-To: Proxmox Backup Server development discussion Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit Errors-To: pbs-devel-bounces@lists.proxmox.com Sender: "pbs-devel" On September 4, 2024 4:11 pm, Hannes Laimer wrote: > Signed-off-by: Hannes Laimer > --- > pbs-api-types/src/maintenance.rs | 4 + > src/api2/admin/datastore.rs | 243 +++++++++++++++++++++++++++++-- > 2 files changed, 237 insertions(+), 10 deletions(-) a 
pretty substantial patch - no commit message at all?? > > diff --git a/pbs-api-types/src/maintenance.rs b/pbs-api-types/src/maintenance.rs > index 9f51292e..60181258 100644 > --- a/pbs-api-types/src/maintenance.rs > +++ b/pbs-api-types/src/maintenance.rs > @@ -78,6 +78,10 @@ pub struct MaintenanceMode { > } > > impl MaintenanceMode { > + pub fn new(ty: MaintenanceType, message: Option) -> Self { > + Self { ty, message } > + } > + nit: if we want this, it should be in its own patch and convert existing constructions as well.. or this could be dropped and the call to `new` below could just init the struct.. > /// Used for deciding whether the datastore is cleared from the internal cache after the last > /// task finishes, so all open files are closed. > pub fn is_offline(&self) -> bool { > diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs > index 976617d9..3c95888d 100644 > --- a/src/api2/admin/datastore.rs > +++ b/src/api2/admin/datastore.rs > @@ -3,7 +3,7 @@ > use std::collections::HashSet; > use std::ffi::OsStr; > use std::os::unix::ffi::OsStrExt; > -use std::path::PathBuf; > +use std::path::{Path, PathBuf}; > use std::sync::Arc; > > use anyhow::{bail, format_err, Error}; > @@ -13,7 +13,7 @@ use hyper::{header, Body, Response, StatusCode}; > use serde::Deserialize; > use serde_json::{json, Value}; > use tokio_stream::wrappers::ReceiverStream; > -use tracing::{info, warn}; > +use tracing::{debug, info, warn}; > > use proxmox_async::blocking::WrappedReaderStream; > use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream}; > @@ -29,6 +29,7 @@ use proxmox_sys::fs::{ > file_read_firstline, file_read_optional_string, replace_file, CreateOptions, > }; > use proxmox_time::CalendarEvent; > +use proxmox_worker_task::WorkerTaskContext; > > use pxar::accessor::aio::Accessor; > use pxar::EntryKind; > @@ -36,12 +37,12 @@ use pxar::EntryKind; > use pbs_api_types::{ > print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, 
BackupNamespace, BackupType, > Counts, CryptMode, DataStoreConfig, DataStoreListItem, DataStoreStatus, > - GarbageCollectionJobStatus, GroupListItem, JobScheduleStatus, KeepOptions, Operation, > - PruneJobOptions, SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, > - BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, > - DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, > - PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, > - PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA, > + GarbageCollectionJobStatus, GroupListItem, JobScheduleStatus, KeepOptions, MaintenanceMode, > + MaintenanceType, Operation, PruneJobOptions, SnapshotListItem, SnapshotVerifyState, > + BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, > + BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH, > + NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, > + PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA, > VERIFICATION_OUTDATED_AFTER_SCHEMA, > }; > use pbs_client::pxar::{create_tar, create_zip}; > @@ -57,8 +58,8 @@ use pbs_datastore::index::IndexFile; > use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME}; > use pbs_datastore::prune::compute_prune_info; > use pbs_datastore::{ > - check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader, > - StoreProgress, CATALOG_NAME, > + check_backup_owner, is_datastore_available, task_tracking, BackupDir, BackupGroup, DataStore, > + LocalChunkReader, StoreProgress, CATALOG_NAME, > }; > use pbs_tools::json::required_string_param; > use proxmox_rest_server::{formatter, WorkerTask}; > @@ -2384,6 +2385,226 @@ pub async fn set_backup_owner( > .await? > } > > +/// Here we > +/// > +/// 1. 
mount the removable device to `/mount/` > +/// 2. bind mount `/mount//` to `/mnt/datastore/` > +/// 3. unmount `/mount/` couldn't we instead mount directly, and adapt get_absolute_path to return /mnt/datastore/{datastore.name}/{datastore.path} ? the "concurrent mounts" can be solved by locking (using the UUID or datastore name as lock scope) or by going via a Mount maintenance mode.. seems to me like there's a lot less that could go wrong/fail in that case? but maybe I am missing some background that should have been in the commit message ;) if I think of the semantics of "removable datastores", then if we want to support multiple such datastores on a single device (at least as an option in the future), then unmounting only makes sense when thinking of the backing device (after all, I want to unmount to unplug it, and I don't want to do that 10 times if the backing device contains 10 datastores, and keep track of them all, and ensure nothing mounts them again in the meantime, ..).. so only mounting it once in the first place (and pointing the datastore at the relevant subdir of the mountpoint) seems like the better option?
> +pub fn do_mount_device(datastore: DataStoreConfig) -> Result<(), Error> { > + if let (Some(uuid), Some(mount_point)) = ( > + datastore.backing_device.as_ref(), > + datastore.get_mount_point(), > + ) { > + if pbs_datastore::is_datastore_available(&datastore) { > + bail!("datastore '{}' is already mounted", datastore.name); > + } > + let tmp_mount_path = format!( > + "{}/{:x}", > + pbs_buildcfg::rundir!("/mount"), > + proxmox_uuid::Uuid::generate() > + ); > + > + let default_options = proxmox_sys::fs::CreateOptions::new(); > + proxmox_sys::fs::create_path( > + &tmp_mount_path, > + Some(default_options.clone()), > + Some(default_options.clone()), > + )?; > + > + debug!("mounting '{uuid}' to '{}'", tmp_mount_path); > + crate::tools::disks::mount_by_uuid(uuid, Path::new(&tmp_mount_path))?; > + > + let full_store_path = format!( > + "{tmp_mount_path}/{}", > + datastore.path.trim_start_matches('/') > + ); > + > + proxmox_sys::fs::create_path( > + &mount_point, > + Some(default_options.clone()), > + Some(default_options.clone()), > + )?; > + > + // can't be created before it is mounted, so we have to do it here > + proxmox_sys::fs::create_path( > + &full_store_path, > + Some(default_options.clone()), > + Some(default_options.clone()), > + )?; shouldn't this create_path call be limited to the initial creation of the datastore? in all other cases it should already be there.. 
> + > + info!( > + "mounting '{}'({}) to '{}'", > + datastore.name, datastore.path, mount_point > + ); > + if let Err(err) = > + crate::tools::disks::bind_mount(Path::new(&full_store_path), Path::new(&mount_point)) > + { > + debug!("unmounting '{}'", tmp_mount_path); > + let _ = crate::tools::disks::unmount_by_mountpoint(&tmp_mount_path); > + let _ = std::fs::remove_dir(std::path::Path::new(&tmp_mount_path)); > + return Err(format_err!( > + "Datastore '{}' cound not be mounted: {}.", > + datastore.name, > + err > + )); > + } > + > + debug!("unmounting '{}'", tmp_mount_path); > + crate::tools::disks::unmount_by_mountpoint(&tmp_mount_path)?; > + std::fs::remove_dir(std::path::Path::new(&tmp_mount_path))?; > + > + Ok(()) > + } else { > + Err(format_err!( > + "Datastore '{}' cannot be mounted because it is not removable.", > + datastore.name > + )) > + } > +} > + > +#[api( > + protected: true, > + input: { > + properties: { > + store: { > + schema: DATASTORE_SCHEMA, > + }, > + } > + }, > + returns: { > + schema: UPID_SCHEMA, > + }, > + access: { > + permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false), > + }, > +)] > +/// Mount removable datastore. 
> +pub fn mount(store: String, rpcenv: &mut dyn RpcEnvironment) -> Result { > + let (section_config, _digest) = pbs_config::datastore::config()?; > + let datastore: DataStoreConfig = section_config.lookup("datastore", &store)?; > + > + if datastore.backing_device.is_none() { > + bail!("datastore '{store}' is not removable"); > + } > + > + let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; > + let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; > + > + let upid = WorkerTask::new_thread( > + "mount-device", > + Some(store), > + auth_id.to_string(), > + to_stdout, > + move |_worker| do_mount_device(datastore), > + )?; > + > + Ok(json!(upid)) > +} > + > +fn do_unmount_device( > + datastore: DataStoreConfig, > + worker: Option<&dyn WorkerTaskContext>, > +) -> Result<(), Error> { > + let mut active_operations = task_tracking::get_active_operations(&datastore.name)?; > + let mut old_status = String::new(); > + while active_operations.read + active_operations.write > 0 { > + if let Some(worker) = worker { > + if worker.abort_requested() { > + bail!("aborted, due to user request"); > + } > + let status = format!( > + "cannot unmount yet, still {} read and {} write operations active", > + active_operations.read, active_operations.write > + ); > + if status != old_status { > + info!("{status}"); > + old_status = status; > + } > + } > + std::thread::sleep(std::time::Duration::from_millis(250)); I think once per second is probably enough? on a busy/big server there might be a lot of operations that we are waiting for (and thus a lot of decrements of the counters).. > + active_operations = task_tracking::get_active_operations(&datastore.name)?; > + } > + if let Some(mount_point) = datastore.get_mount_point() { shouldn't this if be right at the start? 
> + crate::tools::disks::unmount_by_mountpoint(&mount_point)?; > + > + let _lock = pbs_config::datastore::lock_config()?; > + let (mut section_config, _digest) = pbs_config::datastore::config()?; > + let mut store_config: DataStoreConfig = > + section_config.lookup("datastore", &datastore.name)?; should we re-check the current maintenance_mode here to avoid races? > + store_config.maintenance_mode = None; should use set_maintenance_mode > + section_config.set_data(&datastore.name, "datastore", &store_config)?; > + pbs_config::datastore::save_config(§ion_config)?; > + } > + Ok(()) > +} > + > +#[api( > + protected: true, > + input: { > + properties: { > + store: { schema: DATASTORE_SCHEMA }, > + }, > + }, > + returns: { > + schema: UPID_SCHEMA, > + }, > + access: { > + permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true), > + } > +)] > +/// Unmount a removable device that is associated with the datastore > +pub async fn unmount(store: String, rpcenv: &mut dyn RpcEnvironment) -> Result { > + let _lock = pbs_config::datastore::lock_config()?; > + let (mut section_config, _digest) = pbs_config::datastore::config()?; > + let mut datastore: DataStoreConfig = section_config.lookup("datastore", &store)?; > + > + if datastore.backing_device.is_none() { > + bail!("datastore '{store}' is not removable"); > + } > + > + if !is_datastore_available(&datastore) { > + bail!("datastore '{store}' is not mounted"); > + } > + > + datastore.set_maintenance_mode(Some(MaintenanceMode::new(MaintenanceType::Unmount, None)))?; > + section_config.set_data(&store, "datastore", &datastore)?; > + pbs_config::datastore::save_config(§ion_config)?; > + > + drop(_lock); > + > + let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; > + let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; > + > + if let Ok(proxy_pid) = proxmox_rest_server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN) > + { > + let sock = 
proxmox_daemon::command_socket::path_from_pid(proxy_pid); > + let _ = proxmox_daemon::command_socket::send_raw( > + sock, > + &format!( > + "{{\"command\":\"update-datastore-cache\",\"args\":\"{}\"}}\n", > + &store > + ), > + ) > + .await; > + } > + > + let upid = WorkerTask::new_thread( > + "unmount-device", > + Some(store), > + auth_id.to_string(), > + to_stdout, > + move |worker| do_unmount_device(datastore, Some(&worker)), > + )?; > + > + Ok(json!(upid)) > +} > + > #[sortable] > const DATASTORE_INFO_SUBDIRS: SubdirMap = &[ > ( > @@ -2422,6 +2643,7 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[ > .get(&API_METHOD_LIST_GROUPS) > .delete(&API_METHOD_DELETE_GROUP), > ), > + ("mount", &Router::new().post(&API_METHOD_MOUNT)), > ( > "namespace", > // FIXME: move into datastore:: sub-module?! > @@ -2456,6 +2678,7 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[ > .delete(&API_METHOD_DELETE_SNAPSHOT), > ), > ("status", &Router::new().get(&API_METHOD_STATUS)), > + ("unmount", &Router::new().post(&API_METHOD_UNMOUNT)), > ( > "upload-backup-log", > &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG), > -- > 2.39.2 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel@lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > > _______________________________________________ pbs-devel mailing list pbs-devel@lists.proxmox.com https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel