From: Hannes Laimer <h.laimer@proxmox.com>
To: pbs-devel@lists.proxmox.com
Date: Wed, 13 Nov 2024 16:00:43 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v13 07/26] api: admin: add (un)mount endpoint for removable datastores
Message-Id: <20241113150102.164820-8-h.laimer@proxmox.com>
X-Mailer: git-send-email 2.39.5
In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com>
References: <20241113150102.164820-1-h.laimer@proxmox.com>

Removable datastores can be mounted unless
 - they are already mounted
 - their device is not present

For unmounting, the maintenance mode is set to `unmount`, which prohibits
starting any new tasks involving IO. This mode is unset either
 - on completion of the unmount
 - on abort of the unmount task

If the unmounting itself fails, the maintenance mode stays in place and
requires manual intervention by unsetting it directly in the config file.
This is intentional: unmounting should not fail, and if it does, the
situation should be looked at.
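For illustration only, the new endpoints sit under the per-datastore admin API
(the mount/unmount sub-paths come from the router entries added below; the
surrounding path and the store name are placeholders following the usual PBS
API layout), and both return the UPID of the spawned worker task:

    POST /api2/json/admin/datastore/{store}/mount
    POST /api2/json/admin/datastore/{store}/unmount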
Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
---
changes since v12:
* allow multiple stores on one device
* add best effort attempt to unmount after failed creation

 src/api2/admin/datastore.rs | 267 ++++++++++++++++++++++++++++++++++--
 1 file changed, 257 insertions(+), 10 deletions(-)

diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index b73ad0ff..a12262e7 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -3,7 +3,7 @@
 use std::collections::HashSet;
 use std::ffi::OsStr;
 use std::os::unix::ffi::OsStrExt;
-use std::path::PathBuf;
+use std::path::{Path, PathBuf};
 use std::sync::Arc;
 
 use anyhow::{bail, format_err, Error};
@@ -13,7 +13,7 @@ use hyper::{header, Body, Response, StatusCode};
 use serde::Deserialize;
 use serde_json::{json, Value};
 use tokio_stream::wrappers::ReceiverStream;
-use tracing::{info, warn};
+use tracing::{debug, info, warn};
 
 use proxmox_async::blocking::WrappedReaderStream;
 use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
@@ -29,6 +29,7 @@ use proxmox_sys::fs::{
     file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
 };
 use proxmox_time::CalendarEvent;
+use proxmox_worker_task::WorkerTaskContext;
 
 use pxar::accessor::aio::Accessor;
 use pxar::EntryKind;
@@ -36,12 +37,12 @@ use pxar::EntryKind;
 use pbs_api_types::{
     print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType,
     Counts, CryptMode, DataStoreConfig, DataStoreListItem, DataStoreStatus,
-    GarbageCollectionJobStatus, GroupListItem, JobScheduleStatus, KeepOptions, Operation,
-    PruneJobOptions, SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
-    BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA,
-    DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA,
-    PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
-    PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA,
+    GarbageCollectionJobStatus, GroupListItem, JobScheduleStatus, KeepOptions, MaintenanceMode,
+    MaintenanceType, Operation, PruneJobOptions, SnapshotListItem, SnapshotVerifyState,
+    BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
+    BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH,
+    NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY,
+    PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA,
     VERIFICATION_OUTDATED_AFTER_SCHEMA,
 };
 use pbs_client::pxar::{create_tar, create_zip};
@@ -57,8 +58,8 @@ use pbs_datastore::index::IndexFile;
 use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
 use pbs_datastore::prune::compute_prune_info;
 use pbs_datastore::{
-    check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
-    StoreProgress, CATALOG_NAME,
+    check_backup_owner, is_datastore_mounted_at, task_tracking, BackupDir, BackupGroup, DataStore,
+    LocalChunkReader, StoreProgress, CATALOG_NAME,
 };
 use pbs_tools::json::required_string_param;
 use proxmox_rest_server::{formatter, WorkerTask};
@@ -2384,6 +2385,250 @@ pub async fn set_backup_owner(
     .await?
 }
 
+/// Here we
+///
+/// 1. mount the removable device to `<rundir>/mount/<random uuid>`
+/// 2. bind mount `<rundir>/mount/<random uuid>/<datastore path>` to `/mnt/datastore/<name>`
+/// 3. unmount `<rundir>/mount/<random uuid>`
+///
+/// leaving us with the datastore being mounted directly with its name under /mnt/datastore/...
+///
+/// The reason for the randomized device mounting paths is to avoid two tasks trying to mount to
+/// the same path; this is *very* unlikely, since the device is only mounted briefly, but
+/// technically possible.
+pub fn do_mount_device(datastore: DataStoreConfig) -> Result<(), Error> {
+    if let (Some(uuid), Some(mount_point)) = (
+        datastore.backing_device.as_ref(),
+        datastore.get_mount_point(),
+    ) {
+        if pbs_datastore::is_datastore_mounted_at(mount_point.clone(), uuid.to_string()) {
+            bail!("device is already mounted at '{}'", mount_point);
+        }
+        let tmp_mount_path = format!(
+            "{}/{:x}",
+            pbs_buildcfg::rundir!("/mount"),
+            proxmox_uuid::Uuid::generate()
+        );
+
+        let default_options = proxmox_sys::fs::CreateOptions::new();
+        proxmox_sys::fs::create_path(
+            &tmp_mount_path,
+            Some(default_options.clone()),
+            Some(default_options.clone()),
+        )?;
+
+        debug!("mounting '{uuid}' to '{}'", tmp_mount_path);
+        crate::tools::disks::mount_by_uuid(uuid, Path::new(&tmp_mount_path))?;
+
+        let full_store_path = format!(
+            "{tmp_mount_path}/{}",
+            datastore.path.trim_start_matches('/')
+        );
+        let backup_user = pbs_config::backup_user()?;
+        let options = CreateOptions::new()
+            .owner(backup_user.uid)
+            .group(backup_user.gid);
+
+        proxmox_sys::fs::create_path(
+            &mount_point,
+            Some(default_options.clone()),
+            Some(options.clone()),
+        )?;
+
+        // can't be created before it is mounted, so we have to do it here
+        proxmox_sys::fs::create_path(
+            &full_store_path,
+            Some(default_options.clone()),
+            Some(options.clone()),
+        )?;
+
+        info!(
+            "mounting '{}'({}) to '{}'",
+            datastore.name, datastore.path, mount_point
+        );
+        if let Err(err) =
+            crate::tools::disks::bind_mount(Path::new(&full_store_path), Path::new(&mount_point))
+        {
+            debug!("unmounting '{}'", tmp_mount_path);
+            let _ = crate::tools::disks::unmount_by_mountpoint(Path::new(&tmp_mount_path));
+            let _ = std::fs::remove_dir(std::path::Path::new(&tmp_mount_path));
+            return Err(format_err!(
+                "Datastore '{}' could not be mounted: {}.",
+                datastore.name,
+                err
+            ));
+        }
+
+        debug!("unmounting '{}'", tmp_mount_path);
+        crate::tools::disks::unmount_by_mountpoint(Path::new(&tmp_mount_path))?;
+        std::fs::remove_dir(std::path::Path::new(&tmp_mount_path))?;
+
+        Ok(())
+    } else {
+        Err(format_err!(
+            "Datastore '{}' cannot be mounted because it is not removable.",
+            datastore.name
+        ))
+    }
+}
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+            },
+        }
+    },
+    returns: {
+        schema: UPID_SCHEMA,
+    },
+    access: {
+        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
+    },
+)]
+/// Mount removable datastore.
+pub fn mount(store: String, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
+    let (section_config, _digest) = pbs_config::datastore::config()?;
+    let datastore: DataStoreConfig = section_config.lookup("datastore", &store)?;
+
+    if datastore.backing_device.is_none() {
+        bail!("datastore '{store}' is not removable");
+    }
+
+    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
+
+    let upid = WorkerTask::new_thread(
+        "mount-device",
+        Some(store),
+        auth_id.to_string(),
+        to_stdout,
+        move |_worker| do_mount_device(datastore),
+    )?;
+
+    Ok(json!(upid))
+}
+
+fn unset_unmount_maintenance(store: &str) -> Result<(), Error> {
+    let _lock = pbs_config::datastore::lock_config()?;
+    let (mut section_config, _digest) = pbs_config::datastore::config()?;
+    let mut store_config: DataStoreConfig = section_config.lookup("datastore", store)?;
+    if store_config
+        .get_maintenance_mode()
+        .map_or(true, |m| m.ty != MaintenanceType::Unmount)
+    {
+        bail!("Maintenance mode should have been 'Unmount'")
+    }
+    store_config.maintenance_mode = None;
+    section_config.set_data(store, "datastore", &store_config)?;
+    pbs_config::datastore::save_config(&section_config)?;
+    Ok(())
+}
+
+fn do_unmount_device(
+    datastore: DataStoreConfig,
+    worker: Option<&dyn WorkerTaskContext>,
+) -> Result<(), Error> {
+    let mut active_operations = task_tracking::get_active_operations(&datastore.name)?;
+    let mut old_status = String::new();
+    while active_operations.read + active_operations.write > 0 {
+        if let Some(worker) = worker {
+            if worker.abort_requested() {
+                unset_unmount_maintenance(&datastore.name)?;
+                bail!("aborted, due to user request");
+            }
+            let status = format!(
+                "cannot unmount yet, still {} read and {} write operations active",
+                active_operations.read, active_operations.write
+            );
+            if status != old_status {
+                info!("{status}");
+                old_status = status;
+            }
+        }
+        std::thread::sleep(std::time::Duration::from_secs(1));
+        active_operations = task_tracking::get_active_operations(&datastore.name)?;
+    }
+    if let Some(mount_point) = datastore.get_mount_point() {
+        crate::tools::disks::unmount_by_mountpoint(Path::new(&mount_point))?;
+        unset_unmount_maintenance(&datastore.name)?;
+    }
+    Ok(())
+}
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            store: { schema: DATASTORE_SCHEMA },
+        },
+    },
+    returns: {
+        schema: UPID_SCHEMA,
+    },
+    access: {
+        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
+    }
+)]
+/// Unmount a removable device that is associated with the datastore
+pub async fn unmount(store: String, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
+    let _lock = pbs_config::datastore::lock_config()?;
+    let (mut section_config, _digest) = pbs_config::datastore::config()?;
+    let mut datastore: DataStoreConfig = section_config.lookup("datastore", &store)?;
+
+    if datastore.backing_device.is_none() {
+        bail!("datastore '{store}' is not removable");
+    }
+
+    let mount_status = datastore
+        .get_mount_point()
+        .zip(datastore.backing_device.as_ref())
+        .map(|(mount_point, device_uuid)| {
+            is_datastore_mounted_at(mount_point, device_uuid.to_string())
+        });
+
+    if mount_status == Some(false) {
+        bail!("datastore '{store}' is not mounted");
+    }
+
+    datastore.set_maintenance_mode(Some(MaintenanceMode {
+        ty: MaintenanceType::Unmount,
+        message: None,
+    }))?;
+    section_config.set_data(&store, "datastore", &datastore)?;
+    pbs_config::datastore::save_config(&section_config)?;
+
+    drop(_lock);
+
+    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
+
+    if let Ok(proxy_pid) = proxmox_rest_server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN)
+    {
+        let sock = proxmox_daemon::command_socket::path_from_pid(proxy_pid);
+        let _ = proxmox_daemon::command_socket::send_raw(
+            sock,
+            &format!(
+                "{{\"command\":\"update-datastore-cache\",\"args\":\"{}\"}}\n",
+                &store
+            ),
+        )
+        .await;
+    }
+
+    let upid = WorkerTask::new_thread(
+        "unmount-device",
+        Some(store),
+        auth_id.to_string(),
+        to_stdout,
+        move |worker| do_unmount_device(datastore, Some(&worker)),
+    )?;
+
+    Ok(json!(upid))
+}
+
 #[sortable]
 const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
     (
@@ -2422,6 +2667,7 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
             .get(&API_METHOD_LIST_GROUPS)
            .delete(&API_METHOD_DELETE_GROUP),
     ),
+    ("mount", &Router::new().post(&API_METHOD_MOUNT)),
     (
         "namespace",
         // FIXME: move into datastore:: sub-module?!
@@ -2456,6 +2702,7 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
            .delete(&API_METHOD_DELETE_SNAPSHOT),
     ),
     ("status", &Router::new().get(&API_METHOD_STATUS)),
+    ("unmount", &Router::new().post(&API_METHOD_UNMOUNT)),
     (
         "upload-backup-log",
         &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG),
-- 
2.39.5