From: Gabriel Goller
To: pbs-devel@lists.proxmox.com
Date: Wed, 10 Apr 2024 16:17:16 +0200
Subject: [pbs-devel] [PATCH proxmox-backup v3 2/3] removed task_log! macro and moved to tracing in api
Message-ID: <20240410141722.147895-3-g.goller@proxmox.com>
In-Reply-To: <20240410141722.147895-1-g.goller@proxmox.com>
References: <20240410141722.147895-1-g.goller@proxmox.com>
X-Mailer: git-send-email 2.43.0
List-Id: Proxmox Backup Server development discussion

Imported `proxmox-log` and substituted all `task_log!` (and `task_warn!`, `task_error!`) invocations with tracing calls (`info!`, `warn!`, etc.). Removed worker references where they are no longer necessary.
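The substitution follows the same pattern throughout the diff below; a minimal before/after sketch (messages taken from the patch, the `log_prune` helper and its parameters are placeholders, and the `proxmox-log` subscriber setup is assumed to be in place):

```rust
use std::path::Path;
use tracing::{info, warn};

// Before: every log line was routed through the worker handle.
//     task_log!(worker, "retention options: {}", opts.join(" "));
//     task_warn!(worker, "failed to remove dir {:?}: {}", path, err);

// After: plain tracing macros; no worker handle is threaded through,
// the proxmox-log layer is assumed to attach events to the task log.
fn log_prune(opts: &[String], path: &Path, err: &str) {
    info!("retention options: {}", opts.join(" "));
    warn!("failed to remove dir {:?}: {}", path, err);
}
```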
Signed-off-by: Gabriel Goller --- src/api2/admin/datastore.rs | 26 ++-- src/api2/config/acme.rs | 20 +-- src/api2/config/datastore.rs | 16 +- src/api2/config/prune.rs | 14 +- src/api2/node/apt.rs | 13 +- src/api2/node/certificates.rs | 64 ++++---- src/api2/node/disks/directory.rs | 13 +- src/api2/node/disks/mod.rs | 12 +- src/api2/node/disks/zfs.rs | 30 ++-- src/api2/node/mod.rs | 11 +- src/api2/pull.rs | 33 ++-- src/api2/tape/backup.rs | 72 ++++----- src/api2/tape/drive.rs | 146 +++++++----------- src/api2/tape/restore.rs | 254 ++++++++++--------------------- 14 files changed, 264 insertions(+), 460 deletions(-) diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs index 35628c59..2757da6e 100644 --- a/src/api2/admin/datastore.rs +++ b/src/api2/admin/datastore.rs @@ -13,6 +13,7 @@ use hyper::{header, Body, Response, StatusCode}; use serde::Deserialize; use serde_json::{json, Value}; use tokio_stream::wrappers::ReceiverStream; +use tracing::{info, warn}; use proxmox_async::blocking::WrappedReaderStream; use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream}; @@ -26,7 +27,6 @@ use proxmox_sortable_macro::sortable; use proxmox_sys::fs::{ file_read_firstline, file_read_optional_string, replace_file, CreateOptions, }; -use proxmox_sys::{task_log, task_warn}; use pxar::accessor::aio::Accessor; use pxar::EntryKind; @@ -908,9 +908,9 @@ pub fn verify( )? }; if !failed_dirs.is_empty() { - task_log!(worker, "Failed to verify the following snapshots/groups:"); + info!("Failed to verify the following snapshots/groups:"); for dir in failed_dirs { - task_log!(worker, "\t{}", dir); + info!("\t{}", dir); } bail!("verification failed - please check the log for details"); } @@ -1030,9 +1030,9 @@ pub fn prune( return Ok(json!(prune_result)); } - let prune_group = move |worker: Arc| { + let prune_group = move |_worker: Arc| { if keep_all { - task_log!(worker, "No prune selection - keeping all files."); + info!("No prune selection - keeping all files."); } else { let mut opts = Vec::new(); if !ns.is_root() { @@ -1040,9 +1040,8 @@ pub fn prune( } crate::server::cli_keep_options(&mut opts, &keep_options); - task_log!(worker, "retention options: {}", opts.join(" ")); - task_log!( - worker, + info!("retention options: {}", opts.join(" ")); + info!( "Starting prune on {} group \"{}\"", print_store_and_ns(&store, &ns), group.group(), @@ -1059,7 +1058,7 @@ pub fn prune( let msg = format!("{}/{}/{timestamp} {mark}", group.ty, group.id); - task_log!(worker, "{msg}"); + info!("{msg}"); prune_result.push(PruneResult { backup_type: group.ty, @@ -1072,8 +1071,7 @@ pub fn prune( if !keep { if let Err(err) = backup_dir.destroy(false) { - task_warn!( - worker, + warn!( "failed to remove dir {:?}: {}", backup_dir.relative_path(), err, @@ -1097,7 +1095,7 @@ pub fn prune( )?; Ok(json!(upid)) } else { - let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?; + let (worker, _) = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?; let result = prune_group(worker.clone()); worker.log_result(&Ok(())); Ok(json!(result)) @@ -1160,9 +1158,7 @@ pub fn prune_datastore( Some(worker_id), auth_id.to_string(), to_stdout, - move |worker| { - crate::server::prune_datastore(worker, auth_id, prune_options, datastore, dry_run) - }, + move |_worker| crate::server::prune_datastore(auth_id, prune_options, datastore, dry_run), )?; Ok(upid_str) diff --git a/src/api2/config/acme.rs b/src/api2/config/acme.rs index 4fa276f6..f22ee2ae 100644 --- a/src/api2/config/acme.rs +++ 
b/src/api2/config/acme.rs @@ -9,12 +9,12 @@ use hex::FromHex; use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use serde_json::{json, Value}; +use tracing::{info, warn}; use proxmox_router::{ http_bail, list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap, }; use proxmox_schema::{api, param_bail}; -use proxmox_sys::{task_log, task_warn}; use proxmox_acme::account::AccountData as AcmeAccountData; use proxmox_acme::Account; @@ -240,10 +240,10 @@ fn register_account( Some(name.to_string()), auth_id.to_string(), true, - move |worker| async move { + move |_worker| async move { let mut client = AcmeClient::new(directory); - task_log!(worker, "Registering ACME account '{}'...", &name); + info!("Registering ACME account '{}'...", &name); let account = do_register_account( &mut client, @@ -255,11 +255,7 @@ fn register_account( ) .await?; - task_log!( - worker, - "Registration successful, account URL: {}", - account.location - ); + info!("Registration successful, account URL: {}", account.location); Ok(()) }, @@ -354,7 +350,7 @@ pub fn deactivate_account( Some(name.to_string()), auth_id.to_string(), true, - move |worker| async move { + move |_worker| async move { match AcmeClient::load(&name) .await? .update_account(&json!({"status": "deactivated"})) @@ -363,11 +359,9 @@ pub fn deactivate_account( Ok(_account) => (), Err(err) if !force => return Err(err), Err(err) => { - task_warn!( - worker, + warn!( "error deactivating account {}, proceedeing anyway - {}", - name, - err, + name, err, ); } } diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs index 3081e1f4..b070df0e 100644 --- a/src/api2/config/datastore.rs +++ b/src/api2/config/datastore.rs @@ -4,11 +4,11 @@ use ::serde::{Deserialize, Serialize}; use anyhow::Error; use hex::FromHex; use serde_json::Value; +use tracing::warn; use proxmox_router::{http_bail, Permission, Router, RpcEnvironment, RpcEnvironmentType}; use proxmox_schema::{api, param_bail, ApiType}; use proxmox_section_config::SectionConfigData; -use proxmox_sys::{task_warn, WorkerTaskContext}; use proxmox_uuid::Uuid; use pbs_api_types::{ @@ -70,7 +70,6 @@ pub(crate) fn do_create_datastore( _lock: BackupLockGuard, mut config: SectionConfigData, datastore: DataStoreConfig, - worker: Option<&dyn WorkerTaskContext>, ) -> Result<(), Error> { let path: PathBuf = datastore.path.clone().into(); @@ -84,7 +83,6 @@ pub(crate) fn do_create_datastore( path, backup_user.uid, backup_user.gid, - worker, tuning.sync_level.unwrap_or_default(), )?; @@ -155,11 +153,11 @@ pub fn create_datastore( Some(config.name.to_string()), auth_id.to_string(), to_stdout, - move |worker| { - do_create_datastore(lock, section_config, config, Some(&worker))?; + move |_worker| { + do_create_datastore(lock, section_config, config)?; if let Some(prune_job_config) = prune_job_config { - do_create_prune_job(prune_job_config, Some(&worker)) + do_create_prune_job(prune_job_config) } else { Ok(()) } @@ -512,8 +510,8 @@ pub async fn delete_datastore( Some(name.clone()), auth_id.to_string(), to_stdout, - move |worker| { - pbs_datastore::DataStore::destroy(&name, destroy_data, &worker)?; + move |_worker| { + pbs_datastore::DataStore::destroy(&name, destroy_data)?; // ignore errors let _ = jobstate::remove_state_file("prune", &name); @@ -522,7 +520,7 @@ pub async fn delete_datastore( if let Err(err) = proxmox_async::runtime::block_on(crate::server::notify_datastore_removed()) { - task_warn!(worker, "failed to notify after datastore removal: {err}"); + warn!("failed to notify 
after datastore removal: {err}"); } Ok(()) diff --git a/src/api2/config/prune.rs b/src/api2/config/prune.rs index 4f7ce39c..4f3d94af 100644 --- a/src/api2/config/prune.rs +++ b/src/api2/config/prune.rs @@ -1,7 +1,5 @@ use anyhow::Error; use hex::FromHex; -use proxmox_sys::task_log; -use proxmox_sys::WorkerTaskContext; use serde::{Deserialize, Serialize}; use serde_json::Value; @@ -15,6 +13,7 @@ use pbs_api_types::{ use pbs_config::prune; use pbs_config::CachedUserInfo; +use tracing::info; #[api( input: { @@ -58,10 +57,7 @@ pub fn list_prune_jobs( Ok(list) } -pub fn do_create_prune_job( - config: PruneJobConfig, - worker: Option<&dyn WorkerTaskContext>, -) -> Result<(), Error> { +pub fn do_create_prune_job(config: PruneJobConfig) -> Result<(), Error> { let _lock = prune::lock_config()?; let (mut section_config, _digest) = prune::config()?; @@ -76,9 +72,7 @@ pub fn do_create_prune_job( crate::server::jobstate::create_state_file("prunejob", &config.id)?; - if let Some(worker) = worker { - task_log!(worker, "Prune job created: {}", config.id); - } + info!("Prune job created: {}", config.id); Ok(()) } @@ -108,7 +102,7 @@ pub fn create_prune_job( user_info.check_privs(&auth_id, &config.acl_path(), PRIV_DATASTORE_MODIFY, true)?; - do_create_prune_job(config, None) + do_create_prune_job(config) } #[api( diff --git a/src/api2/node/apt.rs b/src/api2/node/apt.rs index 9c0d9ecc..38af0452 100644 --- a/src/api2/node/apt.rs +++ b/src/api2/node/apt.rs @@ -1,6 +1,7 @@ use anyhow::{bail, format_err, Error}; use serde_json::{json, Value}; use std::os::unix::prelude::OsStrExt; +use tracing::{info, warn}; use proxmox_router::{ list_subdirs_api_method, Permission, Router, RpcEnvironment, RpcEnvironmentType, SubdirMap, @@ -83,9 +84,9 @@ fn read_and_update_proxy_config() -> Result, Error> { Ok(proxy_config) } -fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> { +fn do_apt_update(quiet: bool) -> Result<(), Error> { if !quiet { - worker.log_message("starting apt-get update") + info!("starting apt-get update"); } read_and_update_proxy_config()?; @@ -99,7 +100,7 @@ fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> { .map_err(|err| format_err!("failed to execute {:?} - {}", command, err))?; if !quiet { - worker.log_message(String::from_utf8(output.stdout)?); + info!("{}", String::from_utf8(output.stdout)?); } // TODO: improve run_command to allow outputting both, stderr and stdout @@ -114,7 +115,7 @@ fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> { } }) .unwrap_or_else(|_| String::from("non utf8 error message (suppressed)")); - worker.log_warning(msg); + warn!(msg); } else { bail!("terminated by signal"); } @@ -160,8 +161,8 @@ pub fn apt_update_database( let auth_id = rpcenv.get_auth_id().unwrap(); let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; - let upid_str = WorkerTask::new_thread("aptupdate", None, auth_id, to_stdout, move |worker| { - do_apt_update(&worker, quiet)?; + let upid_str = WorkerTask::new_thread("aptupdate", None, auth_id, to_stdout, move |_worker| { + do_apt_update(quiet)?; let mut cache = apt::update_cache()?; diff --git a/src/api2/node/certificates.rs b/src/api2/node/certificates.rs index 6dfcd0c0..e4fbc8ac 100644 --- a/src/api2/node/certificates.rs +++ b/src/api2/node/certificates.rs @@ -5,16 +5,17 @@ use anyhow::{bail, format_err, Error}; use openssl::pkey::PKey; use openssl::x509::X509; use serde::{Deserialize, Serialize}; +use tracing::info; use proxmox_router::list_subdirs_api_method; use 
proxmox_router::SubdirMap; use proxmox_router::{Permission, Router, RpcEnvironment}; use proxmox_schema::api; -use proxmox_sys::{task_log, task_warn}; use pbs_api_types::{NODE_SCHEMA, PRIV_SYS_MODIFY}; use pbs_buildcfg::configdir; use pbs_tools::cert; +use tracing::warn; use crate::acme::AcmeClient; use crate::api2::types::AcmeDomain; @@ -301,10 +302,7 @@ async fn order_certificate( }; if domains.is_empty() { - task_log!( - worker, - "No domains configured to be ordered from an ACME server." - ); + info!("No domains configured to be ordered from an ACME server."); return Ok(None); } @@ -312,11 +310,11 @@ async fn order_certificate( let mut acme = node_config.acme_client().await?; - task_log!(worker, "Placing ACME order"); + info!("Placing ACME order"); let order = acme .new_order(domains.iter().map(|d| d.domain.to_ascii_lowercase())) .await?; - task_log!(worker, "Order URL: {}", order.location); + info!("Order URL: {}", order.location); let identifiers: Vec = order .data @@ -328,7 +326,7 @@ async fn order_certificate( .collect(); for auth_url in &order.data.authorizations { - task_log!(worker, "Getting authorization details from '{}'", auth_url); + info!("Getting authorization details from '{}'", auth_url); let mut auth = acme.get_authorization(auth_url).await?; let domain = match &mut auth.identifier { @@ -336,11 +334,11 @@ async fn order_certificate( }; if auth.status == Status::Valid { - task_log!(worker, "{} is already validated!", domain); + info!("{} is already validated!", domain); continue; } - task_log!(worker, "The validation for {} is pending", domain); + info!("The validation for {} is pending", domain); let domain_config: &AcmeDomain = get_domain_config(&domain)?; let plugin_id = domain_config.plugin.as_deref().unwrap_or("standalone"); let mut plugin_cfg = @@ -348,31 +346,28 @@ async fn order_certificate( format_err!("plugin '{}' for domain '{}' not found!", plugin_id, domain) })?; - task_log!(worker, "Setting up validation plugin"); + info!("Setting up validation plugin"); let validation_url = plugin_cfg .setup(&mut acme, &auth, domain_config, Arc::clone(&worker)) .await?; - let result = request_validation(&worker, &mut acme, auth_url, validation_url).await; + let result = request_validation(&mut acme, auth_url, validation_url).await; if let Err(err) = plugin_cfg .teardown(&mut acme, &auth, domain_config, Arc::clone(&worker)) .await { - task_warn!( - worker, + warn!( "Failed to teardown plugin '{}' for domain '{}' - {}", - plugin_id, - domain, - err + plugin_id, domain, err ); } result?; } - task_log!(worker, "All domains validated"); - task_log!(worker, "Creating CSR"); + info!("All domains validated"); + info!("Creating CSR"); let csr = proxmox_acme::util::Csr::generate(&identifiers, &Default::default())?; let mut finalize_error_cnt = 0u8; @@ -385,7 +380,7 @@ async fn order_certificate( match order.status { Status::Pending => { - task_log!(worker, "still pending, trying to finalize anyway"); + info!("still pending, trying to finalize anyway"); let finalize = order .finalize .as_deref() @@ -400,7 +395,7 @@ async fn order_certificate( tokio::time::sleep(Duration::from_secs(5)).await; } Status::Ready => { - task_log!(worker, "order is ready, finalizing"); + info!("order is ready, finalizing"); let finalize = order .finalize .as_deref() @@ -409,18 +404,18 @@ async fn order_certificate( tokio::time::sleep(Duration::from_secs(5)).await; } Status::Processing => { - task_log!(worker, "still processing, trying again in 30 seconds"); + info!("still processing, trying again in 30 
seconds"); tokio::time::sleep(Duration::from_secs(30)).await; } Status::Valid => { - task_log!(worker, "valid"); + info!("valid"); break; } other => bail!("order status: {:?}", other), } } - task_log!(worker, "Downloading certificate"); + info!("Downloading certificate"); let certificate = acme .get_certificate( order @@ -437,15 +432,14 @@ async fn order_certificate( } async fn request_validation( - worker: &WorkerTask, acme: &mut AcmeClient, auth_url: &str, validation_url: &str, ) -> Result<(), Error> { - task_log!(worker, "Triggering validation"); + info!("Triggering validation"); acme.request_challenge_validation(validation_url).await?; - task_log!(worker, "Sleeping for 5 seconds"); + info!("Sleeping for 5 seconds"); tokio::time::sleep(Duration::from_secs(5)).await; loop { @@ -454,10 +448,7 @@ async fn request_validation( let auth = acme.get_authorization(auth_url).await?; match auth.status { Status::Pending => { - task_log!( - worker, - "Status is still 'pending', trying again in 10 seconds" - ); + info!("Status is still 'pending', trying again in 10 seconds"); tokio::time::sleep(Duration::from_secs(10)).await; } Status::Valid => return Ok(()), @@ -582,15 +573,12 @@ pub fn revoke_acme_cert(rpcenv: &mut dyn RpcEnvironment) -> Result Result task_log!(worker, "{output}"), + Ok(output) => info!("{output}"), Err(err) => { - task_error!(worker, "{err}"); + error!("{err}"); bail!("Error during 'zpool create', see task log for more details"); } }; @@ -299,11 +296,11 @@ pub fn create_zpool( command.arg(&format!("compression={}", compression)); } command.args(["relatime=on", &name]); - task_log!(worker, "# {:?}", command); + info!("# {:?}", command); match proxmox_sys::command::run_command(command, None) { - Ok(output) => task_log!(worker, "{output}"), + Ok(output) => info!("{output}"), Err(err) => { - task_error!(worker, "{err}"); + error!("{err}"); bail!("Error during 'zfs set', see task log for more details"); } }; @@ -319,12 +316,7 @@ pub fn create_zpool( bail!("datastore '{}' already exists.", datastore.name); } - crate::api2::config::datastore::do_create_datastore( - lock, - config, - datastore, - Some(&worker), - )?; + crate::api2::config::datastore::do_create_datastore(lock, config, datastore)?; } Ok(()) diff --git a/src/api2/node/mod.rs b/src/api2/node/mod.rs index 931143fd..7055a683 100644 --- a/src/api2/node/mod.rs +++ b/src/api2/node/mod.rs @@ -25,6 +25,7 @@ use proxmox_sortable_macro::sortable; use proxmox_sys::fd::fd_change_cloexec; use pbs_api_types::{NODE_SCHEMA, PRIV_SYS_CONSOLE}; +use tracing::{info, warn}; use crate::auth::{private_auth_keyring, public_auth_keyring}; use crate::tools; @@ -181,20 +182,18 @@ async fn termproxy(cmd: Option, rpcenv: &mut dyn RpcEnvironment) -> Resu let stdout = child.stdout.take().expect("no child stdout handle"); let stderr = child.stderr.take().expect("no child stderr handle"); - let worker_stdout = worker.clone(); let stdout_fut = async move { let mut reader = BufReader::new(stdout).lines(); while let Some(line) = reader.next_line().await? { - worker_stdout.log_message(line); + info!(line); } Ok::<(), Error>(()) }; - let worker_stderr = worker.clone(); let stderr_fut = async move { let mut reader = BufReader::new(stderr).lines(); while let Some(line) = reader.next_line().await? 
{ - worker_stderr.log_warning(line); + warn!(line); } Ok::<(), Error>(()) }; @@ -226,9 +225,9 @@ async fn termproxy(cmd: Option, rpcenv: &mut dyn RpcEnvironment) -> Resu } if let Err(err) = child.kill().await { - worker.log_warning(format!("error killing termproxy: {}", err)); + warn!("error killing termproxy: {}", err); } else if let Err(err) = child.wait().await { - worker.log_warning(format!("error awaiting termproxy: {}", err)); + warn!("error awaiting termproxy: {}", err); } } diff --git a/src/api2/pull.rs b/src/api2/pull.rs index 59db3660..06b752e9 100644 --- a/src/api2/pull.rs +++ b/src/api2/pull.rs @@ -1,10 +1,10 @@ //! Sync datastore from remote server use anyhow::{bail, format_err, Error}; use futures::{future::FutureExt, select}; +use tracing::info; use proxmox_router::{Permission, Router, RpcEnvironment}; use proxmox_schema::api; -use proxmox_sys::task_log; use pbs_api_types::{ Authid, BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA, @@ -130,12 +130,12 @@ pub fn do_sync_job( let worker_future = async move { let pull_params = PullParameters::try_from(&sync_job)?; - task_log!(worker, "Starting datastore sync job '{}'", job_id); + info!("Starting datastore sync job '{}'", job_id); if let Some(event_str) = schedule { - task_log!(worker, "task triggered by schedule '{}'", event_str); + info!("task triggered by schedule '{}'", event_str); } - task_log!( - worker, + + info!( "sync datastore '{}' from '{}{}'", sync_job.store, sync_job @@ -145,33 +145,29 @@ pub fn do_sync_job( sync_job.remote_store, ); - let pull_stats = pull_store(&worker, pull_params).await?; + let pull_stats = pull_store(pull_params).await?; if pull_stats.bytes != 0 { let amount = HumanByte::from(pull_stats.bytes); let rate = HumanByte::new_binary( pull_stats.bytes as f64 / pull_stats.elapsed.as_secs_f64(), ); - task_log!( - worker, + info!( "Summary: sync job pulled {amount} in {} chunks (average rate: {rate}/s)", pull_stats.chunk_count, ); } else { - task_log!(worker, "Summary: sync job found no new data to pull"); + info!("Summary: sync job found no new data to pull"); } if let Some(removed) = pull_stats.removed { - task_log!( - worker, + info!( "Summary: removed vanished: snapshots: {}, groups: {}, namespaces: {}", - removed.snapshots, - removed.groups, - removed.namespaces, + removed.snapshots, removed.groups, removed.namespaces, ); } - task_log!(worker, "sync job '{}' end", &job_id); + info!("sync job '{}' end", &job_id); Ok(()) }; @@ -321,21 +317,20 @@ async fn pull( auth_id.to_string(), true, move |worker| async move { - task_log!( - worker, + info!( "pull datastore '{}' from '{}/{}'", store, remote.as_deref().unwrap_or("-"), remote_store, ); - let pull_future = pull_store(&worker, pull_params); + let pull_future = pull_store(pull_params); (select! 
{ success = pull_future.fuse() => success, abort = worker.abort_future().map(|_| Err(format_err!("pull aborted"))) => abort, })?; - task_log!(worker, "pull datastore '{}' end", store); + info!("pull datastore '{}' end", store); Ok(()) }, diff --git a/src/api2/tape/backup.rs b/src/api2/tape/backup.rs index 28d7e720..fae6ee7c 100644 --- a/src/api2/tape/backup.rs +++ b/src/api2/tape/backup.rs @@ -2,11 +2,12 @@ use std::sync::{Arc, Mutex}; use anyhow::{bail, format_err, Error}; use serde_json::Value; +use tracing::{info, warn}; use proxmox_lang::try_block; use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType}; use proxmox_schema::api; -use proxmox_sys::{task_log, task_warn, WorkerTaskContext}; +use proxmox_sys::WorkerTaskContext; use pbs_api_types::{ print_ns_and_snapshot, print_store_and_ns, Authid, MediaPoolConfig, Operation, @@ -181,7 +182,7 @@ pub fn do_tape_backup_job( let job_result = try_block!({ if schedule.is_some() { // for scheduled tape backup jobs, we wait indefinitely for the lock - task_log!(worker, "waiting for drive lock..."); + info!("waiting for drive lock..."); loop { worker.check_abort()?; match lock_tape_device(&drive_config, &setup.drive) { @@ -196,9 +197,9 @@ pub fn do_tape_backup_job( } set_tape_device_state(&setup.drive, &worker.upid().to_string())?; - task_log!(worker, "Starting tape backup job '{}'", job_id); + info!("Starting tape backup job '{}'", job_id); if let Some(event_str) = schedule { - task_log!(worker, "task triggered by schedule '{}'", event_str); + info!("task triggered by schedule '{}'", event_str); } backup_worker( @@ -392,7 +393,7 @@ fn backup_worker( ) -> Result<(), Error> { let start = std::time::Instant::now(); - task_log!(worker, "update media online status"); + info!("update media online status"); let changer_name = update_media_online_status(&setup.drive)?; let root_namespace = setup.ns.clone().unwrap_or_default(); @@ -400,8 +401,7 @@ fn backup_worker( let pool = MediaPool::with_config(TAPE_STATUS_DIR, pool_config, changer_name, false)?; - let mut pool_writer = - PoolWriter::new(pool, &setup.drive, worker, email, force_media_set, ns_magic)?; + let mut pool_writer = PoolWriter::new(pool, &setup.drive, email, force_media_set, ns_magic)?; let mut group_list = Vec::new(); let namespaces = datastore.recursive_iter_backup_ns_ok(root_namespace, setup.max_depth)?; @@ -421,8 +421,7 @@ fn backup_worker( None => group_list, }; - task_log!( - worker, + info!( "found {} groups (out of {} total)", group_list.len(), group_count_full @@ -433,10 +432,7 @@ fn backup_worker( let latest_only = setup.latest_only.unwrap_or(false); if latest_only { - task_log!( - worker, - "latest-only: true (only considering latest snapshots)" - ); + info!("latest-only: true (only considering latest snapshots)"); } let datastore_name = datastore.name(); @@ -459,8 +455,7 @@ fn backup_worker( .collect(); if snapshot_list.is_empty() { - task_log!( - worker, + info!( "{}, group {} was empty", print_store_and_ns(datastore_name, group.backup_ns()), group.group() @@ -480,7 +475,7 @@ fn backup_worker( info.backup_dir.backup_ns(), info.backup_dir.as_ref(), ) { - task_log!(worker, "skip snapshot {}", rel_path); + info!("skip snapshot {}", rel_path); continue; } @@ -493,7 +488,7 @@ fn backup_worker( SnapshotBackupResult::Ignored => {} } progress.done_snapshots = 1; - task_log!(worker, "percentage done: {}", progress); + info!("percentage done: {}", progress); } } else { progress.group_snapshots = snapshot_list.len() as u64; @@ -506,7 +501,7 @@ fn backup_worker( 
info.backup_dir.backup_ns(), info.backup_dir.as_ref(), ) { - task_log!(worker, "skip snapshot {}", rel_path); + info!("skip snapshot {}", rel_path); continue; } @@ -519,7 +514,7 @@ fn backup_worker( SnapshotBackupResult::Ignored => {} } progress.done_snapshots = snapshot_number as u64 + 1; - task_log!(worker, "percentage done: {}", progress); + info!("percentage done: {}", progress); } } } @@ -527,18 +522,15 @@ fn backup_worker( pool_writer.commit()?; if need_catalog { - task_log!(worker, "append media catalog"); + info!("append media catalog"); let uuid = pool_writer.load_writable_media(worker)?; - let done = pool_writer.append_catalog_archive(worker)?; + let done = pool_writer.append_catalog_archive()?; if !done { - task_log!( - worker, - "catalog does not fit on tape, writing to next volume" - ); + info!("catalog does not fit on tape, writing to next volume"); pool_writer.set_media_status_full(&uuid)?; pool_writer.load_writable_media(worker)?; - let done = pool_writer.append_catalog_archive(worker)?; + let done = pool_writer.append_catalog_archive()?; if !done { bail!("write_catalog_archive failed on second media"); } @@ -546,9 +538,9 @@ fn backup_worker( } if setup.export_media_set.unwrap_or(false) { - pool_writer.export_media_set(worker)?; + pool_writer.export_media_set()?; } else if setup.eject_media.unwrap_or(false) { - pool_writer.eject_media(worker)?; + pool_writer.eject_media()?; } if errors { @@ -558,7 +550,7 @@ fn backup_worker( summary.used_tapes = match pool_writer.get_used_media_labels() { Ok(tapes) => Some(tapes), Err(err) => { - task_warn!(worker, "could not collect list of used tapes: {err}"); + warn!("could not collect list of used tapes: {err}"); None } }; @@ -592,7 +584,7 @@ fn backup_snapshot( snapshot: BackupDir, ) -> Result { let snapshot_path = snapshot.relative_path(); - task_log!(worker, "backup snapshot {:?}", snapshot_path); + info!("backup snapshot {:?}", snapshot_path); let snapshot_reader = match snapshot.locked_reader() { Ok(reader) => reader, @@ -600,15 +592,10 @@ fn backup_snapshot( if !snapshot.full_path().exists() { // we got an error and the dir does not exist, // it probably just vanished, so continue - task_log!(worker, "snapshot {:?} vanished, skipping", snapshot_path); + info!("snapshot {:?} vanished, skipping", snapshot_path); return Ok(SnapshotBackupResult::Ignored); } - task_warn!( - worker, - "failed opening snapshot {:?}: {}", - snapshot_path, - err - ); + warn!("failed opening snapshot {:?}: {}", snapshot_path, err); return Ok(SnapshotBackupResult::Error); } }; @@ -654,7 +641,7 @@ fn backup_snapshot( let snapshot_reader = snapshot_reader.lock().unwrap(); - let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?; + let (done, _bytes) = pool_writer.append_snapshot_archive(&snapshot_reader)?; if !done { // does not fit on tape, so we try on next volume @@ -663,19 +650,14 @@ fn backup_snapshot( worker.check_abort()?; pool_writer.load_writable_media(worker)?; - let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?; + let (done, _bytes) = pool_writer.append_snapshot_archive(&snapshot_reader)?; if !done { bail!("write_snapshot_archive failed on second media"); } } - task_log!( - worker, - "end backup {}:{:?}", - datastore.name(), - snapshot_path - ); + info!("end backup {}:{:?}", datastore.name(), snapshot_path); Ok(SnapshotBackupResult::Success) } diff --git a/src/api2/tape/drive.rs b/src/api2/tape/drive.rs index 5a5d39d9..1bd19383 100644 --- a/src/api2/tape/drive.rs +++ 
b/src/api2/tape/drive.rs @@ -4,6 +4,7 @@ use std::sync::Arc; use anyhow::{bail, format_err, Error}; use serde_json::Value; +use tracing::{info, warn}; use proxmox_router::{ list_subdirs_api_method, Permission, Router, RpcEnvironment, RpcEnvironmentType, SubdirMap, @@ -11,7 +12,6 @@ use proxmox_router::{ use proxmox_schema::api; use proxmox_section_config::SectionConfigData; use proxmox_sortable_macro::sortable; -use proxmox_sys::{task_log, task_warn}; use proxmox_uuid::Uuid; use pbs_api_types::{ @@ -130,13 +130,8 @@ pub fn load_media( drive.clone(), "load-media", Some(job_id), - move |worker, config| { - task_log!( - worker, - "loading media '{}' into drive '{}'", - label_text, - drive - ); + move |_worker, config| { + info!("loading media '{}' into drive '{}'", label_text, drive); let (mut changer, _) = required_media_changer(&config, &drive)?; changer.load_media(&label_text)?; Ok(()) @@ -249,8 +244,8 @@ pub fn unload( drive.clone(), "unload-media", Some(drive.clone()), - move |worker, config| { - task_log!(worker, "unloading media from drive '{}'", drive); + move |_worker, config| { + info!("unloading media from drive '{}'", drive); let (mut changer, _) = required_media_changer(&config, &drive)?; changer.unload_media(target_slot)?; @@ -298,9 +293,9 @@ pub fn format_media( drive.clone(), "format-media", Some(drive.clone()), - move |worker, config| { + move |_worker, config| { if let Some(ref label) = label_text { - task_log!(worker, "try to load media '{}'", label); + info!("try to load media '{}'", label); if let Some((mut changer, _)) = media_changer(&config, &drive)? { changer.load_media(label)?; } @@ -314,11 +309,8 @@ pub fn format_media( let mut handle = LtoTapeHandle::new(file)?; if let Ok(status) = handle.get_drive_and_media_status() { if status.density >= TapeDensity::LTO9 { - task_log!(worker, "Slow formatting LTO9+ media."); - task_log!( - worker, - "This can take a very long time due to media optimization." 
- ); + info!("Slow formatting LTO9+ media."); + info!("This can take a very long time due to media optimization."); } } } @@ -329,15 +321,15 @@ pub fn format_media( bail!("expected label '{}', found unrelated data", label); } /* assume drive contains no or unrelated data */ - task_log!(worker, "unable to read media label: {}", err); - task_log!(worker, "format anyways"); + info!("unable to read media label: {}", err); + info!("format anyways"); handle.format_media(fast.unwrap_or(true))?; } Ok((None, _)) => { if let Some(label) = label_text { bail!("expected label '{}', found empty tape", label); } - task_log!(worker, "found empty media - format anyways"); + info!("found empty media - format anyways"); handle.format_media(fast.unwrap_or(true))?; } Ok((Some(media_id), _key_config)) => { @@ -351,11 +343,9 @@ pub fn format_media( } } - task_log!( - worker, + info!( "found media '{}' with uuid '{}'", - media_id.label.label_text, - media_id.label.uuid, + media_id.label.label_text, media_id.label.uuid, ); let mut inventory = Inventory::new(TAPE_STATUS_DIR); @@ -503,7 +493,7 @@ pub fn label_media( drive.clone(), "label-media", Some(drive.clone()), - move |worker, config| { + move |_worker, config| { let mut drive = open_drive(&config, &drive)?; drive.rewind()?; @@ -525,7 +515,7 @@ pub fn label_media( pool: pool.clone(), }; - write_media_label(worker, &mut drive, label, pool) + write_media_label(&mut drive, label, pool) }, )?; @@ -533,7 +523,6 @@ pub fn label_media( } fn write_media_label( - worker: Arc, drive: &mut Box, label: MediaLabel, pool: Option, @@ -548,18 +537,9 @@ fn write_media_label( } drive.label_tape(&label)?; if let Some(ref pool) = pool { - task_log!( - worker, - "Label media '{}' for pool '{}'", - label.label_text, - pool - ); + info!("Label media '{}' for pool '{}'", label.label_text, pool); } else { - task_log!( - worker, - "Label media '{}' (no pool assignment)", - label.label_text - ); + info!("Label media '{}' (no pool assignment)", label.label_text); } let media_id = MediaId { @@ -746,10 +726,10 @@ pub fn clean_drive(drive: String, rpcenv: &mut dyn RpcEnvironment) -> Result Result Result { - task_warn!(worker, "error getting media by unique label: {err}"); + warn!("error getting media by unique label: {err}"); // we can't be sure which uuid it is continue; } @@ -947,38 +927,29 @@ pub fn update_inventory( } if let Err(err) = changer.load_media(&label_text) { - task_warn!(worker, "unable to load media '{}' - {}", label_text, err); + warn!("unable to load media '{}' - {}", label_text, err); continue; } let mut drive = open_drive(&config, &drive)?; match drive.read_label() { Err(err) => { - task_warn!( - worker, - "unable to read label form media '{}' - {}", - label_text, - err - ); + warn!("unable to read label form media '{}' - {}", label_text, err); } Ok((None, _)) => { - task_log!(worker, "media '{}' is empty", label_text); + info!("media '{}' is empty", label_text); } Ok((Some(media_id), _key_config)) => { if label_text != media_id.label.label_text { - task_warn!( - worker, + warn!( "label text mismatch ({} != {})", - label_text, - media_id.label.label_text + label_text, media_id.label.label_text ); continue; } - task_log!( - worker, + info!( "inventorize media '{}' with uuid '{}'", - label_text, - media_id.label.uuid + label_text, media_id.label.uuid ); let _pool_lock = if let Some(pool) = media_id.pool() { @@ -999,15 +970,11 @@ pub fn update_inventory( if catalog { let media_set = inventory.compute_media_set_members(&set.uuid)?; if let Err(err) = fast_catalog_restore( - 
&worker, &mut drive, &media_set, &media_id.label.uuid, ) { - task_warn!( - worker, - "could not restore catalog for {label_text}: {err}" - ); + warn!("could not restore catalog for {label_text}: {err}"); } } } else { @@ -1063,14 +1030,13 @@ pub fn barcode_label_media( drive.clone(), "barcode-label-media", Some(drive.clone()), - move |worker, config| barcode_label_media_worker(worker, drive, &config, pool), + move |_worker, config| barcode_label_media_worker(drive, &config, pool), )?; Ok(upid_str.into()) } fn barcode_label_media_worker( - worker: Arc, drive: String, drive_config: &SectionConfigData, pool: Option, @@ -1103,24 +1069,23 @@ fn barcode_label_media_worker( inventory.reload()?; match inventory.find_media_by_label_text(&label_text) { Ok(Some(_)) => { - task_log!( - worker, + info!( "media '{}' already inventoried (already labeled)", label_text ); continue; } Err(err) => { - task_warn!(worker, "error getting media by unique label: {err}",); + warn!("error getting media by unique label: {err}",); continue; } Ok(None) => {} // ok to label } - task_log!(worker, "checking/loading media '{}'", label_text); + info!("checking/loading media '{}'", label_text); if let Err(err) = changer.load_media(&label_text) { - task_warn!(worker, "unable to load media '{}' - {}", label_text, err); + warn!("unable to load media '{}' - {}", label_text, err); continue; } @@ -1129,18 +1094,13 @@ fn barcode_label_media_worker( match drive.read_next_file() { Ok(_reader) => { - task_log!( - worker, - "media '{}' is not empty (format it first)", - label_text - ); + info!("media '{}' is not empty (format it first)", label_text); continue; } Err(BlockReadError::EndOfFile) => { /* EOF mark at BOT, assume tape is empty */ } Err(BlockReadError::EndOfStream) => { /* tape is empty */ } Err(_err) => { - task_warn!( - worker, + warn!( "media '{}' read error (maybe not empty - format it first)", label_text ); @@ -1156,7 +1116,7 @@ fn barcode_label_media_worker( pool: pool.clone(), }; - write_media_label(worker.clone(), &mut drive, label, pool.clone())? + write_media_label(&mut drive, label, pool.clone())? } Ok(()) @@ -1315,14 +1275,12 @@ pub fn catalog_media( let media_id = match drive.read_label()? { (Some(media_id), key_config) => { - task_log!( - worker, + info!( "found media label: {}", serde_json::to_string_pretty(&serde_json::to_value(&media_id)?)? ); if key_config.is_some() { - task_log!( - worker, + info!( "encryption key config: {}", serde_json::to_string_pretty(&serde_json::to_value(&key_config)?)? ); @@ -1336,7 +1294,7 @@ pub fn catalog_media( let (_media_set_lock, media_set_uuid) = match media_id.media_set_label { None => { - task_log!(worker, "media is empty"); + info!("media is empty"); let _pool_lock = if let Some(pool) = media_id.pool() { lock_media_pool(TAPE_STATUS_DIR, &pool)? } else { @@ -1349,7 +1307,7 @@ pub fn catalog_media( Some(ref set) => { if set.unassigned() { // media is empty - task_log!(worker, "media is empty"); + info!("media is empty"); let _lock = lock_unassigned_media_pool(TAPE_STATUS_DIR)?; MediaCatalog::destroy(TAPE_STATUS_DIR, &media_id.label.uuid)?; inventory.store(media_id.clone(), false)?; @@ -1374,14 +1332,14 @@ pub fn catalog_media( if !scan { let media_set = inventory.compute_media_set_members(media_set_uuid)?; - if fast_catalog_restore(&worker, &mut drive, &media_set, &media_id.label.uuid)? { + if fast_catalog_restore(&mut drive, &media_set, &media_id.label.uuid)? 
{ return Ok(()); } - task_log!(worker, "no catalog found"); + info!("no catalog found"); } - task_log!(worker, "scanning entire media to reconstruct catalog"); + info!("scanning entire media to reconstruct catalog"); drive.rewind()?; drive.read_label()?; // skip over labels - we already read them above diff --git a/src/api2/tape/restore.rs b/src/api2/tape/restore.rs index 8273c867..14a164e0 100644 --- a/src/api2/tape/restore.rs +++ b/src/api2/tape/restore.rs @@ -6,6 +6,7 @@ use std::sync::Arc; use anyhow::{bail, format_err, Error}; use serde_json::Value; +use tracing::{info, warn}; use proxmox_human_byte::HumanByte; use proxmox_io::ReadExt; @@ -13,7 +14,7 @@ use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType}; use proxmox_schema::{api, ApiType}; use proxmox_section_config::SectionConfigData; use proxmox_sys::fs::{replace_file, CreateOptions}; -use proxmox_sys::{task_log, task_warn, WorkerTaskContext}; +use proxmox_sys::WorkerTaskContext; use proxmox_uuid::Uuid; use pbs_api_types::{ @@ -399,12 +400,12 @@ pub fn restore( .and_then(lookup_user_email) .or_else(|| lookup_user_email(&auth_id.clone().into())); - task_log!(worker, "Mediaset '{media_set}'"); - task_log!(worker, "Pool: {pool}"); + info!("Mediaset '{media_set}'"); + info!("Pool: {pool}"); let res = if snapshots.is_some() || namespaces { restore_list_worker( - worker.clone(), + worker, snapshots.unwrap_or_default(), inventory, media_set_uuid, @@ -418,7 +419,7 @@ pub fn restore( ) } else { restore_full_worker( - worker.clone(), + worker, inventory, media_set_uuid, drive_config, @@ -430,10 +431,10 @@ pub fn restore( ) }; if res.is_ok() { - task_log!(worker, "Restore mediaset '{media_set}' done"); + info!("Restore mediaset '{media_set}' done"); } if let Err(err) = set_tape_device_state(&drive, "") { - task_log!(worker, "could not unset drive state for {drive}: {err}"); + info!("could not unset drive state for {drive}: {err}"); } res @@ -484,7 +485,7 @@ fn restore_full_worker( } if let Some(fingerprint) = encryption_key_fingerprint { - task_log!(worker, "Encryption key fingerprint: {fingerprint}"); + info!("Encryption key fingerprint: {fingerprint}"); } let used_datastores = store_map.used_datastores(); @@ -493,13 +494,9 @@ fn restore_full_worker( .map(|(t, _)| String::from(t.name())) .collect::>() .join(", "); - task_log!(worker, "Datastore(s): {datastore_list}",); - task_log!(worker, "Drive: {drive_name}"); - log_required_tapes( - &worker, - &inventory, - media_id_list.iter().map(|id| &id.label.uuid), - ); + info!("Datastore(s): {datastore_list}",); + info!("Drive: {drive_name}"); + log_required_tapes(&inventory, media_id_list.iter().map(|id| &id.label.uuid)); let mut datastore_locks = Vec::new(); for (target, _) in used_datastores.values() { @@ -529,7 +526,6 @@ fn restore_full_worker( #[allow(clippy::too_many_arguments)] fn check_snapshot_restorable( - worker: &WorkerTask, store_map: &DataStoreMap, store: &str, snapshot: &str, @@ -570,7 +566,7 @@ fn check_snapshot_restorable( auth_id, Some(restore_owner), ) { - task_warn!(worker, "cannot restore {store}:{snapshot} to {ns}: '{err}'"); + warn!("cannot restore {store}:{snapshot} to {ns}: '{err}'"); continue; } @@ -578,8 +574,7 @@ fn check_snapshot_restorable( if let Ok(owner) = datastore.get_owner(&ns, dir.as_ref()) { if restore_owner != &owner { // only the owner is allowed to create additional snapshots - task_warn!( - worker, + warn!( "restore of '{snapshot}' to {ns} failed, owner check failed ({restore_owner} \ != {owner})", ); @@ -590,10 +585,7 @@ fn 
check_snapshot_restorable( have_some_permissions = true; if datastore.snapshot_path(&ns, dir).exists() { - task_warn!( - worker, - "found snapshot {snapshot} on target datastore/namespace, skipping...", - ); + warn!("found snapshot {snapshot} on target datastore/namespace, skipping...",); continue; } can_restore_some = true; @@ -606,11 +598,7 @@ fn check_snapshot_restorable( Ok(can_restore_some) } -fn log_required_tapes<'a>( - worker: &WorkerTask, - inventory: &Inventory, - list: impl Iterator, -) { +fn log_required_tapes<'a>(inventory: &Inventory, list: impl Iterator) { let mut tape_list = list .map(|uuid| { inventory @@ -622,7 +610,7 @@ fn log_required_tapes<'a>( }) .collect::>(); tape_list.sort_unstable(); - task_log!(worker, "Required media list: {}", tape_list.join(";")); + info!("Required media list: {}", tape_list.join(";")); } #[allow(clippy::too_many_arguments)] @@ -654,14 +642,13 @@ fn restore_list_worker( let (ns, dir) = match parse_ns_and_snapshot(snapshot) { Ok((ns, dir)) if store_map.has_full_mapping(store, &ns) => (ns, dir), Err(err) => { - task_warn!(worker, "couldn't parse snapshot {snapshot} - {err}"); + warn!("couldn't parse snapshot {snapshot} - {err}"); continue; } _ => continue, }; let snapshot = print_ns_and_snapshot(&ns, &dir); match check_snapshot_restorable( - &worker, &store_map, store, &snapshot, @@ -675,7 +662,7 @@ fn restore_list_worker( Ok(true) => restorable.push((store.to_string(), snapshot.to_string(), ns, dir)), Ok(false) => {} Err(err) => { - task_warn!(worker, "{err}"); + warn!("{err}"); skipped.push(format!("{store}:{snapshot}")); } } @@ -693,7 +680,6 @@ fn restore_list_worker( match parse_ns_and_snapshot(snapshot) { Ok((ns, dir)) => { match check_snapshot_restorable( - &worker, &store_map, store, snapshot, @@ -709,14 +695,14 @@ fn restore_list_worker( } Ok(false) => None, Err(err) => { - task_warn!(worker, "{err}"); + warn!("{err}"); skipped.push(format!("{store}:{snapshot}")); None } } } Err(err) => { - task_warn!(worker, "could not restore {store_snapshot}: {err}"); + warn!("could not restore {store_snapshot}: {err}"); skipped.push(store_snapshot); None } @@ -734,10 +720,7 @@ fn restore_list_worker( let media_id = inventory.lookup_media(media_uuid).unwrap(); (media_id, file_num) } else { - task_warn!( - worker, - "did not find snapshot '{store}:{snapshot}' in media set", - ); + warn!("did not find snapshot '{store}:{snapshot}' in media set",); skipped.push(format!("{store}:{snapshot}")); continue; }; @@ -750,26 +733,25 @@ fn restore_list_worker( .or_default(); file_list.push(file_num); - task_log!( - worker, + info!( "found snapshot {snapshot} on {}: file {file_num}", media_id.label.label_text, ); } if snapshot_file_hash.is_empty() { - task_log!(worker, "nothing to restore, skipping remaining phases..."); + info!("nothing to restore, skipping remaining phases..."); if !skipped.is_empty() { - task_log!(worker, "skipped the following snapshots:"); + info!("skipped the following snapshots:"); for snap in skipped { - task_log!(worker, " {snap}"); + info!(" {snap}"); } } return Ok(()); } - task_log!(worker, "Phase 1: temporarily restore snapshots to temp dir"); - log_required_tapes(&worker, &inventory, snapshot_file_hash.keys()); + info!("Phase 1: temporarily restore snapshots to temp dir"); + log_required_tapes(&inventory, snapshot_file_hash.keys()); let mut datastore_chunk_map: HashMap> = HashMap::new(); let mut tmp_paths = Vec::new(); for (media_uuid, file_list) in snapshot_file_hash.iter_mut() { @@ -820,10 +802,10 @@ fn restore_list_worker( 
drop(catalog); if !media_file_chunk_map.is_empty() { - task_log!(worker, "Phase 2: restore chunks to datastores"); - log_required_tapes(&worker, &inventory, media_file_chunk_map.keys()); + info!("Phase 2: restore chunks to datastores"); + log_required_tapes(&inventory, media_file_chunk_map.keys()); } else { - task_log!(worker, "All chunks are already present, skip phase 2..."); + info!("All chunks are already present, skip phase 2..."); } for (media_uuid, file_chunk_map) in media_file_chunk_map.iter_mut() { @@ -838,10 +820,7 @@ fn restore_list_worker( restore_file_chunk_map(worker.clone(), &mut drive, &store_map, file_chunk_map)?; } - task_log!( - worker, - "Phase 3: copy snapshots from temp dir to datastores" - ); + info!("Phase 3: copy snapshots from temp dir to datastores"); let mut errors = false; for (source_datastore, snapshot, source_ns, backup_dir) in snapshots.into_iter() { if let Err(err) = proxmox_lang::try_block!({ @@ -898,20 +877,14 @@ fn restore_list_worker( Ok(()) }) { - task_warn!( - worker, - "could not restore {source_datastore}:{snapshot}: '{err}'" - ); + warn!("could not restore {source_datastore}:{snapshot}: '{err}'"); skipped.push(format!("{source_datastore}:{snapshot}")); } } - task_log!(worker, "Restore snapshot '{}' done", snapshot); + info!("Restore snapshot '{}' done", snapshot); Ok::<_, Error>(()) }) { - task_warn!( - worker, - "could not copy {source_datastore}:{snapshot}: {err}" - ); + warn!("could not copy {source_datastore}:{snapshot}: {err}"); errors = true; } } @@ -921,7 +894,7 @@ fn restore_list_worker( std::fs::remove_dir_all(&tmp_path) .map_err(|err| format_err!("remove_dir_all failed - {err}")) }) { - task_warn!(worker, "could not clean up temp dir {tmp_path:?}: {err}"); + warn!("could not clean up temp dir {tmp_path:?}: {err}"); errors = true; }; } @@ -930,19 +903,16 @@ fn restore_list_worker( bail!("errors during copy occurred"); } if !skipped.is_empty() { - task_log!(worker, "(partially) skipped the following snapshots:"); + info!("(partially) skipped the following snapshots:"); for snap in skipped { - task_log!(worker, " {snap}"); + info!(" {snap}"); } } Ok(()) }); if res.is_err() { - task_warn!( - worker, - "Error during restore, partially restored snapshots will NOT be cleaned up" - ); + warn!("Error during restore, partially restored snapshots will NOT be cleaned up"); } for (datastore, _) in store_map.used_datastores().values() { @@ -950,7 +920,7 @@ fn restore_list_worker( match std::fs::remove_dir_all(tmp_path) { Ok(()) => {} Err(err) if err.kind() == std::io::ErrorKind::NotFound => {} - Err(err) => task_warn!(worker, "error cleaning up: {}", err), + Err(err) => warn!("error cleaning up: {}", err), } } @@ -1033,13 +1003,10 @@ fn restore_snapshots_to_tmpdir( for file_num in file_list { let current_file_number = drive.current_file_number()?; if current_file_number != *file_num { - task_log!( - worker, - "was at file {current_file_number}, moving to {file_num}" - ); + info!("was at file {current_file_number}, moving to {file_num}"); drive.move_to_file(*file_num)?; let current_file_number = drive.current_file_number()?; - task_log!(worker, "now at file {}", current_file_number); + info!("now at file {}", current_file_number); } let mut reader = drive.read_next_file()?; @@ -1061,20 +1028,14 @@ fn restore_snapshots_to_tmpdir( let source_datastore = archive_header.store; let snapshot = archive_header.snapshot; - task_log!( - worker, - "File {file_num}: snapshot archive {source_datastore}:{snapshot}", - ); + info!("File {file_num}: snapshot archive 
{source_datastore}:{snapshot}",); let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?; let target_datastore = match store_map.target_store(&source_datastore) { Some(datastore) => datastore, None => { - task_warn!( - worker, - "could not find target datastore for {source_datastore}:{snapshot}", - ); + warn!("could not find target datastore for {source_datastore}:{snapshot}",); continue; } }; @@ -1126,10 +1087,10 @@ fn restore_file_chunk_map( for (nr, chunk_map) in file_chunk_map.iter_mut() { let current_file_number = drive.current_file_number()?; if current_file_number != *nr { - task_log!(worker, "was at file {current_file_number}, moving to {nr}"); + info!("was at file {current_file_number}, moving to {nr}"); drive.move_to_file(*nr)?; let current_file_number = drive.current_file_number()?; - task_log!(worker, "now at file {}", current_file_number); + info!("now at file {}", current_file_number); } let mut reader = drive.read_next_file()?; let header: MediaContentHeader = unsafe { reader.read_le_value()? }; @@ -1146,10 +1107,7 @@ fn restore_file_chunk_map( let source_datastore = archive_header.store; - task_log!( - worker, - "File {nr}: chunk archive for datastore '{source_datastore}'", - ); + info!("File {nr}: chunk archive for datastore '{source_datastore}'",); let datastore = store_map.target_store(&source_datastore).ok_or_else(|| { format_err!("unexpected chunk archive for store: {source_datastore}") @@ -1161,7 +1119,7 @@ fn restore_file_chunk_map( datastore.clone(), chunk_map, )?; - task_log!(worker, "restored {count} chunks"); + info!("restored {count} chunks"); } _ => bail!("unexpected content magic {:?}", header.content_magic), } @@ -1221,8 +1179,7 @@ fn restore_partial_chunk_archive<'a>( let elapsed = start_time.elapsed()?.as_secs_f64(); let bytes = bytes.load(std::sync::atomic::Ordering::SeqCst) as f64; - task_log!( - worker, + info!( "restored {} ({:.2}/s)", HumanByte::new_decimal(bytes), HumanByte::new_decimal(bytes / elapsed), @@ -1301,15 +1258,11 @@ pub fn restore_media( let current_file_number = drive.current_file_number()?; let reader = match drive.read_next_file() { Err(BlockReadError::EndOfFile) => { - task_log!( - worker, - "skip unexpected filemark at pos {}", - current_file_number - ); + info!("skip unexpected filemark at pos {}", current_file_number); continue; } Err(BlockReadError::EndOfStream) => { - task_log!(worker, "detected EOT after {} files", current_file_number); + info!("detected EOT after {} files", current_file_number); break; } Err(BlockReadError::Error(err)) => { @@ -1373,12 +1326,9 @@ fn restore_archive<'a>( let datastore_name = archive_header.store; let snapshot = archive_header.snapshot; - task_log!( - worker, + info!( "File {}: snapshot archive {}:{}", - current_file_number, - datastore_name, - snapshot + current_file_number, datastore_name, snapshot ); let (backup_ns, backup_dir) = parse_ns_and_snapshot(&snapshot)?; @@ -1413,16 +1363,16 @@ fn restore_archive<'a>( path.push(rel_path); if is_new { - task_log!(worker, "restore snapshot {}", backup_dir); + info!("restore snapshot {}", backup_dir); - match restore_snapshot_archive(worker.clone(), reader, &path) { + match restore_snapshot_archive(worker, reader, &path) { Err(err) => { std::fs::remove_dir_all(&path)?; bail!("restore snapshot {} failed - {}", backup_dir, err); } Ok(false) => { std::fs::remove_dir_all(&path)?; - task_log!(worker, "skip incomplete snapshot {}", backup_dir); + info!("skip incomplete snapshot {}", backup_dir); } Ok(true) => { catalog.register_snapshot( @@ -1438,7 
+1388,7 @@ fn restore_archive<'a>( return Ok(()); } } else { - task_log!(worker, "skipping..."); + info!("skipping..."); } } @@ -1465,11 +1415,9 @@ fn restore_archive<'a>( let source_datastore = archive_header.store; - task_log!( - worker, + info!( "File {}: chunk archive for datastore '{}'", - current_file_number, - source_datastore + current_file_number, source_datastore ); let datastore = target .as_ref() @@ -1487,15 +1435,9 @@ fn restore_archive<'a>( .or_default(); let chunks = if let Some(datastore) = datastore { - restore_chunk_archive( - worker.clone(), - reader, - datastore, - checked_chunks, - verbose, - )? + restore_chunk_archive(worker, reader, datastore, checked_chunks, verbose)? } else { - scan_chunk_archive(worker.clone(), reader, verbose)? + scan_chunk_archive(worker, reader, verbose)? }; if let Some(chunks) = chunks { @@ -1505,12 +1447,12 @@ fn restore_archive<'a>( &source_datastore, &chunks[..], )?; - task_log!(worker, "register {} chunks", chunks.len()); + info!("register {} chunks", chunks.len()); catalog.commit_if_large()?; } return Ok(()); } else if target.is_some() { - task_log!(worker, "skipping..."); + info!("skipping..."); } reader.skip_data()?; // read all data @@ -1521,11 +1463,9 @@ fn restore_archive<'a>( let archive_header: CatalogArchiveHeader = serde_json::from_slice(&header_data) .map_err(|err| format_err!("unable to parse catalog archive header - {}", err))?; - task_log!( - worker, + info!( "File {}: skip catalog '{}'", - current_file_number, - archive_header.uuid + current_file_number, archive_header.uuid ); reader.skip_data()?; // read all data @@ -1560,7 +1500,7 @@ fn scan_chunk_archive<'a>( // check if this is an aborted stream without end marker if let Ok(false) = reader.has_end_marker() { - task_log!(worker, "missing stream end marker"); + info!("missing stream end marker"); return Ok(None); } @@ -1572,7 +1512,7 @@ fn scan_chunk_archive<'a>( worker.check_abort()?; if verbose { - task_log!(worker, "Found chunk: {}", hex::encode(digest)); + info!("Found chunk: {}", hex::encode(digest)); } chunks.push(digest); @@ -1596,8 +1536,6 @@ fn restore_chunk_archive<'a>( let bytes = Arc::new(std::sync::atomic::AtomicU64::new(0)); let bytes2 = bytes.clone(); - let worker2 = worker.clone(); - let writer_pool = ParallelHandler::new( "tape restore chunk writer", 4, @@ -1605,7 +1543,7 @@ fn restore_chunk_archive<'a>( let chunk_exists = datastore.cond_touch_chunk(&digest, false)?; if !chunk_exists { if verbose { - task_log!(worker2, "Insert chunk: {}", hex::encode(digest)); + info!("Insert chunk: {}", hex::encode(digest)); } bytes2.fetch_add(chunk.raw_size(), std::sync::atomic::Ordering::SeqCst); // println!("verify and write {}", hex::encode(&digest)); @@ -1616,7 +1554,7 @@ fn restore_chunk_archive<'a>( datastore.insert_chunk(&chunk, &digest)?; } else if verbose { - task_log!(worker2, "Found existing chunk: {}", hex::encode(digest)); + info!("Found existing chunk: {}", hex::encode(digest)); } Ok(()) }, @@ -1638,7 +1576,7 @@ fn restore_chunk_archive<'a>( // check if this is an aborted stream without end marker if let Ok(false) = reader.has_end_marker() { - task_log!(worker, "missing stream end marker"); + info!("missing stream end marker"); return Ok(None); } @@ -1662,8 +1600,7 @@ fn restore_chunk_archive<'a>( let elapsed = start_time.elapsed()?.as_secs_f64(); let bytes = bytes.load(std::sync::atomic::Ordering::SeqCst) as f64; - task_log!( - worker, + info!( "restored {} ({:.2}/s)", HumanByte::new_decimal(bytes), HumanByte::new_decimal(bytes / elapsed), @@ -1814,7 
+1751,6 @@ fn try_restore_snapshot_archive( /// Try to restore media catalogs (form catalog_archives) pub fn fast_catalog_restore( - worker: &WorkerTask, drive: &mut Box, media_set: &MediaSet, uuid: &Uuid, // current media Uuid @@ -1835,14 +1771,11 @@ pub fn fast_catalog_restore( // limit reader scope let mut reader = match drive.read_next_file() { Err(BlockReadError::EndOfFile) => { - task_log!( - worker, - "skip unexpected filemark at pos {current_file_number}" - ); + info!("skip unexpected filemark at pos {current_file_number}"); continue; } Err(BlockReadError::EndOfStream) => { - task_log!(worker, "detected EOT after {current_file_number} files"); + info!("detected EOT after {current_file_number} files"); break; } Err(BlockReadError::Error(err)) => { @@ -1859,7 +1792,7 @@ pub fn fast_catalog_restore( if header.content_magic == PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 || header.content_magic == PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_1 { - task_log!(worker, "found catalog at pos {}", current_file_number); + info!("found catalog at pos {}", current_file_number); let header_data = reader.read_exact_allocated(header.size as usize)?; @@ -1869,11 +1802,7 @@ pub fn fast_catalog_restore( })?; if &archive_header.media_set_uuid != media_set.uuid() { - task_log!( - worker, - "skipping unrelated catalog at pos {}", - current_file_number - ); + info!("skipping unrelated catalog at pos {}", current_file_number); reader.skip_data()?; // read all data continue; } @@ -1886,8 +1815,7 @@ pub fn fast_catalog_restore( }); if !wanted { - task_log!( - worker, + info!( "skip catalog because media '{}' not inventarized", catalog_uuid ); @@ -1900,11 +1828,7 @@ pub fn fast_catalog_restore( } else { // only restore if catalog does not exist if MediaCatalog::exists(TAPE_STATUS_DIR, catalog_uuid) { - task_log!( - worker, - "catalog for media '{}' already exists", - catalog_uuid - ); + info!("catalog for media '{}' already exists", catalog_uuid); reader.skip_data()?; // read all data continue; } @@ -1920,19 +1844,11 @@ pub fn fast_catalog_restore( match MediaCatalog::parse_catalog_header(&mut file)? { (true, Some(media_uuid), Some(media_set_uuid)) => { if &media_uuid != catalog_uuid { - task_log!( - worker, - "catalog uuid mismatch at pos {}", - current_file_number - ); + info!("catalog uuid mismatch at pos {}", current_file_number); continue; } if media_set_uuid != archive_header.media_set_uuid { - task_log!( - worker, - "catalog media_set mismatch at pos {}", - current_file_number - ); + info!("catalog media_set mismatch at pos {}", current_file_number); continue; } @@ -1943,18 +1859,14 @@ pub fn fast_catalog_restore( )?; if catalog_uuid == uuid { - task_log!(worker, "successfully restored catalog"); + info!("successfully restored catalog"); found_catalog = true } else { - task_log!( - worker, - "successfully restored related catalog {}", - media_uuid - ); + info!("successfully restored related catalog {}", media_uuid); } } _ => { - task_warn!(worker, "got incomplete catalog header - skip file"); + warn!("got incomplete catalog header - skip file"); continue; } } @@ -1968,7 +1880,7 @@ pub fn fast_catalog_restore( } moved_to_eom = true; - task_log!(worker, "searching for catalog at EOT (moving to EOT)"); + info!("searching for catalog at EOT (moving to EOT)"); drive.move_to_last_file()?; let new_file_number = drive.current_file_number()?; -- 2.43.0