public inbox for pbs-devel@lists.proxmox.com
 help / color / mirror / Atom feed
From: Dominik Csapak <d.csapak@proxmox.com>
To: Proxmox Backup Server development discussion
	<pbs-devel@lists.proxmox.com>, Dylan Whyte <d.whyte@proxmox.com>
Subject: Re: [pbs-devel] [PATCH V2 proxmox-backup] Fix 3335: Allow removing datastore contents on delete
Date: Mon, 21 Feb 2022 12:10:37 +0100	[thread overview]
Message-ID: <4dec537e-4526-0db7-bb06-bf25a3c04ddb@proxmox.com> (raw)
In-Reply-To: <20220126153836.2053113-1-d.whyte@proxmox.com>

hi, sorry for the late review, a few comments inline

On 1/26/22 16:38, Dylan Whyte wrote:
> This adds an option to 'datastore remove', to additionally remove the
> datatore's underlying contents.
> 
> In addition, the task is now carried out in a worker, in order to
> prevent the GUI from blocking or timing out during a removal (GUI patch
> to follow).
> 
> Signed-off-by: Dylan Whyte <d.whyte@proxmox.com>
> ---
> version 2:
> - Set 'destroy_data' argument as bool, rather than Option<bool> and
>    actually check that it's false, and not just Some(bool).
> 
>   src/api2/config/datastore.rs                | 79 ++++++++++++++++++---
>   src/bin/proxmox_backup_manager/datastore.rs | 55 +++++++++++++-
>   2 files changed, 121 insertions(+), 13 deletions(-)
> 
> diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
> index 60bc3c0e..a976a618 100644
> --- a/src/api2/config/datastore.rs
> +++ b/src/api2/config/datastore.rs
> @@ -8,7 +8,7 @@ use hex::FromHex;
>   use proxmox_router::{Router, RpcEnvironment, RpcEnvironmentType, Permission};
>   use proxmox_schema::{api, ApiType};
>   use proxmox_section_config::SectionConfigData;
> -use proxmox_sys::WorkerTaskContext;
> +use proxmox_sys::{WorkerTaskContext, task_log};
>   
>   use pbs_datastore::chunk_store::ChunkStore;
>   use pbs_config::BackupLockGuard;
> @@ -329,6 +329,12 @@ pub fn update_datastore(
>                   optional: true,
>                   schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
>               },
> +            "destroy-data": {
> +                description: "Delete the datastore's underlying contents",
> +                optional: true,
> +                type: bool,
> +                default: false,
> +            },
>           },
>       },
>       access: {
> @@ -340,23 +346,19 @@ pub async fn delete_datastore(
>       name: String,
>       keep_job_configs: bool,
>       digest: Option<String>,
> +    destroy_data: bool,
>       rpcenv: &mut dyn RpcEnvironment,
> -) -> Result<(), Error> {
> +) -> Result<String, Error> {

not completely sure if that's a breaking change (but I'd guess going from
returning nothing to returning something is OK in this case)

>   
> -    let _lock = pbs_config::datastore::lock_config()?;
> +    let lock = pbs_config::datastore::lock_config()?;
>   
> -    let (mut config, expected_digest) = pbs_config::datastore::config()?;
> +    let (config, expected_digest) = pbs_config::datastore::config()?;
>   
>       if let Some(ref digest) = digest {
>           let digest = <[u8; 32]>::from_hex(digest)?;
>           crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
>       }
>   
> -    match config.sections.get(&name) {
> -        Some(_) => { config.sections.remove(&name); },
> -        None => bail!("datastore '{}' does not exist.", name),
> -    }
> -
>       if !keep_job_configs {
>           for job in list_verification_jobs(Some(name.clone()), Value::Null, rpcenv)? {
>               delete_verification_job(job.config.id, None, rpcenv)?
> @@ -371,7 +373,23 @@ pub async fn delete_datastore(
>           }
>       }
>   
> -    pbs_config::datastore::save_config(&config)?;
> +    let datastore_config: DataStoreConfig = config.lookup("datastore", &name)?;
> +
> +    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
> +    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
> +
> +    let upid = WorkerTask::new_thread(
> +        "delete-datastore",
> +        Some(datastore_config.name.to_string()),
> +        auth_id.to_string(),
> +        to_stdout,
> +        move |worker| do_delete_datastore(
> +            lock,
> +            config,
> +            datastore_config,
> +            destroy_data,
> +            &worker),
> +    )?;

while factoring that out is OK IMHO, I'd rather not have the
BackupLockGuard in the function signature

you can also simply move it into the scope by doing e.g.

move |worker| {
     let _lock = lock; // keep lock
     do_delete_datastore(...)
}

this way the 'do_delete_' call does not have to know about the lock

also, the 'remove_state_file' and 'notify_datastore_removed' calls
would belong after the do_delete_datastore call in the worker;
otherwise you remove the state files even if there is some error and
the datastore is not actually removed

>   
>       // ignore errors
>       let _ = jobstate::remove_state_file("prune", &name);
> @@ -379,6 +397,47 @@ pub async fn delete_datastore(
>   
>       crate::server::notify_datastore_removed().await?;
>   
> +    Ok(upid)
> +}
> +
> +fn do_delete_datastore(
> +    _lock: BackupLockGuard,
> +    mut config: SectionConfigData,
> +    datastore: DataStoreConfig,
> +    destroy_data: bool,
> +    worker: &dyn WorkerTaskContext,
> +) -> Result<(), Error> {
> +    task_log!(worker, "Removing datastore: {}...", datastore.name);
> +    match config.sections.get(&datastore.name) {
> +        Some(_) => {
> +            if destroy_data {
> +                task_log!(worker, "Destroying datastore data...");
> +                match std::fs::read_dir(&datastore.path) {
> +                    Ok(dir) => {

this can be done with less indentation with:

let dir = std::fs::read_dir(&datastore.path)
    .map_err(|err| format_err!("failed to read {}: {}", &datastore.path, err))?;

instead of the match

> +                        for entry in dir {
> +                            if let Ok(entry) = entry {
> +                                let path = entry.path();
> +                                task_log!(worker, "Removing {}...", path.to_str().unwrap());
> +                                if path.is_dir() {
> +                                    std::fs::remove_dir_all(path)?;
> +                                } else {
> +                                    std::fs::remove_file(path)?;
> +                                }

while i know that we don't recommend having other data in the datastore,
imho it's not wise to remove *everything* in the datastore path

AFAICT we can know what we touch (ct/host/vm/.chunk dirs, .gc-status, .lock)
so we could only remove that?

also, we should probably remove the datastore from the config *before*
removing files from it? otherwise I could start deleting while
another user starts a backup, and the two operations would
interfere with each other and leave the datastore in an
undefined state?

what we at least should have here is some mechanism that nobody
can backup/restore/etc. from a datastore while it's deleted

> +                            };
> +                        }
> +                        task_log!(worker, "Finished destroying data...");
> +                    },
> +                    Err(err) => bail!("Failed to read {}: {}", &datastore.path, err),
> +                }
> +            }
> +            task_log!(worker, "Removing datastore from config...");
> +            config.sections.remove(&datastore.name);
> +        },
> +        None => bail!("datastore '{}' does not exist.", datastore.name),

this should not fail here, since we already got the config from above and
we have the config locked?

> +    }
> +
> +    pbs_config::datastore::save_config(&config)?;
> +
>       Ok(())
>   }
>   
> diff --git a/src/bin/proxmox_backup_manager/datastore.rs b/src/bin/proxmox_backup_manager/datastore.rs
> index a35e7bf5..98a0b1b2 100644
> --- a/src/bin/proxmox_backup_manager/datastore.rs
> +++ b/src/bin/proxmox_backup_manager/datastore.rs
> @@ -1,11 +1,13 @@
>   use anyhow::Error;
>   use serde_json::Value;
>   
> -use proxmox_router::{cli::*, ApiHandler, RpcEnvironment};
> +use proxmox_router::{cli::*, ApiHandler, RpcEnvironment, Permission};
>   use proxmox_schema::api;
>   
>   use pbs_client::view_task_result;
> -use pbs_api_types::{DataStoreConfig, DATASTORE_SCHEMA};
> +use pbs_api_types::{DataStoreConfig, DATASTORE_SCHEMA, PROXMOX_CONFIG_DIGEST_SCHEMA,
> +    PRIV_DATASTORE_ALLOCATE
> +};
>   
>   use proxmox_backup::api2;
>   use proxmox_backup::client_helpers::connect_to_localhost;
> @@ -100,6 +102,53 @@ async fn create_datastore(mut param: Value) -> Result<Value, Error> {
>       Ok(Value::Null)
>   }
>   
> +#[api(
> +    protected: true,
> +    input: {
> +        properties: {
> +            name: {
> +                schema: DATASTORE_SCHEMA,
> +            },
> +            "keep-job-configs": {
> +                description: "If enabled, the job configurations related to this datastore will be kept.",
> +                type: bool,
> +                optional: true,
> +                default: false,
> +            },
> +            digest: {
> +                optional: true,
> +                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
> +            },
> +            "destroy-data": {
> +                description: "Delete the datastore's underlying contents",
> +                optional: true,
> +                type: bool,
> +                default: false,
> +            }
> +        },
> +    },
> +    access: {
> +        permission: &Permission::Privilege(&["datastore", "{name}"], PRIV_DATASTORE_ALLOCATE, false),
> +    },
> +)]
> +/// Remove a datastore configuration.
> +async fn delete_datastore(
> +    mut param: Value,
> +    rpcenv: &mut dyn RpcEnvironment,
> +) -> Result<Value, Error> {
> +    param["node"] = "localhost".into();
> +
> +    let info = &api2::config::datastore::API_METHOD_DELETE_DATASTORE;
> +    let result = match info.handler {
> +        ApiHandler::Async(handler) => (handler)(param, info, rpcenv).await?,
> +        _ => unreachable!(),
> +    };
> +
> +    crate::wait_for_local_worker(result.as_str().unwrap()).await?;
> +    Ok(Value::Null)
> +

I know the other functions in this file do it exactly like this,
but I'd probably prefer to have an HTTP client here to
do the deletion via the API? (like we do in most other cases)

> +}
> +
>   pub fn datastore_commands() -> CommandLineInterface {
>   
>       let cmd_def = CliCommandMap::new()
> @@ -121,7 +170,7 @@ pub fn datastore_commands() -> CommandLineInterface {
>                   .completion_cb("prune-schedule", pbs_config::datastore::complete_calendar_event)
>           )
>           .insert("remove",
> -                CliCommand::new(&api2::config::datastore::API_METHOD_DELETE_DATASTORE)
> +                CliCommand::new(&API_METHOD_DELETE_DATASTORE)
>                   .arg_param(&["name"])
>                   .completion_cb("name", pbs_config::datastore::complete_datastore_name)
>           );





  parent reply	other threads:[~2022-02-21 11:10 UTC|newest]

Thread overview: 5+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-01-26 15:38 Dylan Whyte
2022-02-07 14:39 ` Oguz Bektas
2022-02-21 11:10 ` Dominik Csapak [this message]
2022-02-21 13:39   ` Thomas Lamprecht
2022-02-21 13:49     ` Dominik Csapak

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=4dec537e-4526-0db7-bb06-bf25a3c04ddb@proxmox.com \
    --to=d.csapak@proxmox.com \
    --cc=d.whyte@proxmox.com \
    --cc=pbs-devel@lists.proxmox.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox
Service provided by Proxmox Server Solutions GmbH | Privacy | Legal