From: Fiona Ebner <f.ebner@proxmox.com>
To: pve-devel@lists.proxmox.com
Subject: [pve-devel] [PATCH qemu] PVE backup: improve error when copy-before-write fails for fleecing
Date: Mon, 29 Apr 2024 14:53:00 +0200
Message-ID: <20240429125300.86575-1-f.ebner@proxmox.com>

With fleecing, a copy-before-write failure does not fail the guest
write, but only sets the snapshot error associated with the
copy-before-write filter, making further requests to the snapshot
access fail with EACCES, which then also fails the job. But that error
code is not the root cause of why the backup failed, so bubble up the
original snapshot error instead.

Reported-by: Friedrich Weber <f.weber@proxmox.com>
Signed-off-by: Fiona Ebner <f.ebner@proxmox.com>
Tested-by: Friedrich Weber <f.weber@proxmox.com>
---

Should have a "fixes #5409:" prefix when applied in the parent git
module. Please consider applying this after the update to QEMU 9.0.0;
otherwise, I'll have to adapt and re-send the patch.
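
For illustration only, here is a minimal standalone sketch of the
error-bubbling idea described in the commit message, using plain C11
atomics instead of QEMU's qatomic helpers. The names
(record_cbw_failure, snapshot_read, backup_completed_ret) are made up
for this sketch and are not QEMU API; the actual changes are in the
diff below.

    #include <errno.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int snapshot_error; /* 0 = OK, otherwise a negative errno */

    static void record_cbw_failure(int ret)
    {
        int expected = 0;
        /* Keep only the first failure, like the qatomic_cmpxchg() in the patch. */
        atomic_compare_exchange_strong(&snapshot_error, &expected, ret);
    }

    static int snapshot_read(void)
    {
        /* Once broken, all further snapshot-API requests fail with EACCES. */
        return atomic_load(&snapshot_error) ? -EACCES : 0;
    }

    static int backup_completed_ret(int job_ret)
    {
        /* Completion path: prefer the recorded root cause over the generic EACCES. */
        int err = atomic_load(&snapshot_error);
        return (job_ret == -EACCES && err) ? err : job_ret;
    }

    int main(void)
    {
        record_cbw_failure(-ENOSPC);   /* e.g. the fleecing image ran full */
        record_cbw_failure(-EIO);      /* ignored: only the first error sticks */

        int job_ret = snapshot_read(); /* the backup job sees -EACCES */
        printf("job saw %d, user gets %d\n", job_ret, backup_completed_ret(job_ret));
        return 0;
    }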

 block/copy-before-write.c | 18 ++++++++++++------
 block/copy-before-write.h |  1 +
 pve-backup.c              |  9 +++++++++
 3 files changed, 22 insertions(+), 6 deletions(-)

diff --git a/block/copy-before-write.c b/block/copy-before-write.c
index 9ca5ec5e5c..7d70f221ca 100644
--- a/block/copy-before-write.c
+++ b/block/copy-before-write.c
@@ -27,6 +27,7 @@
 #include "qapi/qmp/qjson.h"
 
 #include "sysemu/block-backend.h"
+#include "qemu/atomic.h"
 #include "qemu/cutils.h"
 #include "qapi/error.h"
 #include "block/block_int.h"
@@ -74,7 +75,8 @@ typedef struct BDRVCopyBeforeWriteState {
      * @snapshot_error is normally zero. But on first copy-before-write failure
      * when @on_cbw_error == ON_CBW_ERROR_BREAK_SNAPSHOT, @snapshot_error takes
      * value of this error (<0). After that all in-flight and further
-     * snapshot-API requests will fail with that error.
+     * snapshot-API requests will fail with that error. To be accessed with
+     * atomics.
      */
     int snapshot_error;
 } BDRVCopyBeforeWriteState;
@@ -114,7 +116,7 @@ static coroutine_fn int cbw_do_copy_before_write(BlockDriverState *bs,
         return 0;
     }
 
-    if (s->snapshot_error) {
+    if (qatomic_read(&s->snapshot_error)) {
         return 0;
     }
 
@@ -138,9 +140,7 @@ static coroutine_fn int cbw_do_copy_before_write(BlockDriverState *bs,
     WITH_QEMU_LOCK_GUARD(&s->lock) {
         if (ret < 0) {
             assert(s->on_cbw_error == ON_CBW_ERROR_BREAK_SNAPSHOT);
-            if (!s->snapshot_error) {
-                s->snapshot_error = ret;
-            }
+            qatomic_cmpxchg(&s->snapshot_error, 0, ret);
         } else {
             bdrv_set_dirty_bitmap(s->done_bitmap, off, end - off);
         }
@@ -214,7 +214,7 @@ cbw_snapshot_read_lock(BlockDriverState *bs, int64_t offset, int64_t bytes,
 
     QEMU_LOCK_GUARD(&s->lock);
 
-    if (s->snapshot_error) {
+    if (qatomic_read(&s->snapshot_error)) {
         g_free(req);
         return NULL;
     }
@@ -594,6 +594,12 @@ void bdrv_cbw_drop(BlockDriverState *bs)
     bdrv_unref(bs);
 }
 
+int bdrv_cbw_snapshot_error(BlockDriverState *bs)
+{
+    BDRVCopyBeforeWriteState *s = bs->opaque;
+    return qatomic_read(&s->snapshot_error);
+}
+
 static void cbw_init(void)
 {
     bdrv_register(&bdrv_cbw_filter);
diff --git a/block/copy-before-write.h b/block/copy-before-write.h
index dc6cafe7fa..a27d2d7d9f 100644
--- a/block/copy-before-write.h
+++ b/block/copy-before-write.h
@@ -44,5 +44,6 @@ BlockDriverState *bdrv_cbw_append(BlockDriverState *source,
                                   BlockCopyState **bcs,
                                   Error **errp);
 void bdrv_cbw_drop(BlockDriverState *bs);
+int bdrv_cbw_snapshot_error(BlockDriverState *bs);
 
 #endif /* COPY_BEFORE_WRITE_H */
diff --git a/pve-backup.c b/pve-backup.c
index 00aaff6509..f8fa8e068b 100644
--- a/pve-backup.c
+++ b/pve-backup.c
@@ -385,6 +385,15 @@ static void pvebackup_complete_cb(void *opaque, int ret)
         di->fleecing.snapshot_access = NULL;
     }
     if (di->fleecing.cbw) {
+        /*
+         * With fleecing, failure for cbw does not fail the guest write, but only sets the snapshot
+         * error, making further requests to the snapshot fail with EACCES, which then also fail the
+         * job. But that code is not the root cause and just confusing, so update it.
+         */
+        int snapshot_error = bdrv_cbw_snapshot_error(di->fleecing.cbw);
+        if (di->completed_ret == -EACCES && snapshot_error) {
+            di->completed_ret = snapshot_error;
+        }
         bdrv_cbw_drop(di->fleecing.cbw);
         di->fleecing.cbw = NULL;
     }
-- 
2.39.2


