git.ipfire.org Git - thirdparty/xfsprogs-dev.git/commitdiff
xfs: wire up realtime refcount btree cursors
author     Darrick J. Wong <djwong@kernel.org>
           Mon, 24 Feb 2025 18:21:53 +0000 (10:21 -0800)
committer  Darrick J. Wong <djwong@kernel.org>
           Tue, 25 Feb 2025 17:15:59 +0000 (09:15 -0800)
Source kernel commit: e5a171729baf61b703069b11fa0d2955890e9b6b

Wire up realtime refcount btree cursors wherever they're needed
throughout the code base.

Signed-off-by: "Darrick J. Wong" <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
libxfs/xfs_btree.h
libxfs/xfs_refcount.c
libxfs/xfs_rtgroup.c
libxfs/xfs_rtgroup.h
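
Before the diffs, the shape of the change in brief: every site that processes a realtime refcount intent now builds a real rtrefcount btree cursor instead of asserting. Pieced together from the hunks below, the cursor lifecycle looks roughly like this (a sketch only; error handling and the surrounding transaction setup are elided):

	/* Sketch: lock the rtgroup refcount inode, join it to the
	 * transaction, build a cursor, and tear it down when done. */
	xfs_rtgroup_lock(rtg, XFS_RTGLOCK_REFCOUNT);
	xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_REFCOUNT);
	rcur = xfs_rtrefcountbt_init_cursor(tp, rtg);
	/* ... adjust reference counts through rcur ... */
	xfs_btree_del_cursor(rcur, error);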

diff --git a/libxfs/xfs_btree.h b/libxfs/xfs_btree.h
index dbc047b2fb2cf5a86064147aec440d0b54741867..355b304696e6c3e3e90d512cd97bbf7c8d3fad2f 100644
@@ -297,7 +297,7 @@ struct xfs_btree_cur
                struct {
                        unsigned int    nr_ops;         /* # record updates */
                        unsigned int    shape_changes;  /* # of extent splits */
-               } bc_refc;      /* refcountbt */
+               } bc_refc;      /* refcountbt/rtrefcountbt */
        };
 
        /* Must be at the end of the struct! */
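
The bc_refc counters are now shared by the per-AG and realtime refcount btrees: they record how much work one deferred operation has already done so the caller can decide when to stop and relog. That is why the xfs_refcount.c hunk below saves and restores them across a cursor teardown when an intent crosses into another group. A hypothetical consumer (the actual policy lives elsewhere in xfs_refcount.c; the threshold and weighting here are illustrative):

	/* Hypothetical budget check; not the kernel's actual policy. */
	static inline bool refc_budget_exhausted(struct xfs_btree_cur *cur,
						 unsigned int max_ops)
	{
		/* Extent splits consume more log space than plain record
		 * updates, so weight them more heavily. */
		return cur->bc_refc.nr_ops +
		       2 * cur->bc_refc.shape_changes >= max_ops;
	}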
diff --git a/libxfs/xfs_refcount.c b/libxfs/xfs_refcount.c
index ef08c26c75b32c57f7ee204894169348d84f02dc..738c3cd4ea513198e67ae64d5c5f9d48336f3a4d 100644
@@ -25,6 +25,7 @@
 #include "xfs_health.h"
 #include "defer_item.h"
 #include "xfs_rtgroup.h"
+#include "xfs_rtrefcount_btree.h"
 
 struct kmem_cache      *xfs_refcount_intent_cache;
 
@@ -1460,6 +1461,32 @@ xfs_refcount_finish_one(
        return error;
 }
 
+/*
+ * Set up a continuation of a deferred rtrefcount operation by updating the
+ * intent.  Checks to make sure we're not going to run off the end of the
+ * rtgroup.
+ */
+static inline int
+xfs_rtrefcount_continue_op(
+       struct xfs_btree_cur            *cur,
+       struct xfs_refcount_intent      *ri,
+       xfs_agblock_t                   new_agbno)
+{
+       struct xfs_mount                *mp = cur->bc_mp;
+       struct xfs_rtgroup              *rtg = to_rtg(ri->ri_group);
+
+       if (XFS_IS_CORRUPT(mp, !xfs_verify_rgbext(rtg, new_agbno,
+                                       ri->ri_blockcount))) {
+               xfs_btree_mark_sick(cur);
+               return -EFSCORRUPTED;
+       }
+
+       ri->ri_startblock = xfs_rgbno_to_rtb(rtg, new_agbno);
+
+       ASSERT(xfs_verify_rtbext(mp, ri->ri_startblock, ri->ri_blockcount));
+       return 0;
+}
+
 /*
  * Process one of the deferred realtime refcount operations.  We pass back the
  * btree cursor to maintain our lock on the btree between calls.
@@ -1470,8 +1497,77 @@ xfs_rtrefcount_finish_one(
        struct xfs_refcount_intent      *ri,
        struct xfs_btree_cur            **pcur)
 {
-       ASSERT(0);
-       return -EFSCORRUPTED;
+       struct xfs_mount                *mp = tp->t_mountp;
+       struct xfs_rtgroup              *rtg = to_rtg(ri->ri_group);
+       struct xfs_btree_cur            *rcur = *pcur;
+       int                             error = 0;
+       xfs_rgblock_t                   bno;
+       unsigned long                   nr_ops = 0;
+       int                             shape_changes = 0;
+
+       bno = xfs_rtb_to_rgbno(mp, ri->ri_startblock);
+
+       trace_xfs_refcount_deferred(mp, ri);
+
+       if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_REFCOUNT_FINISH_ONE))
+               return -EIO;
+
+       /*
+        * If we haven't gotten a cursor or the cursor rtgroup doesn't match
+        * the startblock, get one now.
+        */
+       if (rcur != NULL && rcur->bc_group != ri->ri_group) {
+               nr_ops = rcur->bc_refc.nr_ops;
+               shape_changes = rcur->bc_refc.shape_changes;
+               xfs_btree_del_cursor(rcur, 0);
+               rcur = NULL;
+               *pcur = NULL;
+       }
+       if (rcur == NULL) {
+               xfs_rtgroup_lock(rtg, XFS_RTGLOCK_REFCOUNT);
+               xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_REFCOUNT);
+               *pcur = rcur = xfs_rtrefcountbt_init_cursor(tp, rtg);
+
+               rcur->bc_refc.nr_ops = nr_ops;
+               rcur->bc_refc.shape_changes = shape_changes;
+       }
+
+       switch (ri->ri_type) {
+       case XFS_REFCOUNT_INCREASE:
+               error = xfs_refcount_adjust(rcur, &bno, &ri->ri_blockcount,
+                               XFS_REFCOUNT_ADJUST_INCREASE);
+               if (error)
+                       return error;
+               if (ri->ri_blockcount > 0)
+                       error = xfs_rtrefcount_continue_op(rcur, ri, bno);
+               break;
+       case XFS_REFCOUNT_DECREASE:
+               error = xfs_refcount_adjust(rcur, &bno, &ri->ri_blockcount,
+                               XFS_REFCOUNT_ADJUST_DECREASE);
+               if (error)
+                       return error;
+               if (ri->ri_blockcount > 0)
+                       error = xfs_rtrefcount_continue_op(rcur, ri, bno);
+               break;
+       case XFS_REFCOUNT_ALLOC_COW:
+               error = __xfs_refcount_cow_alloc(rcur, bno, ri->ri_blockcount);
+               if (error)
+                       return error;
+               ri->ri_blockcount = 0;
+               break;
+       case XFS_REFCOUNT_FREE_COW:
+               error = __xfs_refcount_cow_free(rcur, bno, ri->ri_blockcount);
+               if (error)
+                       return error;
+               ri->ri_blockcount = 0;
+               break;
+       default:
+               ASSERT(0);
+               return -EFSCORRUPTED;
+       }
+       if (!error && ri->ri_blockcount > 0)
+               trace_xfs_refcount_finish_one_leftover(mp, ri);
+       return error;
 }
 
 /*
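
Note the contract here: xfs_rtrefcount_finish_one() may stop early with ri_blockcount still nonzero, and xfs_rtrefcount_continue_op() rewrites ri_startblock so the next invocation resumes at the right offset. A hypothetical driver loop (the real caller is the deferred-ops machinery, not this code) makes the resume semantics concrete:

	/* Hypothetical illustration of the finish-one resume contract. */
	struct xfs_btree_cur	*rcur = NULL;
	int			error;

	do {
		error = xfs_rtrefcount_finish_one(tp, ri, &rcur);
	} while (!error && ri->ri_blockcount > 0);

	if (rcur)
		xfs_btree_del_cursor(rcur, error);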
diff --git a/libxfs/xfs_rtgroup.c b/libxfs/xfs_rtgroup.c
index f7bacce8500c0c1fdae46d253d5d00b2f0a35843..e1f853dd2c5b3eb7879a985aa3f5bf82e521fe78 100644
@@ -203,6 +203,9 @@ xfs_rtgroup_lock(
 
        if ((rtglock_flags & XFS_RTGLOCK_RMAP) && rtg_rmap(rtg))
                xfs_ilock(rtg_rmap(rtg), XFS_ILOCK_EXCL);
+
+       if ((rtglock_flags & XFS_RTGLOCK_REFCOUNT) && rtg_refcount(rtg))
+               xfs_ilock(rtg_refcount(rtg), XFS_ILOCK_EXCL);
 }
 
 /* Unlock metadata inodes associated with this rt group. */
@@ -215,6 +218,9 @@ xfs_rtgroup_unlock(
        ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) ||
               !(rtglock_flags & XFS_RTGLOCK_BITMAP));
 
+       if ((rtglock_flags & XFS_RTGLOCK_REFCOUNT) && rtg_refcount(rtg))
+               xfs_iunlock(rtg_refcount(rtg), XFS_ILOCK_EXCL);
+
        if ((rtglock_flags & XFS_RTGLOCK_RMAP) && rtg_rmap(rtg))
                xfs_iunlock(rtg_rmap(rtg), XFS_ILOCK_EXCL);
 
@@ -246,6 +252,9 @@ xfs_rtgroup_trans_join(
 
        if ((rtglock_flags & XFS_RTGLOCK_RMAP) && rtg_rmap(rtg))
                xfs_trans_ijoin(tp, rtg_rmap(rtg), XFS_ILOCK_EXCL);
+
+       if ((rtglock_flags & XFS_RTGLOCK_REFCOUNT) && rtg_refcount(rtg))
+               xfs_trans_ijoin(tp, rtg_refcount(rtg), XFS_ILOCK_EXCL);
 }
 
 /* Retrieve rt group geometry. */
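
The ordering above is deliberate: the refcount inode is locked after the rmap inode and unlocked before it, keeping acquisition and release symmetric. Non-transactional callers pair the helpers directly (a sketch assuming the calling convention these hunks extend); transactional callers instead add xfs_rtgroup_trans_join, as the xfs_refcount.c hunk does, and let transaction commit release the locks:

	/* Sketch: both flags go through the same helpers, which
	 * enforce the internal lock ordering. */
	xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP | XFS_RTGLOCK_REFCOUNT);
	/* ... examine rt rmap and refcount metadata ... */
	xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP | XFS_RTGLOCK_REFCOUNT);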
diff --git a/libxfs/xfs_rtgroup.h b/libxfs/xfs_rtgroup.h
index 2663f2d849e295e491a284f8ebd8166b8eedd5ab..03f39d4e43fc7f3a9583730e5634a46520ceed21 100644
@@ -273,10 +273,13 @@ int xfs_update_last_rtgroup_size(struct xfs_mount *mp,
 #define XFS_RTGLOCK_BITMAP_SHARED      (1U << 1)
 /* Lock the rt rmap inode in exclusive mode */
 #define XFS_RTGLOCK_RMAP               (1U << 2)
+/* Lock the rt refcount inode in exclusive mode */
+#define XFS_RTGLOCK_REFCOUNT           (1U << 3)
 
 #define XFS_RTGLOCK_ALL_FLAGS  (XFS_RTGLOCK_BITMAP | \
                                 XFS_RTGLOCK_BITMAP_SHARED | \
-                                XFS_RTGLOCK_RMAP)
+                                XFS_RTGLOCK_RMAP | \
+                                XFS_RTGLOCK_REFCOUNT)
 
 void xfs_rtgroup_lock(struct xfs_rtgroup *rtg, unsigned int rtglock_flags);
 void xfs_rtgroup_unlock(struct xfs_rtgroup *rtg, unsigned int rtglock_flags);
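
Since XFS_RTGLOCK_ALL_FLAGS now includes the new bit, flag validation in the helpers keeps accepting it. A typical guard, assumed here in the style of similar XFS asserts rather than quoted from the source:

	/* Assumed validation inside the lock helpers; illustrative. */
	ASSERT(!(rtglock_flags & ~XFS_RTGLOCK_ALL_FLAGS));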