git.ipfire.org Git - thirdparty/xfsprogs-dev.git/commitdiff
xfs: wire up rmap map and unmap to the realtime rmapbt
author Darrick J. Wong <djwong@kernel.org>
Mon, 24 Feb 2025 18:21:49 +0000 (10:21 -0800)
committer Darrick J. Wong <djwong@kernel.org>
Tue, 25 Feb 2025 17:15:58 +0000 (09:15 -0800)
Source kernel commit: 609a592865c9e66a1c00eb7b8ee7436eea3c39a3

Connect the map and unmap reverse-mapping operations to the realtime
rmapbt via the deferred operation callbacks.  This enables us to
perform rmap operations against the correct btree.

Signed-off-by: "Darrick J. Wong" <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
libxfs/xfs_rmap.c
libxfs/xfs_rtgroup.c
libxfs/xfs_rtgroup.h

index a1a57cd0c62c107923833568dfff926ec48ed766..551f158e5424f343f56a6403c010feaf7d176275 100644 (file)
@@ -25,6 +25,7 @@
 #include "xfs_health.h"
 #include "defer_item.h"
 #include "xfs_rtgroup.h"
+#include "xfs_rtrmap_btree.h"
 
 struct kmem_cache      *xfs_rmap_intent_cache;
 
@@ -2618,6 +2619,47 @@ __xfs_rmap_finish_intent(
        }
 }
 
+static int
+xfs_rmap_finish_init_cursor(
+       struct xfs_trans                *tp,
+       struct xfs_rmap_intent          *ri,
+       struct xfs_btree_cur            **pcur)
+{
+       struct xfs_perag                *pag = to_perag(ri->ri_group);
+       struct xfs_buf                  *agbp = NULL;
+       int                             error;
+
+       /*
+        * Refresh the freelist before we start changing the rmapbt, because a
+        * shape change could cause us to allocate blocks.
+        */
+       error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
+       if (error) {
+               xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL);
+               return error;
+       }
+       if (XFS_IS_CORRUPT(tp->t_mountp, !agbp)) {
+               xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL);
+               return -EFSCORRUPTED;
+       }
+       *pcur = xfs_rmapbt_init_cursor(tp->t_mountp, tp, agbp, pag);
+       return 0;
+}
+
+static int
+xfs_rtrmap_finish_init_cursor(
+       struct xfs_trans                *tp,
+       struct xfs_rmap_intent          *ri,
+       struct xfs_btree_cur            **pcur)
+{
+       struct xfs_rtgroup              *rtg = to_rtg(ri->ri_group);
+
+       xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
+       xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_RMAP);
+       *pcur = xfs_rtrmapbt_init_cursor(tp, rtg);
+       return 0;
+}
+
 /*
  * Process one of the deferred rmap operations.  We pass back the
  * btree cursor to maintain our lock on the rmapbt between calls.
@@ -2633,8 +2675,6 @@ xfs_rmap_finish_one(
 {
        struct xfs_owner_info           oinfo;
        struct xfs_mount                *mp = tp->t_mountp;
-       struct xfs_btree_cur            *rcur = *pcur;
-       struct xfs_buf                  *agbp = NULL;
        xfs_agblock_t                   bno;
        bool                            unwritten;
        int                             error = 0;
@@ -2648,38 +2688,26 @@ xfs_rmap_finish_one(
         * If we haven't gotten a cursor or the cursor AG doesn't match
         * the startblock, get one now.
         */
-       if (rcur != NULL && rcur->bc_group != ri->ri_group) {
-               xfs_btree_del_cursor(rcur, 0);
-               rcur = NULL;
+       if (*pcur != NULL && (*pcur)->bc_group != ri->ri_group) {
+               xfs_btree_del_cursor(*pcur, 0);
                *pcur = NULL;
        }
-       if (rcur == NULL) {
-               struct xfs_perag        *pag = to_perag(ri->ri_group);
-
-               /*
-                * Refresh the freelist before we start changing the
-                * rmapbt, because a shape change could cause us to
-                * allocate blocks.
-                */
-               error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
-               if (error) {
-                       xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL);
+       if (*pcur == NULL) {
+               if (ri->ri_group->xg_type == XG_TYPE_RTG)
+                       error = xfs_rtrmap_finish_init_cursor(tp, ri, pcur);
+               else
+                       error = xfs_rmap_finish_init_cursor(tp, ri, pcur);
+               if (error)
                        return error;
-               }
-               if (XFS_IS_CORRUPT(tp->t_mountp, !agbp)) {
-                       xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL);
-                       return -EFSCORRUPTED;
-               }
-
-               *pcur = rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
        }
 
        xfs_rmap_ino_owner(&oinfo, ri->ri_owner, ri->ri_whichfork,
                        ri->ri_bmap.br_startoff);
        unwritten = ri->ri_bmap.br_state == XFS_EXT_UNWRITTEN;
-       bno = XFS_FSB_TO_AGBNO(rcur->bc_mp, ri->ri_bmap.br_startblock);
 
-       error = __xfs_rmap_finish_intent(rcur, ri->ri_type, bno,
+       bno = xfs_fsb_to_gbno(mp, ri->ri_bmap.br_startblock,
+                       ri->ri_group->xg_type);
+       error = __xfs_rmap_finish_intent(*pcur, ri->ri_type, bno,
                        ri->ri_bmap.br_blockcount, &oinfo, unwritten);
        if (error)
                return error;
index d46ce8e7fa6e851de0e86d74f0a8b7e6acbd86a8..f0c45e75e52c3e6ca606aaa2c1011d912c69cd64 100644 (file)
@@ -199,6 +199,9 @@ xfs_rtgroup_lock(
        } else if (rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) {
                xfs_ilock(rtg_bitmap(rtg), XFS_ILOCK_SHARED);
        }
+
+       if ((rtglock_flags & XFS_RTGLOCK_RMAP) && rtg_rmap(rtg))
+               xfs_ilock(rtg_rmap(rtg), XFS_ILOCK_EXCL);
 }
 
 /* Unlock metadata inodes associated with this rt group. */
@@ -211,6 +214,9 @@ xfs_rtgroup_unlock(
        ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) ||
               !(rtglock_flags & XFS_RTGLOCK_BITMAP));
 
+       if ((rtglock_flags & XFS_RTGLOCK_RMAP) && rtg_rmap(rtg))
+               xfs_iunlock(rtg_rmap(rtg), XFS_ILOCK_EXCL);
+
        if (rtglock_flags & XFS_RTGLOCK_BITMAP) {
                xfs_iunlock(rtg_summary(rtg), XFS_ILOCK_EXCL);
                xfs_iunlock(rtg_bitmap(rtg), XFS_ILOCK_EXCL);
@@ -236,6 +242,9 @@ xfs_rtgroup_trans_join(
                xfs_trans_ijoin(tp, rtg_bitmap(rtg), XFS_ILOCK_EXCL);
                xfs_trans_ijoin(tp, rtg_summary(rtg), XFS_ILOCK_EXCL);
        }
+
+       if ((rtglock_flags & XFS_RTGLOCK_RMAP) && rtg_rmap(rtg))
+               xfs_trans_ijoin(tp, rtg_rmap(rtg), XFS_ILOCK_EXCL);
 }
 
 /* Retrieve rt group geometry. */
index 09ec9f0e66016083c407976a6f4ab6e4645c4540..6ff222a053674dc63528bbf6a00e82232bf4e921 100644 (file)
@@ -265,9 +265,12 @@ int xfs_update_last_rtgroup_size(struct xfs_mount *mp,
 #define XFS_RTGLOCK_BITMAP             (1U << 0)
 /* Lock the rt bitmap inode in shared mode */
 #define XFS_RTGLOCK_BITMAP_SHARED      (1U << 1)
+/* Lock the rt rmap inode in exclusive mode */
+#define XFS_RTGLOCK_RMAP               (1U << 2)
 
 #define XFS_RTGLOCK_ALL_FLAGS  (XFS_RTGLOCK_BITMAP | \
-                                XFS_RTGLOCK_BITMAP_SHARED)
+                                XFS_RTGLOCK_BITMAP_SHARED | \
+                                XFS_RTGLOCK_RMAP)
 
 void xfs_rtgroup_lock(struct xfs_rtgroup *rtg, unsigned int rtglock_flags);
 void xfs_rtgroup_unlock(struct xfs_rtgroup *rtg, unsigned int rtglock_flags);