xfs: create a shadow rmap btree during realtime rmap repair
author    Darrick J. Wong <djwong@kernel.org>    Mon, 24 Feb 2025 18:21:51 +0000 (10:21 -0800)
committer Darrick J. Wong <djwong@kernel.org>    Tue, 25 Feb 2025 17:15:58 +0000 (09:15 -0800)
Source kernel commit: 4a61f12eb11958f157e054d386466627445644cd

Create an in-memory btree of rmap records instead of an array.  This
enables us to do live record collection instead of freezing the fs.

Signed-off-by: "Darrick J. Wong" <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
libxfs/xfs_btree_mem.c
libxfs/xfs_rmap.c
libxfs/xfs_rtrmap_btree.c
libxfs/xfs_rtrmap_btree.h
libxfs/xfs_shared.h

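To make the commit message concrete, here is a minimal, hypothetical sketch (not part of this patch) of how a realtime rmap repairer might drive the two helpers added below, xfs_rtrmapbt_mem_init() and xfs_rtrmapbt_mem_cursor(): build a shadow btree in an xfile-backed buffer target, stage records into it as they are observed, and tear it down afterwards. The surrounding calls (xmbuf_alloc(), xfs_rmap_map_raw(), xfbtree_trans_commit()/xfbtree_trans_cancel(), xfbtree_destroy(), xmbuf_free()) come from the existing in-memory btree and rmap infrastructure; the function name, record source, and error handling here are invented for illustration and may not match the actual scrub/repair code in this series.

/* Illustrative only; assumes the usual libxfs #include preamble. */
static int
xrep_rtrmap_shadow_example(
	struct xfs_rtgroup	*rtg,
	struct xfs_trans	*tp,
	struct xfs_rmap_irec	*irec)
{
	struct xfbtree		xfbt = { };
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_btree_cur	*mcur;
	struct xfs_buftarg	*btp;
	int			error;

	/* Back the shadow btree with an in-memory (xfile) buffer target. */
	error = xmbuf_alloc(mp, "rtrmap shadow btree", &btp);
	if (error)
		return error;

	/* Set up the in-memory rtrmap btree that this patch introduces. */
	error = xfs_rtrmapbt_mem_init(mp, &xfbt, btp, rtg_rgno(rtg));
	if (error)
		goto out_buftarg;

	/*
	 * Stage one observed mapping in the shadow btree.  A real repairer
	 * would do this for every record gathered by its live scan hooks.
	 */
	mcur = xfs_rtrmapbt_mem_cursor(rtg, tp, &xfbt);
	error = xfs_rmap_map_raw(mcur, irec);
	xfs_btree_del_cursor(mcur, error);

	/* Write the dirty in-memory buffers back to the xfile. */
	if (error)
		xfbtree_trans_cancel(&xfbt, tp);
	else
		error = xfbtree_trans_commit(&xfbt, tp);

	xfbtree_destroy(&xfbt);
out_buftarg:
	xmbuf_free(btp);
	return error;
}
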
diff --git a/libxfs/xfs_btree_mem.c b/libxfs/xfs_btree_mem.c
index 8e3efdbccc156a281596b0d9f6529658a3e7703d..2b98b8d01dce0d8657376c3a137ad72019d33a06 100644
@@ -17,6 +17,7 @@
 #include "xfs_btree_mem.h"
 #include "xfs_ag.h"
 #include "xfs_trace.h"
+#include "xfs_rtgroup.h"
 
 /* Set the root of an in-memory btree. */
 void
diff --git a/libxfs/xfs_rmap.c b/libxfs/xfs_rmap.c
index 551f158e5424f343f56a6403c010feaf7d176275..3748bfc7a9dfc1a55bd5c6f7f98d57390c3262c5 100644
@@ -326,7 +326,8 @@ xfs_rmap_check_btrec(
        struct xfs_btree_cur            *cur,
        const struct xfs_rmap_irec      *irec)
 {
-       if (xfs_btree_is_rtrmap(cur->bc_ops))
+       if (xfs_btree_is_rtrmap(cur->bc_ops) ||
+           xfs_btree_is_mem_rtrmap(cur->bc_ops))
                return xfs_rtrmap_check_irec(to_rtg(cur->bc_group), irec);
        return xfs_rmap_check_irec(to_perag(cur->bc_group), irec);
 }
diff --git a/libxfs/xfs_rtrmap_btree.c b/libxfs/xfs_rtrmap_btree.c
index ac51e736e7e48969050c6e77636ea2525d455f1b..10055110b8cf42e09ff7fc0b6c205c48359e4682 100644
@@ -26,6 +26,9 @@
 #include "xfs_rtgroup.h"
 #include "xfs_bmap.h"
 #include "xfs_health.h"
+#include "xfile.h"
+#include "buf_mem.h"
+#include "xfs_btree_mem.h"
 
 static struct kmem_cache       *xfs_rtrmapbt_cur_cache;
 
@@ -540,6 +543,121 @@ xfs_rtrmapbt_init_cursor(
        return cur;
 }
 
+#ifdef CONFIG_XFS_BTREE_IN_MEM
+/*
+ * Validate an in-memory realtime rmap btree block.  Callers are allowed to
+ * generate an in-memory btree even if the ondisk feature is not enabled.
+ */
+static xfs_failaddr_t
+xfs_rtrmapbt_mem_verify(
+       struct xfs_buf          *bp)
+{
+       struct xfs_mount        *mp = bp->b_mount;
+       struct xfs_btree_block  *block = XFS_BUF_TO_BLOCK(bp);
+       xfs_failaddr_t          fa;
+       unsigned int            level;
+       unsigned int            maxrecs;
+
+       if (!xfs_verify_magic(bp, block->bb_magic))
+               return __this_address;
+
+       fa = xfs_btree_fsblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN);
+       if (fa)
+               return fa;
+
+       level = be16_to_cpu(block->bb_level);
+       if (xfs_has_rmapbt(mp)) {
+               if (level >= mp->m_rtrmap_maxlevels)
+                       return __this_address;
+       } else {
+               if (level >= xfs_rtrmapbt_maxlevels_ondisk())
+                       return __this_address;
+       }
+
+       maxrecs = xfs_rtrmapbt_maxrecs(mp, XFBNO_BLOCKSIZE, level == 0);
+       return xfs_btree_memblock_verify(bp, maxrecs);
+}
+
+static void
+xfs_rtrmapbt_mem_rw_verify(
+       struct xfs_buf  *bp)
+{
+       xfs_failaddr_t  fa = xfs_rtrmapbt_mem_verify(bp);
+
+       if (fa)
+               xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+}
+
+/* skip crc checks on in-memory btrees to save time */
+static const struct xfs_buf_ops xfs_rtrmapbt_mem_buf_ops = {
+       .name                   = "xfs_rtrmapbt_mem",
+       .magic                  = { 0, cpu_to_be32(XFS_RTRMAP_CRC_MAGIC) },
+       .verify_read            = xfs_rtrmapbt_mem_rw_verify,
+       .verify_write           = xfs_rtrmapbt_mem_rw_verify,
+       .verify_struct          = xfs_rtrmapbt_mem_verify,
+};
+
+const struct xfs_btree_ops xfs_rtrmapbt_mem_ops = {
+       .type                   = XFS_BTREE_TYPE_MEM,
+       .geom_flags             = XFS_BTGEO_OVERLAPPING,
+
+       .rec_len                = sizeof(struct xfs_rmap_rec),
+       /* Overlapping btree; 2 keys per pointer. */
+       .key_len                = 2 * sizeof(struct xfs_rmap_key),
+       .ptr_len                = XFS_BTREE_LONG_PTR_LEN,
+
+       .lru_refs               = XFS_RMAP_BTREE_REF,
+       .statoff                = XFS_STATS_CALC_INDEX(xs_rtrmap_mem_2),
+
+       .dup_cursor             = xfbtree_dup_cursor,
+       .set_root               = xfbtree_set_root,
+       .alloc_block            = xfbtree_alloc_block,
+       .free_block             = xfbtree_free_block,
+       .get_minrecs            = xfbtree_get_minrecs,
+       .get_maxrecs            = xfbtree_get_maxrecs,
+       .init_key_from_rec      = xfs_rtrmapbt_init_key_from_rec,
+       .init_high_key_from_rec = xfs_rtrmapbt_init_high_key_from_rec,
+       .init_rec_from_cur      = xfs_rtrmapbt_init_rec_from_cur,
+       .init_ptr_from_cur      = xfbtree_init_ptr_from_cur,
+       .key_diff               = xfs_rtrmapbt_key_diff,
+       .buf_ops                = &xfs_rtrmapbt_mem_buf_ops,
+       .diff_two_keys          = xfs_rtrmapbt_diff_two_keys,
+       .keys_inorder           = xfs_rtrmapbt_keys_inorder,
+       .recs_inorder           = xfs_rtrmapbt_recs_inorder,
+       .keys_contiguous        = xfs_rtrmapbt_keys_contiguous,
+};
+
+/* Create a cursor for an in-memory btree. */
+struct xfs_btree_cur *
+xfs_rtrmapbt_mem_cursor(
+       struct xfs_rtgroup      *rtg,
+       struct xfs_trans        *tp,
+       struct xfbtree          *xfbt)
+{
+       struct xfs_mount        *mp = rtg_mount(rtg);
+       struct xfs_btree_cur    *cur;
+
+       cur = xfs_btree_alloc_cursor(mp, tp, &xfs_rtrmapbt_mem_ops,
+                       mp->m_rtrmap_maxlevels, xfs_rtrmapbt_cur_cache);
+       cur->bc_mem.xfbtree = xfbt;
+       cur->bc_nlevels = xfbt->nlevels;
+       cur->bc_group = xfs_group_hold(rtg_group(rtg));
+       return cur;
+}
+
+/* Create an in-memory realtime rmap btree. */
+int
+xfs_rtrmapbt_mem_init(
+       struct xfs_mount        *mp,
+       struct xfbtree          *xfbt,
+       struct xfs_buftarg      *btp,
+       xfs_rgnumber_t          rgno)
+{
+       xfbt->owner = rgno;
+       return xfbtree_init(mp, xfbt, btp, &xfs_rtrmapbt_mem_ops);
+}
+#endif /* CONFIG_XFS_BTREE_IN_MEM */
+
 /*
  * Install a new rt reverse mapping btree root.  Caller is responsible for
  * invalidating and freeing the old btree blocks.
diff --git a/libxfs/xfs_rtrmap_btree.h b/libxfs/xfs_rtrmap_btree.h
index ad76ac7938b602eab8f30a20912fd5b908641795..9d0915089891a5edc77c9c170ed83e10cb1ff21d 100644
@@ -11,6 +11,7 @@ struct xfs_btree_cur;
 struct xfs_mount;
 struct xbtree_ifakeroot;
 struct xfs_rtgroup;
+struct xfbtree;
 
 /* rmaps only exist on crc enabled filesystems */
 #define XFS_RTRMAP_BLOCK_LEN   XFS_BTREE_LBLOCK_CRC_LEN
@@ -201,4 +202,9 @@ int xfs_rtrmapbt_init_rtsb(struct xfs_mount *mp, struct xfs_rtgroup *rtg,
 unsigned long long xfs_rtrmapbt_calc_size(struct xfs_mount *mp,
                unsigned long long len);
 
+struct xfs_btree_cur *xfs_rtrmapbt_mem_cursor(struct xfs_rtgroup *rtg,
+               struct xfs_trans *tp, struct xfbtree *xfbtree);
+int xfs_rtrmapbt_mem_init(struct xfs_mount *mp, struct xfbtree *xfbtree,
+               struct xfs_buftarg *btp, xfs_rgnumber_t rgno);
+
 #endif /* __XFS_RTRMAP_BTREE_H__ */
diff --git a/libxfs/xfs_shared.h b/libxfs/xfs_shared.h
index da23dac22c3f08238c99f037b08cbb5ab0a99d52..960716c387cc2baf5bd6d665ed19cee5470fb362 100644
@@ -57,6 +57,7 @@ extern const struct xfs_btree_ops xfs_refcountbt_ops;
 extern const struct xfs_btree_ops xfs_rmapbt_ops;
 extern const struct xfs_btree_ops xfs_rmapbt_mem_ops;
 extern const struct xfs_btree_ops xfs_rtrmapbt_ops;
+extern const struct xfs_btree_ops xfs_rtrmapbt_mem_ops;
 
 static inline bool xfs_btree_is_bno(const struct xfs_btree_ops *ops)
 {
@@ -98,8 +99,14 @@ static inline bool xfs_btree_is_mem_rmap(const struct xfs_btree_ops *ops)
 {
        return ops == &xfs_rmapbt_mem_ops;
 }
+
+static inline bool xfs_btree_is_mem_rtrmap(const struct xfs_btree_ops *ops)
+{
+       return ops == &xfs_rtrmapbt_mem_ops;
+}
 #else
 # define xfs_btree_is_mem_rmap(...)    (false)
+# define xfs_btree_is_mem_rtrmap(...)  (false)
 #endif
 
 static inline bool xfs_btree_is_rtrmap(const struct xfs_btree_ops *ops)