git.ipfire.org Git - thirdparty/xfsprogs-dev.git/commitdiff
xfs: dynamically allocate cursors based on maxlevels
author: Darrick J. Wong <djwong@kernel.org>
Thu, 28 Apr 2022 19:39:03 +0000 (15:39 -0400)
committer: Eric Sandeen <sandeen@sandeen.net>
Thu, 28 Apr 2022 19:39:03 +0000 (15:39 -0400)
Source kernel commit: c940a0c54a2e9333478f1d87ed40006a04fcec7e

To support future btree code, we need to be able to size btree cursors
dynamically for very large btrees.  Switch the maxlevels computation to
use the precomputed values in the superblock, and create cursors that
can handle a certain height.  For now, we retain the btree cursor cache
that can handle up to 9-level btrees, though a subsequent patch
introduces separate caches for each btree type, where each cache's
objects will be exactly tall enough to handle the specific btree type.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Eric Sandeen <sandeen@sandeen.net>
libxfs/init.c
libxfs/xfs_alloc_btree.c
libxfs/xfs_bmap_btree.c
libxfs/xfs_btree.h
libxfs/xfs_ialloc_btree.c
libxfs/xfs_refcount_btree.c
libxfs/xfs_rmap_btree.c

index 0cd4b929c92e1ec7a6ea2c7a1e4837a8cd8a5dc4..2d29a78adf2b5ad14ca4ab9baec47c480198ebad 100644 (file)
@@ -242,7 +242,7 @@ init_zones(void)
        xfs_da_state_zone = kmem_zone_init(
                        sizeof(struct xfs_da_state), "xfs_da_state");
        xfs_btree_cur_zone = kmem_zone_init(
-                       xfs_btree_cur_sizeof(XFS_BTREE_MAXLEVELS),
+                       xfs_btree_cur_sizeof(XFS_BTREE_CUR_CACHE_MAXLEVELS),
                        "xfs_btree_cur");
        xfs_bmap_free_item_zone = kmem_zone_init(
                        sizeof(struct xfs_extent_free_item),
index 7d7618c5435a533433a8f4306f3712e84118b4e9..c1030ad1ce82255944321125bf1b92236c9c96e8 100644 (file)
@@ -475,7 +475,7 @@ xfs_allocbt_init_common(
 
        ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
 
-       cur = xfs_btree_alloc_cursor(mp, tp, btnum);
+       cur = xfs_btree_alloc_cursor(mp, tp, btnum, mp->m_ag_maxlevels);
        cur->bc_ag.abt.active = false;
 
        if (btnum == XFS_BTNUM_CNT) {
index c4b34cdd06f26fa3fe839eb21f7e0483177d9882..9e2263ed5182f51cde5d3db433e39e7581352b2a 100644 (file)
@@ -550,7 +550,8 @@ xfs_bmbt_init_cursor(
        struct xfs_btree_cur    *cur;
        ASSERT(whichfork != XFS_COW_FORK);
 
-       cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP);
+       cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP,
+                       mp->m_bm_maxlevels[whichfork]);
        cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
        cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);
 
index ed0b7d5ab3a3d0c7071203358767619143142069..b46cd98309fa2aaa0be8f936ee2ae96a0fedc4e6 100644 (file)
@@ -94,6 +94,12 @@ uint32_t xfs_btree_magic(int crc, xfs_btnum_t btnum);
 
 #define        XFS_BTREE_MAXLEVELS     9       /* max of all btrees */
 
+/*
+ * The btree cursor zone hands out cursors that can handle up to this many
+ * levels.  This is the known maximum for all btree types.
+ */
+#define XFS_BTREE_CUR_CACHE_MAXLEVELS  (9)
+
 struct xfs_btree_ops {
        /* size of the key and record structures */
        size_t  key_len;
@@ -583,15 +589,18 @@ static inline struct xfs_btree_cur *
 xfs_btree_alloc_cursor(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
-       xfs_btnum_t             btnum)
+       xfs_btnum_t             btnum,
+       uint8_t                 maxlevels)
 {
        struct xfs_btree_cur    *cur;
 
+       ASSERT(maxlevels <= XFS_BTREE_CUR_CACHE_MAXLEVELS);
+
        cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL);
        cur->bc_tp = tp;
        cur->bc_mp = mp;
        cur->bc_btnum = btnum;
-       cur->bc_maxlevels = XFS_BTREE_MAXLEVELS;
+       cur->bc_maxlevels = maxlevels;
 
        return cur;
 }
index 87a54c07f86a5702bce221d3d7d12898eddbba9f..be0918b7a7cb14c1d851f2556f9bf3a8dd687313 100644 (file)
@@ -431,7 +431,8 @@ xfs_inobt_init_common(
 {
        struct xfs_btree_cur    *cur;
 
-       cur = xfs_btree_alloc_cursor(mp, tp, btnum);
+       cur = xfs_btree_alloc_cursor(mp, tp, btnum,
+                       M_IGEO(mp)->inobt_maxlevels);
        if (btnum == XFS_BTNUM_INO) {
                cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
                cur->bc_ops = &xfs_inobt_ops;
index 55e68613f73129e03f76360e4052f4dfd5163357..6a71692449d38ccab9b1946911dc3c94913b15f4 100644 (file)
@@ -321,7 +321,8 @@ xfs_refcountbt_init_common(
 
        ASSERT(pag->pag_agno < mp->m_sb.sb_agcount);
 
-       cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_REFC);
+       cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_REFC,
+                       mp->m_refc_maxlevels);
        cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);
 
        cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
index f6339a312d1cb75776c3608b338d804875d7f989..4c281b71f5e57a5b3acfa27d3c76a1b943bfbdc9 100644 (file)
@@ -450,7 +450,8 @@ xfs_rmapbt_init_common(
        struct xfs_btree_cur    *cur;
 
        /* Overlapping btree; 2 keys per pointer. */
-       cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP);
+       cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP,
+                       mp->m_rmap_maxlevels);
        cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
        cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
        cur->bc_ops = &xfs_rmapbt_ops;