From: Darrick J. Wong
Date: Thu, 28 Apr 2022 19:39:03 +0000 (-0400)
Subject: xfs: dynamically allocate cursors based on maxlevels
X-Git-Tag: v5.16.0-rc0~20
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=22f64346ed74e6b9687f3a7bc14f90f97ab2cfef;p=thirdparty%2Fxfsprogs-dev.git

xfs: dynamically allocate cursors based on maxlevels

Source kernel commit: c940a0c54a2e9333478f1d87ed40006a04fcec7e

To support future btree code, we need to be able to size btree cursors
dynamically for very large btrees.  Switch the maxlevels computation to
use the precomputed values in the superblock, and create cursors that
can handle a certain height.

For now, we retain the btree cursor cache that can handle up to 9-level
btrees, though a subsequent patch introduces separate caches for each
btree type, where each cache's objects will be exactly tall enough to
handle the specific btree type.

Signed-off-by: Darrick J. Wong
Reviewed-by: Dave Chinner
Signed-off-by: Eric Sandeen
---

diff --git a/libxfs/init.c b/libxfs/init.c
index 0cd4b929c..2d29a78ad 100644
--- a/libxfs/init.c
+++ b/libxfs/init.c
@@ -242,7 +242,7 @@ init_zones(void)
 	xfs_da_state_zone = kmem_zone_init(
 			sizeof(struct xfs_da_state), "xfs_da_state");
 	xfs_btree_cur_zone = kmem_zone_init(
-			xfs_btree_cur_sizeof(XFS_BTREE_MAXLEVELS),
+			xfs_btree_cur_sizeof(XFS_BTREE_CUR_CACHE_MAXLEVELS),
 			"xfs_btree_cur");
 	xfs_bmap_free_item_zone = kmem_zone_init(
 			sizeof(struct xfs_extent_free_item),
diff --git a/libxfs/xfs_alloc_btree.c b/libxfs/xfs_alloc_btree.c
index 7d7618c54..c1030ad1c 100644
--- a/libxfs/xfs_alloc_btree.c
+++ b/libxfs/xfs_alloc_btree.c
@@ -475,7 +475,7 @@ xfs_allocbt_init_common(
 
 	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
 
-	cur = xfs_btree_alloc_cursor(mp, tp, btnum);
+	cur = xfs_btree_alloc_cursor(mp, tp, btnum, mp->m_ag_maxlevels);
 	cur->bc_ag.abt.active = false;
 
 	if (btnum == XFS_BTNUM_CNT) {
diff --git a/libxfs/xfs_bmap_btree.c b/libxfs/xfs_bmap_btree.c
index c4b34cdd0..9e2263ed5 100644
--- a/libxfs/xfs_bmap_btree.c
+++ b/libxfs/xfs_bmap_btree.c
@@ -550,7 +550,8 @@ xfs_bmbt_init_cursor(
 	struct xfs_btree_cur	*cur;
 
 	ASSERT(whichfork != XFS_COW_FORK);
 
-	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP);
+	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP,
+			mp->m_bm_maxlevels[whichfork]);
 	cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
 	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);
diff --git a/libxfs/xfs_btree.h b/libxfs/xfs_btree.h
index ed0b7d5ab..b46cd9830 100644
--- a/libxfs/xfs_btree.h
+++ b/libxfs/xfs_btree.h
@@ -94,6 +94,12 @@ uint32_t xfs_btree_magic(int crc, xfs_btnum_t btnum);
 
 #define XFS_BTREE_MAXLEVELS	9	/* max of all btrees */
 
+/*
+ * The btree cursor zone hands out cursors that can handle up to this many
+ * levels.  This is the known maximum for all btree types.
+ */
+#define XFS_BTREE_CUR_CACHE_MAXLEVELS	(9)
+
 struct xfs_btree_ops {
 	/* size of the key and record structures */
 	size_t	key_len;
@@ -583,15 +589,18 @@ static inline struct xfs_btree_cur *
 xfs_btree_alloc_cursor(
 	struct xfs_mount	*mp,
 	struct xfs_trans	*tp,
-	xfs_btnum_t		btnum)
+	xfs_btnum_t		btnum,
+	uint8_t			maxlevels)
 {
 	struct xfs_btree_cur	*cur;
 
+	ASSERT(maxlevels <= XFS_BTREE_CUR_CACHE_MAXLEVELS);
+
 	cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL);
 	cur->bc_tp = tp;
 	cur->bc_mp = mp;
 	cur->bc_btnum = btnum;
-	cur->bc_maxlevels = XFS_BTREE_MAXLEVELS;
+	cur->bc_maxlevels = maxlevels;
 
 	return cur;
 }
diff --git a/libxfs/xfs_ialloc_btree.c b/libxfs/xfs_ialloc_btree.c
index 87a54c07f..be0918b7a 100644
--- a/libxfs/xfs_ialloc_btree.c
+++ b/libxfs/xfs_ialloc_btree.c
@@ -431,7 +431,8 @@ xfs_inobt_init_common(
 {
 	struct xfs_btree_cur	*cur;
 
-	cur = xfs_btree_alloc_cursor(mp, tp, btnum);
+	cur = xfs_btree_alloc_cursor(mp, tp, btnum,
+			M_IGEO(mp)->inobt_maxlevels);
 	if (btnum == XFS_BTNUM_INO) {
 		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
 		cur->bc_ops = &xfs_inobt_ops;
diff --git a/libxfs/xfs_refcount_btree.c b/libxfs/xfs_refcount_btree.c
index 55e68613f..6a7169244 100644
--- a/libxfs/xfs_refcount_btree.c
+++ b/libxfs/xfs_refcount_btree.c
@@ -321,7 +321,8 @@ xfs_refcountbt_init_common(
 
 	ASSERT(pag->pag_agno < mp->m_sb.sb_agcount);
 
-	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_REFC);
+	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_REFC,
+			mp->m_refc_maxlevels);
 	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);
 
 	cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
diff --git a/libxfs/xfs_rmap_btree.c b/libxfs/xfs_rmap_btree.c
index f6339a312..4c281b71f 100644
--- a/libxfs/xfs_rmap_btree.c
+++ b/libxfs/xfs_rmap_btree.c
@@ -450,7 +450,8 @@ xfs_rmapbt_init_common(
 	struct xfs_btree_cur	*cur;
 
 	/* Overlapping btree; 2 keys per pointer. */
-	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP);
+	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP,
+			mp->m_rmap_maxlevels);
 	cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
 	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
 	cur->bc_ops = &xfs_rmapbt_ops;
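
To illustrate the sizing idea behind xfs_btree_cur_sizeof() and the new
maxlevels argument to xfs_btree_alloc_cursor(), here is a minimal standalone
sketch, not libxfs code: every demo_* name below is hypothetical.  A cursor
ends in a flexible array of per-level state, so its allocation size follows
the maximum height of the specific btree type instead of a worst-case
constant.

/*
 * Standalone sketch of height-based cursor sizing.  The struct layout and
 * helpers are illustrative only; they are not the libxfs definitions.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_cur_level {
	void	*block;		/* buffer backing this level */
	int	ptr;		/* current slot within the block */
};

struct demo_cur {
	uint8_t			maxlevels;	/* capacity of levels[] */
	uint8_t			nlevels;	/* levels currently in use */
	struct demo_cur_level	levels[];	/* one entry per btree level */
};

/* Bytes needed for a cursor tall enough to handle maxlevels levels. */
static size_t
demo_cur_sizeof(uint8_t maxlevels)
{
	return sizeof(struct demo_cur) +
	       maxlevels * sizeof(struct demo_cur_level);
}

/* Allocate a zeroed cursor sized for the given height. */
static struct demo_cur *
demo_alloc_cursor(uint8_t maxlevels)
{
	struct demo_cur	*cur;

	cur = calloc(1, demo_cur_sizeof(maxlevels));
	if (!cur)
		return NULL;
	cur->maxlevels = maxlevels;
	return cur;
}

int
main(void)
{
	/* A 3-level cursor is much smaller than a worst-case 9-level one. */
	struct demo_cur	*small = demo_alloc_cursor(3);
	struct demo_cur	*tall = demo_alloc_cursor(9);

	if (!small || !tall)
		return 1;
	printf("3 levels: %zu bytes, 9 levels: %zu bytes\n",
	       demo_cur_sizeof(small->maxlevels),
	       demo_cur_sizeof(tall->maxlevels));
	free(small);
	free(tall);
	return 0;
}

Sizing each cursor to its own btree's height is what allows the follow-up
patches mentioned in the commit message to replace the single 9-level cursor
zone with per-btree-type caches.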