xfs: use separate btree cursor cache for each btree type
author     Darrick J. Wong <djwong@kernel.org>
           Thu, 28 Apr 2022 19:39:04 +0000 (15:39 -0400)
committer  Eric Sandeen <sandeen@sandeen.net>
           Thu, 28 Apr 2022 19:39:04 +0000 (15:39 -0400)
Source kernel commit: 9fa47bdcd33b117599e9ee3f2e315cb47939ac2d

Now that we have the infrastructure to track the max possible height of
each btree type, we can create a separate slab cache for cursors of each
type of btree.  For smaller indices like the free space btrees, this
means that we can pack more cursors into a slab page, improving slab
utilization.
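
The saving comes from sizing each cursor by its btree type's maximum possible
on-disk height rather than by the old shared worst case
(XFS_BTREE_CUR_CACHE_MAXLEVELS, 9 levels, removed below). As a rough userspace
sketch of the arithmetic only -- the struct layout, field sizes, and the
5-level figure are invented for illustration and are not the real
struct xfs_btree_cur:

    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    /*
     * Hypothetical stand-ins for the real cursor structures; field layout
     * and sizes are illustrative only.
     */
    struct cur_level {
            void            *bp;            /* buffer for this btree level */
            int             ptr;            /* key/record slot in that block */
    };

    struct cur {
            void            *mp;            /* mount */
            void            *tp;            /* transaction */
            unsigned int    flags;
            uint8_t         nlevels;        /* current height */
            uint8_t         maxlevels;      /* max possible height */
            struct cur_level levels[];      /* one entry per level */
    };

    /* Same idea as xfs_btree_cur_sizeof(): size a cursor for nlevels levels. */
    static size_t cur_sizeof(unsigned int nlevels)
    {
            return sizeof(struct cur) + nlevels * sizeof(struct cur_level);
    }

    int main(void)
    {
            size_t  page = 4096;    /* typical slab page size */

            /* Old scheme: every cursor sized for the shared worst case. */
            printf("worst case: %3zu bytes -> %zu cursors per page\n",
                            cur_sizeof(9), page / cur_sizeof(9));

            /* New scheme: a cache sized for, say, a 5-level free space btree. */
            printf("bnobt-like: %3zu bytes -> %zu cursors per page\n",
                            cur_sizeof(5), page / cur_sizeof(5));
            return 0;
    }

With these made-up sizes a page-sized slab holds roughly half again as many
5-level cursors as worst-case ones; the real gain depends on the actual
cursor layout and each btree type's ondisk maxlevels.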

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Eric Sandeen <sandeen@sandeen.net>
16 files changed:
include/kmem.h
include/platform_defs.h.in
libxfs/init.c
libxfs/kmem.c
libxfs/xfs_alloc_btree.c
libxfs/xfs_alloc_btree.h
libxfs/xfs_bmap_btree.c
libxfs/xfs_bmap_btree.h
libxfs/xfs_btree.c
libxfs/xfs_btree.h
libxfs/xfs_ialloc_btree.c
libxfs/xfs_ialloc_btree.h
libxfs/xfs_refcount_btree.c
libxfs/xfs_refcount_btree.h
libxfs/xfs_rmap_btree.c
libxfs/xfs_rmap_btree.h

diff --git a/include/kmem.h b/include/kmem.h
index 6d3172569fc5926de69723a8e489deb7819abf99..2a0acf93398ff5efc4c1dba6bfd42922e3195acd 100644
 #define KM_NOLOCKDEP   0x0020u
 
 typedef struct kmem_zone {
-       int     zone_unitsize;  /* Size in bytes of zone unit           */
-       char    *zone_name;     /* tag name                             */
-       int     allocated;      /* debug: How many currently allocated  */
+       int             zone_unitsize;  /* Size in bytes of zone unit */
+       int             allocated;      /* debug: How many allocated? */
+       unsigned int    align;
+       const char      *zone_name;     /* tag name */
+       void            (*ctor)(void *);
 } kmem_zone_t;
 
 typedef unsigned int __bitwise gfp_t;
@@ -26,11 +28,26 @@ typedef unsigned int __bitwise gfp_t;
 
 #define __GFP_ZERO     (__force gfp_t)1
 
-extern kmem_zone_t *kmem_zone_init(int, char *);
+kmem_zone_t * kmem_cache_create(const char *name, unsigned int size,
+               unsigned int align, unsigned int slab_flags,
+               void (*ctor)(void *));
+
+static inline kmem_zone_t *
+kmem_zone_init(unsigned int size, const char *name)
+{
+       return kmem_cache_create(name, size, 0, 0, NULL);
+}
+
 extern void    *kmem_cache_alloc(kmem_zone_t *, gfp_t);
 extern void    *kmem_cache_zalloc(kmem_zone_t *, gfp_t);
 extern int     kmem_zone_destroy(kmem_zone_t *);
 
+static inline void
+kmem_cache_destroy(kmem_zone_t *zone)
+{
+       kmem_zone_destroy(zone);
+}
+
 static inline void
 kmem_cache_free(kmem_zone_t *zone, void *ptr)
 {
diff --git a/include/platform_defs.h.in b/include/platform_defs.h.in
index 6e6f26ef5f8e4e5a388e922e1e979b1c554103d4..315ad77cfb78e0b36cd48a088f4b5f5f9756bb12 100644
@@ -134,4 +134,7 @@ static inline size_t __ab_c_size(size_t a, size_t b, size_t c)
 #    define fallthrough                    do {} while (0)  /* fallthrough */
 #endif
 
+/* Only needed for the kernel. */
+#define __init
+
 #endif /* __XFS_PLATFORM_DEFS_H__ */
diff --git a/libxfs/init.c b/libxfs/init.c
index d7df763265ea7bfe8b73983b965a0c3eec848b2a..fcfd45c5f79d6de9233f23422a2a8b90bf851380 100644
@@ -231,6 +231,8 @@ check_open(char *path, int flags, char **rawfile, char **blockfile)
 static void
 init_zones(void)
 {
+       int     error;
+
        /* initialise zone allocation */
        xfs_buf_zone = kmem_zone_init(sizeof(struct xfs_buf), "xfs_buffer");
        xfs_inode_zone = kmem_zone_init(sizeof(struct xfs_inode), "xfs_inode");
@@ -241,9 +243,11 @@ init_zones(void)
                        sizeof(struct xfs_buf_log_item), "xfs_buf_log_item");
        xfs_da_state_zone = kmem_zone_init(
                        sizeof(struct xfs_da_state), "xfs_da_state");
-       xfs_btree_cur_zone = kmem_zone_init(
-                       xfs_btree_cur_sizeof(XFS_BTREE_CUR_CACHE_MAXLEVELS),
-                       "xfs_btree_cur");
+       error = xfs_btree_init_cur_caches();
+       if (error) {
+               fprintf(stderr, "Could not allocate btree cursor caches.\n");
+               abort();
+       }
        xfs_bmap_free_item_zone = kmem_zone_init(
                        sizeof(struct xfs_extent_free_item),
                        "xfs_bmap_free_item");
@@ -262,7 +266,7 @@ destroy_zones(void)
        leaked += kmem_zone_destroy(xfs_ifork_zone);
        leaked += kmem_zone_destroy(xfs_buf_item_zone);
        leaked += kmem_zone_destroy(xfs_da_state_zone);
-       leaked += kmem_zone_destroy(xfs_btree_cur_zone);
+       xfs_btree_destroy_cur_caches();
        leaked += kmem_zone_destroy(xfs_bmap_free_item_zone);
        leaked += kmem_zone_destroy(xfs_trans_zone);
 
diff --git a/libxfs/kmem.c b/libxfs/kmem.c
index 3d72ac944377743cdbdf87721d1d091fd23dd826..c4c57670d22b3bd8525869cbe29cfad8f25aa0c9 100644
@@ -6,9 +6,9 @@
 /*
  * Simple memory interface
  */
-
 kmem_zone_t *
-kmem_zone_init(int size, char *name)
+kmem_cache_create(const char *name, unsigned int size, unsigned int align,
+               unsigned int slab_flags, void (*ctor)(void *))
 {
        kmem_zone_t     *ptr = malloc(sizeof(kmem_zone_t));
 
@@ -21,6 +21,9 @@ kmem_zone_init(int size, char *name)
        ptr->zone_unitsize = size;
        ptr->zone_name = name;
        ptr->allocated = 0;
+       ptr->align = align;
+       ptr->ctor = ctor;
+
        return ptr;
 }
 
diff --git a/libxfs/xfs_alloc_btree.c b/libxfs/xfs_alloc_btree.c
index 6de3af3717ae3c8177dd5b84bdbcbb5ea6ab8cf7..2176a923a1c11d5a18cd028796381334c68e8a1b 100644
@@ -18,6 +18,7 @@
 #include "xfs_trans.h"
 #include "xfs_ag.h"
 
+static kmem_zone_t     *xfs_allocbt_cur_cache;
 
 STATIC struct xfs_btree_cur *
 xfs_allocbt_dup_cursor(
@@ -475,7 +476,8 @@ xfs_allocbt_init_common(
 
        ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
 
-       cur = xfs_btree_alloc_cursor(mp, tp, btnum, mp->m_alloc_maxlevels);
+       cur = xfs_btree_alloc_cursor(mp, tp, btnum, mp->m_alloc_maxlevels,
+                       xfs_allocbt_cur_cache);
        cur->bc_ag.abt.active = false;
 
        if (btnum == XFS_BTNUM_CNT) {
@@ -615,3 +617,22 @@ xfs_allocbt_calc_size(
 {
        return xfs_btree_calc_size(mp->m_alloc_mnr, len);
 }
+
+int __init
+xfs_allocbt_init_cur_cache(void)
+{
+       xfs_allocbt_cur_cache = kmem_cache_create("xfs_bnobt_cur",
+                       xfs_btree_cur_sizeof(xfs_allocbt_maxlevels_ondisk()),
+                       0, 0, NULL);
+
+       if (!xfs_allocbt_cur_cache)
+               return -ENOMEM;
+       return 0;
+}
+
+void
+xfs_allocbt_destroy_cur_cache(void)
+{
+       kmem_cache_destroy(xfs_allocbt_cur_cache);
+       xfs_allocbt_cur_cache = NULL;
+}
diff --git a/libxfs/xfs_alloc_btree.h b/libxfs/xfs_alloc_btree.h
index c715bee5ae90b8725f4903096686f09ae6d42495..45df893ef6bb00d473f717061ac1ca417ed0a39f 100644
@@ -62,4 +62,7 @@ void xfs_allocbt_commit_staged_btree(struct xfs_btree_cur *cur,
 
 unsigned int xfs_allocbt_maxlevels_ondisk(void);
 
+int __init xfs_allocbt_init_cur_cache(void);
+void xfs_allocbt_destroy_cur_cache(void);
+
 #endif /* __XFS_ALLOC_BTREE_H__ */
diff --git a/libxfs/xfs_bmap_btree.c b/libxfs/xfs_bmap_btree.c
index 85faea1dd00c6f29900e3a0a1a683cc3ba675dcc..cde313d7c699004fa9cd771ebf9eca721593c71f 100644
@@ -20,6 +20,8 @@
 #include "xfs_trace.h"
 #include "xfs_rmap.h"
 
+static kmem_zone_t     *xfs_bmbt_cur_cache;
+
 /*
  * Convert on-disk form of btree root to in-memory form.
  */
@@ -551,7 +553,7 @@ xfs_bmbt_init_cursor(
        ASSERT(whichfork != XFS_COW_FORK);
 
        cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP,
-                       mp->m_bm_maxlevels[whichfork]);
+                       mp->m_bm_maxlevels[whichfork], xfs_bmbt_cur_cache);
        cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
        cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);
 
@@ -673,3 +675,22 @@ xfs_bmbt_calc_size(
 {
        return xfs_btree_calc_size(mp->m_bmap_dmnr, len);
 }
+
+int __init
+xfs_bmbt_init_cur_cache(void)
+{
+       xfs_bmbt_cur_cache = kmem_cache_create("xfs_bmbt_cur",
+                       xfs_btree_cur_sizeof(xfs_bmbt_maxlevels_ondisk()),
+                       0, 0, NULL);
+
+       if (!xfs_bmbt_cur_cache)
+               return -ENOMEM;
+       return 0;
+}
+
+void
+xfs_bmbt_destroy_cur_cache(void)
+{
+       kmem_cache_destroy(xfs_bmbt_cur_cache);
+       xfs_bmbt_cur_cache = NULL;
+}
diff --git a/libxfs/xfs_bmap_btree.h b/libxfs/xfs_bmap_btree.h
index 2a1c9e607b52e5a0c7ec664a486693d028e96c3a..3e7a40a83835c2a5ec4718973c2a240103a24a16 100644
@@ -112,4 +112,7 @@ extern unsigned long long xfs_bmbt_calc_size(struct xfs_mount *mp,
 
 unsigned int xfs_bmbt_maxlevels_ondisk(void);
 
+int __init xfs_bmbt_init_cur_cache(void);
+void xfs_bmbt_destroy_cur_cache(void);
+
 #endif /* __XFS_BMAP_BTREE_H__ */
diff --git a/libxfs/xfs_btree.c b/libxfs/xfs_btree.c
index e541b061cea71d605f9a88e2cd6202efe5a7846a..4fe2378e75081f3fe963e7a4742a70d5d952a73b 100644
 #include "xfs_alloc.h"
 #include "xfs_btree_staging.h"
 #include "xfs_ag.h"
-
-/*
- * Cursor allocation zone.
- */
-kmem_zone_t    *xfs_btree_cur_zone;
+#include "xfs_alloc_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_rmap_btree.h"
+#include "xfs_refcount_btree.h"
 
 /*
  * Btree magic numbers.
@@ -376,7 +376,7 @@ xfs_btree_del_cursor(
                kmem_free(cur->bc_ops);
        if (!(cur->bc_flags & XFS_BTREE_LONG_PTRS) && cur->bc_ag.pag)
                xfs_perag_put(cur->bc_ag.pag);
-       kmem_cache_free(xfs_btree_cur_zone, cur);
+       kmem_cache_free(cur->bc_cache, cur);
 }
 
 /*
@@ -4963,3 +4963,42 @@ xfs_btree_has_more_records(
        else
                return block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK);
 }
+
+/* Set up all the btree cursor caches. */
+int __init
+xfs_btree_init_cur_caches(void)
+{
+       int             error;
+
+       error = xfs_allocbt_init_cur_cache();
+       if (error)
+               return error;
+       error = xfs_inobt_init_cur_cache();
+       if (error)
+               goto err;
+       error = xfs_bmbt_init_cur_cache();
+       if (error)
+               goto err;
+       error = xfs_rmapbt_init_cur_cache();
+       if (error)
+               goto err;
+       error = xfs_refcountbt_init_cur_cache();
+       if (error)
+               goto err;
+
+       return 0;
+err:
+       xfs_btree_destroy_cur_caches();
+       return error;
+}
+
+/* Destroy all the btree cursor caches, if they've been allocated. */
+void
+xfs_btree_destroy_cur_caches(void)
+{
+       xfs_allocbt_destroy_cur_cache();
+       xfs_inobt_destroy_cur_cache();
+       xfs_bmbt_destroy_cur_cache();
+       xfs_rmapbt_destroy_cur_cache();
+       xfs_refcountbt_destroy_cur_cache();
+}
diff --git a/libxfs/xfs_btree.h b/libxfs/xfs_btree.h
index fdf7090c74f4593f16eee36798a085d4b234029a..7bc5a379605212159d295331250c511ddd75e7f6 100644
@@ -13,8 +13,6 @@ struct xfs_trans;
 struct xfs_ifork;
 struct xfs_perag;
 
-extern kmem_zone_t     *xfs_btree_cur_zone;
-
 /*
  * Generic key, ptr and record wrapper structures.
  *
@@ -92,12 +90,6 @@ uint32_t xfs_btree_magic(int crc, xfs_btnum_t btnum);
 #define XFS_BTREE_STATS_ADD(cur, stat, val)    \
        XFS_STATS_ADD_OFF((cur)->bc_mp, (cur)->bc_statoff + __XBTS_ ## stat, val)
 
-/*
- * The btree cursor zone hands out cursors that can handle up to this many
- * levels.  This is the known maximum for all btree types.
- */
-#define XFS_BTREE_CUR_CACHE_MAXLEVELS  (9)
-
 struct xfs_btree_ops {
        /* size of the key and record structures */
        size_t  key_len;
@@ -238,6 +230,7 @@ struct xfs_btree_cur
        struct xfs_trans        *bc_tp; /* transaction we're in, if any */
        struct xfs_mount        *bc_mp; /* file system mount struct */
        const struct xfs_btree_ops *bc_ops;
+       kmem_zone_t             *bc_cache; /* cursor cache */
        unsigned int            bc_flags; /* btree features - below */
        xfs_btnum_t             bc_btnum; /* identifies which btree type */
        union xfs_btree_irec    bc_rec; /* current insert/search record value */
@@ -592,19 +585,22 @@ xfs_btree_alloc_cursor(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        xfs_btnum_t             btnum,
-       uint8_t                 maxlevels)
+       uint8_t                 maxlevels,
+       kmem_zone_t             *cache)
 {
        struct xfs_btree_cur    *cur;
 
-       ASSERT(maxlevels <= XFS_BTREE_CUR_CACHE_MAXLEVELS);
-
-       cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL);
+       cur = kmem_cache_zalloc(cache, GFP_NOFS | __GFP_NOFAIL);
        cur->bc_tp = tp;
        cur->bc_mp = mp;
        cur->bc_btnum = btnum;
        cur->bc_maxlevels = maxlevels;
+       cur->bc_cache = cache;
 
        return cur;
 }
 
+int __init xfs_btree_init_cur_caches(void);
+void xfs_btree_destroy_cur_caches(void);
+
 #endif /* __XFS_BTREE_H__ */
diff --git a/libxfs/xfs_ialloc_btree.c b/libxfs/xfs_ialloc_btree.c
index 30b1abe91cf2de7cd14832b051f8b629c9681192..539e7c03e1d0eea43a483c1e1e5a95cb26e54b60 100644
@@ -21,6 +21,8 @@
 #include "xfs_rmap.h"
 #include "xfs_ag.h"
 
+static kmem_zone_t     *xfs_inobt_cur_cache;
+
 STATIC int
 xfs_inobt_get_minrecs(
        struct xfs_btree_cur    *cur,
@@ -432,7 +434,7 @@ xfs_inobt_init_common(
        struct xfs_btree_cur    *cur;
 
        cur = xfs_btree_alloc_cursor(mp, tp, btnum,
-                       M_IGEO(mp)->inobt_maxlevels);
+                       M_IGEO(mp)->inobt_maxlevels, xfs_inobt_cur_cache);
        if (btnum == XFS_BTNUM_INO) {
                cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
                cur->bc_ops = &xfs_inobt_ops;
@@ -811,3 +813,22 @@ xfs_iallocbt_calc_size(
 {
        return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr, len);
 }
+
+int __init
+xfs_inobt_init_cur_cache(void)
+{
+       xfs_inobt_cur_cache = kmem_cache_create("xfs_inobt_cur",
+                       xfs_btree_cur_sizeof(xfs_inobt_maxlevels_ondisk()),
+                       0, 0, NULL);
+
+       if (!xfs_inobt_cur_cache)
+               return -ENOMEM;
+       return 0;
+}
+
+void
+xfs_inobt_destroy_cur_cache(void)
+{
+       kmem_cache_destroy(xfs_inobt_cur_cache);
+       xfs_inobt_cur_cache = NULL;
+}
diff --git a/libxfs/xfs_ialloc_btree.h b/libxfs/xfs_ialloc_btree.h
index 6d3e4a3316d7582b7c67e4ad9757829df5ef6c5f..26451cb76b98bb306d6d50d13b2f290ec1d58b52 100644
@@ -77,4 +77,7 @@ void xfs_inobt_commit_staged_btree(struct xfs_btree_cur *cur,
 
 unsigned int xfs_iallocbt_maxlevels_ondisk(void);
 
+int __init xfs_inobt_init_cur_cache(void);
+void xfs_inobt_destroy_cur_cache(void);
+
 #endif /* __XFS_IALLOC_BTREE_H__ */
diff --git a/libxfs/xfs_refcount_btree.c b/libxfs/xfs_refcount_btree.c
index 1d7b2d7c01cdb6991adec898657467954b6b38d3..2c02e33e367e20ee45115dc8d35c653ba6de08d4 100644
@@ -20,6 +20,8 @@
 #include "xfs_rmap.h"
 #include "xfs_ag.h"
 
+static kmem_zone_t     *xfs_refcountbt_cur_cache;
+
 static struct xfs_btree_cur *
 xfs_refcountbt_dup_cursor(
        struct xfs_btree_cur    *cur)
@@ -322,7 +324,7 @@ xfs_refcountbt_init_common(
        ASSERT(pag->pag_agno < mp->m_sb.sb_agcount);
 
        cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_REFC,
-                       mp->m_refc_maxlevels);
+                       mp->m_refc_maxlevels, xfs_refcountbt_cur_cache);
        cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);
 
        cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
@@ -513,3 +515,22 @@ xfs_refcountbt_calc_reserves(
 
        return error;
 }
+
+int __init
+xfs_refcountbt_init_cur_cache(void)
+{
+       xfs_refcountbt_cur_cache = kmem_cache_create("xfs_refcbt_cur",
+                       xfs_btree_cur_sizeof(xfs_refcountbt_maxlevels_ondisk()),
+                       0, 0, NULL);
+
+       if (!xfs_refcountbt_cur_cache)
+               return -ENOMEM;
+       return 0;
+}
+
+void
+xfs_refcountbt_destroy_cur_cache(void)
+{
+       kmem_cache_destroy(xfs_refcountbt_cur_cache);
+       xfs_refcountbt_cur_cache = NULL;
+}
diff --git a/libxfs/xfs_refcount_btree.h b/libxfs/xfs_refcount_btree.h
index d7f7c89cbf355824565dfae29ceef13c74b1d220..d66b37259bedb4dd66dc0073df34806b551c6350 100644
@@ -67,4 +67,7 @@ void xfs_refcountbt_commit_staged_btree(struct xfs_btree_cur *cur,
 
 unsigned int xfs_refcountbt_maxlevels_ondisk(void);
 
+int __init xfs_refcountbt_init_cur_cache(void);
+void xfs_refcountbt_destroy_cur_cache(void);
+
 #endif /* __XFS_REFCOUNT_BTREE_H__ */
diff --git a/libxfs/xfs_rmap_btree.c b/libxfs/xfs_rmap_btree.c
index eb3ef4090197e42369bec13773f7fc6aea0cb19d..ae3329b5d3b4d44812c438843e260f2540f0a3f8 100644
@@ -20,6 +20,8 @@
 #include "xfs_ag.h"
 #include "xfs_ag_resv.h"
 
+static kmem_zone_t     *xfs_rmapbt_cur_cache;
+
 /*
  * Reverse map btree.
  *
@@ -451,7 +453,7 @@ xfs_rmapbt_init_common(
 
        /* Overlapping btree; 2 keys per pointer. */
        cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP,
-                       mp->m_rmap_maxlevels);
+                       mp->m_rmap_maxlevels, xfs_rmapbt_cur_cache);
        cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
        cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
        cur->bc_ops = &xfs_rmapbt_ops;
@@ -672,3 +674,22 @@ xfs_rmapbt_calc_reserves(
 
        return error;
 }
+
+int __init
+xfs_rmapbt_init_cur_cache(void)
+{
+       xfs_rmapbt_cur_cache = kmem_cache_create("xfs_rmapbt_cur",
+                       xfs_btree_cur_sizeof(xfs_rmapbt_maxlevels_ondisk()),
+                       0, 0, NULL);
+
+       if (!xfs_rmapbt_cur_cache)
+               return -ENOMEM;
+       return 0;
+}
+
+void
+xfs_rmapbt_destroy_cur_cache(void)
+{
+       kmem_cache_destroy(xfs_rmapbt_cur_cache);
+       xfs_rmapbt_cur_cache = NULL;
+}
diff --git a/libxfs/xfs_rmap_btree.h b/libxfs/xfs_rmap_btree.h
index e9778b62ad55bf4a5a269616e83e296dbb6a0644..3244715dd111bd530b1b878829bd930fb41cb867 100644
@@ -61,4 +61,7 @@ extern int xfs_rmapbt_calc_reserves(struct xfs_mount *mp, struct xfs_trans *tp,
 
 unsigned int xfs_rmapbt_maxlevels_ondisk(void);
 
+int __init xfs_rmapbt_init_cur_cache(void);
+void xfs_rmapbt_destroy_cur_cache(void);
+
 #endif /* __XFS_RMAP_BTREE_H__ */