git.ipfire.org Git - thirdparty/xfsprogs-dev.git/commitdiff
xfs: create slab caches for frequently-used deferred items
author: Darrick J. Wong <djwong@kernel.org>
Thu, 28 Apr 2022 19:39:04 +0000 (15:39 -0400)
committer: Eric Sandeen <sandeen@sandeen.net>
Thu, 28 Apr 2022 19:39:04 +0000 (15:39 -0400)
Source kernel commit: f3c799c22c661e181c71a0d9914fc923023f65fb

Create slab caches for the high-level structures that coordinate
deferred intent items, since they're used fairly heavily.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Chandan Babu R <chandan.babu@oracle.com>
Signed-off-by: Eric Sandeen <sandeen@sandeen.net>
libxfs/defer_item.c
libxfs/init.c
libxfs/xfs_bmap.c
libxfs/xfs_bmap.h
libxfs/xfs_defer.c
libxfs/xfs_defer.h
libxfs/xfs_refcount.c
libxfs/xfs_refcount.h
libxfs/xfs_rmap.c
libxfs/xfs_rmap.h

index 1412d08907d2cf7ce8ddfc63a1cb71b593989bdb..1277469f2e0fe396957fc9ad9e9a2a0553f451b2 100644 (file)
@@ -216,7 +216,7 @@ xfs_rmap_update_finish_item(
                        rmap->ri_bmap.br_blockcount,
                        rmap->ri_bmap.br_state,
                        state);
-       kmem_free(rmap);
+       kmem_cache_free(xfs_rmap_intent_cache, rmap);
        return error;
 }
 
@@ -235,7 +235,7 @@ xfs_rmap_update_cancel_item(
        struct xfs_rmap_intent          *rmap;
 
        rmap = container_of(item, struct xfs_rmap_intent, ri_list);
-       kmem_free(rmap);
+       kmem_cache_free(xfs_rmap_intent_cache, rmap);
 }
 
 const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
@@ -319,7 +319,7 @@ xfs_refcount_update_finish_item(
                refc->ri_blockcount = new_aglen;
                return -EAGAIN;
        }
-       kmem_free(refc);
+       kmem_cache_free(xfs_refcount_intent_cache, refc);
        return error;
 }
 
@@ -338,7 +338,7 @@ xfs_refcount_update_cancel_item(
        struct xfs_refcount_intent      *refc;
 
        refc = container_of(item, struct xfs_refcount_intent, ri_list);
-       kmem_free(refc);
+       kmem_cache_free(xfs_refcount_intent_cache, refc);
 }
 
 const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
@@ -418,7 +418,7 @@ xfs_bmap_update_finish_item(
                bmap->bi_bmap.br_blockcount = count;
                return -EAGAIN;
        }
-       kmem_free(bmap);
+       kmem_cache_free(xfs_bmap_intent_cache, bmap);
        return error;
 }
 
@@ -437,7 +437,7 @@ xfs_bmap_update_cancel_item(
        struct xfs_bmap_intent          *bmap;
 
        bmap = container_of(item, struct xfs_bmap_intent, bi_list);
-       kmem_free(bmap);
+       kmem_cache_free(xfs_bmap_intent_cache, bmap);
 }
 
 const struct xfs_defer_op_type xfs_bmap_update_defer_type = {
index e258f938d630364649735de3ce64d7b9bb5852ce..2e574aa42f749e3bb467efb58f449e9246244778 100644 (file)
@@ -241,6 +241,11 @@ init_caches(void)
                        sizeof(struct xfs_inode_log_item),"xfs_inode_log_item");
        xfs_buf_item_cache = kmem_cache_init(
                        sizeof(struct xfs_buf_log_item), "xfs_buf_log_item");
+       error = xfs_defer_init_item_caches();
+       if (error) {
+               fprintf(stderr, "Could not allocate defer init item caches.\n");
+               abort();
+       }
        xfs_da_state_cache = kmem_cache_init(
                        sizeof(struct xfs_da_state), "xfs_da_state");
        error = xfs_btree_init_cur_caches();
@@ -266,6 +271,7 @@ destroy_caches(void)
        leaked += kmem_cache_destroy(xfs_ifork_cache);
        leaked += kmem_cache_destroy(xfs_buf_item_cache);
        leaked += kmem_cache_destroy(xfs_da_state_cache);
+       xfs_defer_destroy_item_caches();
        xfs_btree_destroy_cur_caches();
        leaked += kmem_cache_destroy(xfs_bmap_free_item_cache);
        leaked += kmem_cache_destroy(xfs_trans_cache);
index 0514d6e5c150f02226a4207394458733416736f8..c261d11977e0f7ff8d7c6fe92b57162948b87621 100644 (file)
@@ -30,7 +30,7 @@
 #include "xfs_ag_resv.h"
 #include "xfs_refcount.h"
 
-
+struct kmem_cache              *xfs_bmap_intent_cache;
 struct kmem_cache              *xfs_bmap_free_item_cache;
 
 /*
@@ -6183,7 +6183,7 @@ __xfs_bmap_add(
                        bmap->br_blockcount,
                        bmap->br_state);
 
-       bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_NOFS);
+       bi = kmem_cache_alloc(xfs_bmap_intent_cache, GFP_NOFS | __GFP_NOFAIL);
        INIT_LIST_HEAD(&bi->bi_list);
        bi->bi_type = type;
        bi->bi_owner = ip;
@@ -6294,3 +6294,20 @@ xfs_bmap_validate_extent(
                return __this_address;
        return NULL;
 }
+
+int __init
+xfs_bmap_intent_init_cache(void)
+{
+       xfs_bmap_intent_cache = kmem_cache_create("xfs_bmap_intent",
+                       sizeof(struct xfs_bmap_intent),
+                       0, 0, NULL);
+
+       return xfs_bmap_intent_cache != NULL ? 0 : -ENOMEM;
+}
+
+void
+xfs_bmap_intent_destroy_cache(void)
+{
+       kmem_cache_destroy(xfs_bmap_intent_cache);
+       xfs_bmap_intent_cache = NULL;
+}
index db01fe83bb8ae767cc816d9a9f655273b241c969..fa73a56827b1e7fef4105ec5e9927612bba2ee2c 100644 (file)
@@ -290,4 +290,9 @@ int xfs_bmapi_remap(struct xfs_trans *tp, struct xfs_inode *ip,
                xfs_fileoff_t bno, xfs_filblks_t len, xfs_fsblock_t startblock,
                int flags);
 
+extern struct kmem_cache       *xfs_bmap_intent_cache;
+
+int __init xfs_bmap_intent_init_cache(void);
+void xfs_bmap_intent_destroy_cache(void);
+
 #endif /* __XFS_BMAP_H__ */
index 40d49abc37570d1526931c16e6b05af249b00ece..f71bb055c06df4c25aa3ced55565015f232fdb70 100644 (file)
 #include "xfs_trans.h"
 #include "xfs_inode.h"
 #include "xfs_trace.h"
+#include "xfs_rmap.h"
+#include "xfs_refcount.h"
+#include "xfs_bmap.h"
+
+static struct kmem_cache       *xfs_defer_pending_cache;
 
 /*
  * Deferred Operations in XFS
@@ -361,7 +366,7 @@ xfs_defer_cancel_list(
                        ops->cancel_item(pwi);
                }
                ASSERT(dfp->dfp_count == 0);
-               kmem_free(dfp);
+               kmem_cache_free(xfs_defer_pending_cache, dfp);
        }
 }
 
@@ -458,7 +463,7 @@ xfs_defer_finish_one(
 
        /* Done with the dfp, free it. */
        list_del(&dfp->dfp_list);
-       kmem_free(dfp);
+       kmem_cache_free(xfs_defer_pending_cache, dfp);
 out:
        if (ops->finish_cleanup)
                ops->finish_cleanup(tp, state, error);
@@ -592,8 +597,8 @@ xfs_defer_add(
                        dfp = NULL;
        }
        if (!dfp) {
-               dfp = kmem_alloc(sizeof(struct xfs_defer_pending),
-                               KM_NOFS);
+               dfp = kmem_cache_zalloc(xfs_defer_pending_cache,
+                               GFP_NOFS | __GFP_NOFAIL);
                dfp->dfp_type = type;
                dfp->dfp_intent = NULL;
                dfp->dfp_done = NULL;
@@ -805,3 +810,55 @@ xfs_defer_resources_rele(
        dres->dr_bufs = 0;
        dres->dr_ordered = 0;
 }
+
+static inline int __init
+xfs_defer_init_cache(void)
+{
+       xfs_defer_pending_cache = kmem_cache_create("xfs_defer_pending",
+                       sizeof(struct xfs_defer_pending),
+                       0, 0, NULL);
+
+       return xfs_defer_pending_cache != NULL ? 0 : -ENOMEM;
+}
+
+static inline void
+xfs_defer_destroy_cache(void)
+{
+       kmem_cache_destroy(xfs_defer_pending_cache);
+       xfs_defer_pending_cache = NULL;
+}
+
+/* Set up caches for deferred work items. */
+int __init
+xfs_defer_init_item_caches(void)
+{
+       int                             error;
+
+       error = xfs_defer_init_cache();
+       if (error)
+               return error;
+       error = xfs_rmap_intent_init_cache();
+       if (error)
+               goto err;
+       error = xfs_refcount_intent_init_cache();
+       if (error)
+               goto err;
+       error = xfs_bmap_intent_init_cache();
+       if (error)
+               goto err;
+
+       return 0;
+err:
+       xfs_defer_destroy_item_caches();
+       return error;
+}
+
+/* Destroy all the deferred work item caches, if they've been allocated. */
+void
+xfs_defer_destroy_item_caches(void)
+{
+       xfs_bmap_intent_destroy_cache();
+       xfs_refcount_intent_destroy_cache();
+       xfs_rmap_intent_destroy_cache();
+       xfs_defer_destroy_cache();
+}
index 7952695c7c410c61d4f89c9b930358a317930739..7bb8a31ad65bb453b14b95c5a5a59f814665c85d 100644 (file)
@@ -122,4 +122,7 @@ void xfs_defer_ops_capture_free(struct xfs_mount *mp,
                struct xfs_defer_capture *d);
 void xfs_defer_resources_rele(struct xfs_defer_resources *dres);
 
+int __init xfs_defer_init_item_caches(void);
+void xfs_defer_destroy_item_caches(void);
+
 #endif /* __XFS_DEFER_H__ */
index 2aa64d3e23f27fbdd887292366cff4f79751308e..da3cd7d57eb7f5014b5ec1fc9ec55c373bd41249 100644 (file)
@@ -23,6 +23,8 @@
 #include "xfs_rmap.h"
 #include "xfs_ag.h"
 
+struct kmem_cache      *xfs_refcount_intent_cache;
+
 /* Allowable refcount adjustment amounts. */
 enum xfs_refc_adjust_op {
        XFS_REFCOUNT_ADJUST_INCREASE    = 1,
@@ -1234,8 +1236,8 @@ __xfs_refcount_add(
                        type, XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
                        blockcount);
 
-       ri = kmem_alloc(sizeof(struct xfs_refcount_intent),
-                       KM_NOFS);
+       ri = kmem_cache_alloc(xfs_refcount_intent_cache,
+                       GFP_NOFS | __GFP_NOFAIL);
        INIT_LIST_HEAD(&ri->ri_list);
        ri->ri_type = type;
        ri->ri_startblock = startblock;
@@ -1781,3 +1783,20 @@ xfs_refcount_has_record(
 
        return xfs_btree_has_record(cur, &low, &high, exists);
 }
+
+int __init
+xfs_refcount_intent_init_cache(void)
+{
+       xfs_refcount_intent_cache = kmem_cache_create("xfs_refc_intent",
+                       sizeof(struct xfs_refcount_intent),
+                       0, 0, NULL);
+
+       return xfs_refcount_intent_cache != NULL ? 0 : -ENOMEM;
+}
+
+void
+xfs_refcount_intent_destroy_cache(void)
+{
+       kmem_cache_destroy(xfs_refcount_intent_cache);
+       xfs_refcount_intent_cache = NULL;
+}
index 894045968bc6a1078850699bf7c0f779f19e2fff..9eb01edbd89d637f62297a9e3fb7a104aba65c31 100644 (file)
@@ -83,4 +83,9 @@ extern void xfs_refcount_btrec_to_irec(const union xfs_btree_rec *rec,
 extern int xfs_refcount_insert(struct xfs_btree_cur *cur,
                struct xfs_refcount_irec *irec, int *stat);
 
+extern struct kmem_cache       *xfs_refcount_intent_cache;
+
+int __init xfs_refcount_intent_init_cache(void);
+void xfs_refcount_intent_destroy_cache(void);
+
 #endif /* __XFS_REFCOUNT_H__ */
index e93010ff5dfe0e47edfa8ec3e5069822469143f5..d6601a6573fe52a04181015c46082c7a106f60d0 100644 (file)
@@ -23,6 +23,8 @@
 #include "xfs_inode.h"
 #include "xfs_ag.h"
 
+struct kmem_cache      *xfs_rmap_intent_cache;
+
 /*
  * Lookup the first record less than or equal to [bno, len, owner, offset]
  * in the btree given by cur.
@@ -2484,7 +2486,7 @@ __xfs_rmap_add(
                        bmap->br_blockcount,
                        bmap->br_state);
 
-       ri = kmem_alloc(sizeof(struct xfs_rmap_intent), KM_NOFS);
+       ri = kmem_cache_alloc(xfs_rmap_intent_cache, GFP_NOFS | __GFP_NOFAIL);
        INIT_LIST_HEAD(&ri->ri_list);
        ri->ri_type = type;
        ri->ri_owner = owner;
@@ -2778,3 +2780,20 @@ const struct xfs_owner_info XFS_RMAP_OINFO_REFC = {
 const struct xfs_owner_info XFS_RMAP_OINFO_COW = {
        .oi_owner = XFS_RMAP_OWN_COW,
 };
+
+int __init
+xfs_rmap_intent_init_cache(void)
+{
+       xfs_rmap_intent_cache = kmem_cache_create("xfs_rmap_intent",
+                       sizeof(struct xfs_rmap_intent),
+                       0, 0, NULL);
+
+       return xfs_rmap_intent_cache != NULL ? 0 : -ENOMEM;
+}
+
+void
+xfs_rmap_intent_destroy_cache(void)
+{
+       kmem_cache_destroy(xfs_rmap_intent_cache);
+       xfs_rmap_intent_cache = NULL;
+}
index 85dd98ac3f12f6030ed2875c4f3add21caca8fba..b718ebeda372d67191801c39246b5198d798f7ab 100644 (file)
@@ -215,4 +215,9 @@ extern const struct xfs_owner_info XFS_RMAP_OINFO_INODES;
 extern const struct xfs_owner_info XFS_RMAP_OINFO_REFC;
 extern const struct xfs_owner_info XFS_RMAP_OINFO_COW;
 
+extern struct kmem_cache       *xfs_rmap_intent_cache;
+
+int __init xfs_rmap_intent_init_cache(void);
+void xfs_rmap_intent_destroy_cache(void);
+
 #endif /* __XFS_RMAP_H__ */