git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
RDMA/srpt: Make slab cache names unique
authorBart Van Assche <bvanassche@acm.org>
Wed, 9 Oct 2024 21:00:48 +0000 (14:00 -0700)
committerJason Gunthorpe <jgg@nvidia.com>
Fri, 11 Oct 2024 17:07:33 +0000 (14:07 -0300)
Since commit 4c39529663b9 ("slab: Warn on duplicate cache names when
DEBUG_VM=y"), slab complains about duplicate cache names. Hence this
patch. The approach is as follows:
- Maintain an xarray with the slab size as index and a reference count
  and a kmem_cache pointer as contents. Use srpt-${slab_size} as kmem
  cache name.
- Use 512-byte alignment for all slabs instead of only for some of the
  slabs.
- Increment the reference count instead of calling kmem_cache_create().
- Decrement the reference count instead of calling kmem_cache_destroy().

Fixes: 5dabcd0456d7 ("RDMA/srpt: Add support for immediate data")
Link: https://patch.msgid.link/r/20241009210048.4122518-1-bvanassche@acm.org
Reported-by: Shinichiro Kawasaki <shinichiro.kawasaki@wdc.com>
Closes: https://lore.kernel.org/linux-block/xpe6bea7rakpyoyfvspvin2dsozjmjtjktpph7rep3h25tv7fb@ooz4cu5z6bq6/
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Tested-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/ulp/srpt/ib_srpt.c

index 9632afbd727b64b0ddf0072ab8ce692588ad9803..5dfb4644446ba83f52dcafbc32a0fda1c886be8f 100644 (file)
@@ -68,6 +68,8 @@ MODULE_LICENSE("Dual BSD/GPL");
 static u64 srpt_service_guid;
 static DEFINE_SPINLOCK(srpt_dev_lock); /* Protects srpt_dev_list. */
 static LIST_HEAD(srpt_dev_list);       /* List of srpt_device structures. */
+static DEFINE_MUTEX(srpt_mc_mutex);    /* Protects srpt_memory_caches. */
+static DEFINE_XARRAY(srpt_memory_caches); /* See also srpt_memory_cache_entry */
 
 static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
 module_param(srp_max_req_size, int, 0444);
@@ -105,6 +107,63 @@ static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
 static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
 static void srpt_process_wait_list(struct srpt_rdma_ch *ch);
 
+/*
+ * Type of the entries in srpt_memory_caches. The xarray index is the slab
+ * object size; 'ref' counts outstanding srpt_cache_get() references to the
+ * kmem_cache 'c'. The entry is removed and 'c' destroyed when the last
+ * reference is dropped via srpt_cache_put().
+ */
+struct srpt_memory_cache_entry {
+       refcount_t ref;
+       struct kmem_cache *c;
+};
+
+/*
+ * srpt_cache_get - look up or create a kmem_cache for @object_size bytes
+ *
+ * Returns the shared cache on success or NULL on allocation failure. Each
+ * successful call must be balanced by an srpt_cache_put() call. Cache names
+ * are "srpt-${object_size}" and hence unique per size; all caches use
+ * 512-byte alignment.
+ */
+static struct kmem_cache *srpt_cache_get(unsigned int object_size)
+{
+       struct srpt_memory_cache_entry *e;
+       char name[32];
+       void *res;
+
+       /* Serializes lookup-then-insert against concurrent get/put calls. */
+       guard(mutex)(&srpt_mc_mutex);
+       /* Fast path: a cache for this size already exists - share it. */
+       e = xa_load(&srpt_memory_caches, object_size);
+       if (e) {
+               refcount_inc(&e->ref);
+               return e->c;
+       }
+       snprintf(name, sizeof(name), "srpt-%u", object_size);
+       e = kmalloc(sizeof(*e), GFP_KERNEL);
+       if (!e)
+               return NULL;
+       refcount_set(&e->ref, 1);
+       e->c = kmem_cache_create(name, object_size, /*align=*/512, 0, NULL);
+       if (!e->c)
+               goto free_entry;
+       /* xa_store() returns an xa_err()-encoded pointer on failure. */
+       res = xa_store(&srpt_memory_caches, object_size, e, GFP_KERNEL);
+       if (xa_is_err(res))
+               goto destroy_cache;
+       return e->c;
+
+destroy_cache:
+       kmem_cache_destroy(e->c);
+
+free_entry:
+       kfree(e);
+       return NULL;
+}
+
+/*
+ * srpt_cache_put - drop a reference obtained with srpt_cache_get()
+ *
+ * The last reference removes the entry from srpt_memory_caches and destroys
+ * the kmem_cache.
+ */
+static void srpt_cache_put(struct kmem_cache *c)
+{
+       struct srpt_memory_cache_entry *e = NULL;
+       unsigned long object_size;
+
+       guard(mutex)(&srpt_mc_mutex);
+       /*
+        * Reverse lookup of the entry owning @c. xa_for_each() leaves @e NULL
+        * when the iteration exhausts the xarray without a match, which the
+        * WARN below relies on to catch puts of unknown caches.
+        */
+       xa_for_each(&srpt_memory_caches, object_size, e)
+               if (e->c == c)
+                       break;
+       if (WARN_ON_ONCE(!e))
+               return;
+       if (!refcount_dec_and_test(&e->ref))
+               return;
+       WARN_ON_ONCE(xa_erase(&srpt_memory_caches, object_size) != e);
+       kmem_cache_destroy(e->c);
+       kfree(e);
+}
+
 /*
  * The only allowed channel state changes are those that change the channel
  * state into a state with a higher numerical value. Hence the new > prev test.
@@ -2119,13 +2178,13 @@ static void srpt_release_channel_work(struct work_struct *w)
                             ch->sport->sdev, ch->rq_size,
                             ch->rsp_buf_cache, DMA_TO_DEVICE);
 
-       kmem_cache_destroy(ch->rsp_buf_cache);
+       srpt_cache_put(ch->rsp_buf_cache);
 
        srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
                             sdev, ch->rq_size,
                             ch->req_buf_cache, DMA_FROM_DEVICE);
 
-       kmem_cache_destroy(ch->req_buf_cache);
+       srpt_cache_put(ch->req_buf_cache);
 
        kref_put(&ch->kref, srpt_free_ch);
 }
@@ -2245,8 +2304,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
        INIT_LIST_HEAD(&ch->cmd_wait_list);
        ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
 
-       ch->rsp_buf_cache = kmem_cache_create("srpt-rsp-buf", ch->max_rsp_size,
-                                             512, 0, NULL);
+       ch->rsp_buf_cache = srpt_cache_get(ch->max_rsp_size);
        if (!ch->rsp_buf_cache)
                goto free_ch;
 
@@ -2280,8 +2338,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
                alignment_offset = round_up(imm_data_offset, 512) -
                        imm_data_offset;
                req_sz = alignment_offset + imm_data_offset + srp_max_req_size;
-               ch->req_buf_cache = kmem_cache_create("srpt-req-buf", req_sz,
-                                                     512, 0, NULL);
+               ch->req_buf_cache = srpt_cache_get(req_sz);
                if (!ch->req_buf_cache)
                        goto free_rsp_ring;
 
@@ -2478,7 +2535,7 @@ free_recv_ring:
                             ch->req_buf_cache, DMA_FROM_DEVICE);
 
 free_recv_cache:
-       kmem_cache_destroy(ch->req_buf_cache);
+       srpt_cache_put(ch->req_buf_cache);
 
 free_rsp_ring:
        srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
@@ -2486,7 +2543,7 @@ free_rsp_ring:
                             ch->rsp_buf_cache, DMA_TO_DEVICE);
 
 free_rsp_cache:
-       kmem_cache_destroy(ch->rsp_buf_cache);
+       srpt_cache_put(ch->rsp_buf_cache);
 
 free_ch:
        if (rdma_cm_id)
@@ -3055,7 +3112,7 @@ static void srpt_free_srq(struct srpt_device *sdev)
        srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
                             sdev->srq_size, sdev->req_buf_cache,
                             DMA_FROM_DEVICE);
-       kmem_cache_destroy(sdev->req_buf_cache);
+       srpt_cache_put(sdev->req_buf_cache);
        sdev->srq = NULL;
 }
 
@@ -3082,8 +3139,7 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
        pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
                 sdev->device->attrs.max_srq_wr, dev_name(&device->dev));
 
-       sdev->req_buf_cache = kmem_cache_create("srpt-srq-req-buf",
-                                               srp_max_req_size, 0, 0, NULL);
+       sdev->req_buf_cache = srpt_cache_get(srp_max_req_size);
        if (!sdev->req_buf_cache)
                goto free_srq;
 
@@ -3105,7 +3161,7 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
        return 0;
 
 free_cache:
-       kmem_cache_destroy(sdev->req_buf_cache);
+       srpt_cache_put(sdev->req_buf_cache);
 
 free_srq:
        ib_destroy_srq(srq);