net: page_pool: create hooks for custom memory providers
author     Pavel Begunkov <asml.silence@gmail.com>
           Tue, 4 Feb 2025 21:56:15 +0000 (13:56 -0800)
committer  Jakub Kicinski <kuba@kernel.org>
           Fri, 7 Feb 2025 00:27:30 +0000 (16:27 -0800)
A spin-off from the original page pool memory providers patch by Jakub,
which allows extending page pools with custom allocators. One such
provider is devmem TCP; another is the io_uring zerocopy support added
in the following patches.

Link: https://lore.kernel.org/netdev/20230707183935.997267-7-kuba@kernel.org/
Co-developed-by: Jakub Kicinski <kuba@kernel.org> # initial mp proposal
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: David Wei <dw@davidwei.uk>
Link: https://patch.msgid.link/20250204215622.695511-5-dw@davidwei.uk
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
include/net/page_pool/memory_provider.h [new file with mode: 0644]
include/net/page_pool/types.h
net/core/devmem.c
net/core/page_pool.c
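
For orientation, the sketch below is not part of the patch; the my_provider_bind_queue name and its caller conventions are invented for illustration. It shows how a custom provider would be attached to an RX queue through the new mp_params fields, mirroring what the devmem changes further down do for dmabuf bindings.

#include <linux/errno.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/memory_provider.h>

/* Hypothetical helper, not part of this patch: attach a provider to one
 * RX queue.  Caller is assumed to hold rtnl_lock(); @ops must be a
 * built-in 'static const' table, since page_pool_init() rejects ops
 * tables that do not live in kernel rodata.
 */
static int my_provider_bind_queue(struct net_device *dev, u32 rxq_idx,
				  void *priv,
				  const struct memory_provider_ops *ops)
{
	struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);
	int err;

	if (rxq->mp_params.mp_ops)
		return -EEXIST;		/* queue already has a provider */

	rxq->mp_params.mp_priv = priv;
	rxq->mp_params.mp_ops = ops;

	/* Restart the queue so its page pool is re-created and
	 * page_pool_init() picks the provider up from mp_params.
	 */
	err = netdev_rx_queue_restart(dev, rxq_idx);
	if (err) {
		/* Roll back on failure, as the devmem binding code does. */
		rxq->mp_params.mp_priv = NULL;
		rxq->mp_params.mp_ops = NULL;
	}
	return err;
}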

diff --git a/include/net/page_pool/memory_provider.h b/include/net/page_pool/memory_provider.h
new file mode 100644
index 0000000..e49d0a5
--- /dev/null
+++ b/include/net/page_pool/memory_provider.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_PAGE_POOL_MEMORY_PROVIDER_H
+#define _NET_PAGE_POOL_MEMORY_PROVIDER_H
+
+#include <net/netmem.h>
+#include <net/page_pool/types.h>
+
+struct memory_provider_ops {
+       netmem_ref (*alloc_netmems)(struct page_pool *pool, gfp_t gfp);
+       bool (*release_netmem)(struct page_pool *pool, netmem_ref netmem);
+       int (*init)(struct page_pool *pool);
+       void (*destroy)(struct page_pool *pool);
+};
+
+#endif
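
To make the hook surface concrete, here is a minimal hypothetical provider against the interface above; the mp_example_* names are invented and not part of this patch. It backs the pool with ordinary order-0 pages and lets the core release them, roughly what the default slow path already does.

#include <linux/gfp.h>
#include <net/netmem.h>
#include <net/page_pool/memory_provider.h>
#include <net/page_pool/types.h>

static int mp_example_init(struct page_pool *pool)
{
	/* Validate pool->mp_priv, pin backing memory, etc. */
	return 0;
}

static void mp_example_destroy(struct page_pool *pool)
{
	/* Release whatever init() acquired. */
}

static netmem_ref mp_example_alloc_netmems(struct page_pool *pool, gfp_t gfp)
{
	/* A real provider must also DMA-map the memory and mark the
	 * netmem as pool-owned, as the devmem provider does in
	 * net/core/devmem.c; omitted here for brevity.
	 */
	struct page *page = alloc_pages(gfp, pool->p.order);

	return page ? page_to_netmem(page) : 0;
}

static bool mp_example_release_netmem(struct page_pool *pool,
				      netmem_ref netmem)
{
	/* Returning true asks the core to put_page() the netmem itself;
	 * devmem returns false because its net_iovs are not pages.
	 */
	return true;
}

/* Must be 'static const' in built-in code so it sits in kernel rodata,
 * which page_pool_init() now checks for.
 */
static const struct memory_provider_ops mp_example_ops = {
	.init		= mp_example_init,
	.destroy	= mp_example_destroy,
	.alloc_netmems	= mp_example_alloc_netmems,
	.release_netmem	= mp_example_release_netmem,
};
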
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index 7f405672b089d380f18f77f2e4ae3a19c265cf8d..36eb57d73abc6cfc601e700ca08be20fb8281055 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -152,8 +152,11 @@ struct page_pool_stats {
  */
 #define PAGE_POOL_FRAG_GROUP_ALIGN     (4 * sizeof(long))
 
+struct memory_provider_ops;
+
 struct pp_memory_provider_params {
        void *mp_priv;
+       const struct memory_provider_ops *mp_ops;
 };
 
 struct page_pool {
@@ -216,6 +219,7 @@ struct page_pool {
        struct ptr_ring ring;
 
        void *mp_priv;
+       const struct memory_provider_ops *mp_ops;
 
 #ifdef CONFIG_PAGE_POOL_STATS
        /* recycle stats are per-cpu to avoid locking */
diff --git a/net/core/devmem.c b/net/core/devmem.c
index fb0dddcb4e602b4cadefb9621781d5629fa5292e..c81625ca57c6141a7df433ada89d99707c1a222f 100644
--- a/net/core/devmem.c
+++ b/net/core/devmem.c
@@ -16,6 +16,7 @@
 #include <net/netdev_queues.h>
 #include <net/netdev_rx_queue.h>
 #include <net/page_pool/helpers.h>
+#include <net/page_pool/memory_provider.h>
 #include <trace/events/page_pool.h>
 
 #include "devmem.h"
@@ -27,6 +28,8 @@
 /* Protected by rtnl_lock() */
 static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1);
 
+static const struct memory_provider_ops dmabuf_devmem_ops;
+
 static void net_devmem_dmabuf_free_chunk_owner(struct gen_pool *genpool,
                                               struct gen_pool_chunk *chunk,
                                               void *not_used)
@@ -118,6 +121,7 @@ void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
                WARN_ON(rxq->mp_params.mp_priv != binding);
 
                rxq->mp_params.mp_priv = NULL;
+               rxq->mp_params.mp_ops = NULL;
 
                rxq_idx = get_netdev_rx_queue_index(rxq);
 
@@ -153,7 +157,7 @@ int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
        }
 
        rxq = __netif_get_rx_queue(dev, rxq_idx);
-       if (rxq->mp_params.mp_priv) {
+       if (rxq->mp_params.mp_ops) {
                NL_SET_ERR_MSG(extack, "designated queue already memory provider bound");
                return -EEXIST;
        }
@@ -171,6 +175,7 @@ int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
                return err;
 
        rxq->mp_params.mp_priv = binding;
+       rxq->mp_params.mp_ops = &dmabuf_devmem_ops;
 
        err = netdev_rx_queue_restart(dev, rxq_idx);
        if (err)
@@ -180,6 +185,7 @@ int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
 
 err_xa_erase:
        rxq->mp_params.mp_priv = NULL;
+       rxq->mp_params.mp_ops = NULL;
        xa_erase(&binding->bound_rxqs, xa_idx);
 
        return err;
@@ -399,3 +405,10 @@ bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem)
        /* We don't want the page pool put_page()ing our net_iovs. */
        return false;
 }
+
+static const struct memory_provider_ops dmabuf_devmem_ops = {
+       .init                   = mp_dmabuf_devmem_init,
+       .destroy                = mp_dmabuf_devmem_destroy,
+       .alloc_netmems          = mp_dmabuf_devmem_alloc_netmems,
+       .release_netmem         = mp_dmabuf_devmem_release_page,
+};
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index f5e908c9e7ad8fb47e8bddf4aefd43a3134b41ee..d632cf2c91c35b82538607c38cf00e9b57b0d061 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -13,6 +13,7 @@
 
 #include <net/netdev_rx_queue.h>
 #include <net/page_pool/helpers.h>
+#include <net/page_pool/memory_provider.h>
 #include <net/xdp.h>
 
 #include <linux/dma-direction.h>
@@ -285,13 +286,19 @@ static int page_pool_init(struct page_pool *pool,
                rxq = __netif_get_rx_queue(pool->slow.netdev,
                                           pool->slow.queue_idx);
                pool->mp_priv = rxq->mp_params.mp_priv;
+               pool->mp_ops = rxq->mp_params.mp_ops;
        }
 
-       if (pool->mp_priv) {
+       if (pool->mp_ops) {
                if (!pool->dma_map || !pool->dma_sync)
                        return -EOPNOTSUPP;
 
-               err = mp_dmabuf_devmem_init(pool);
+               if (WARN_ON(!is_kernel_rodata((unsigned long)pool->mp_ops))) {
+                       err = -EFAULT;
+                       goto free_ptr_ring;
+               }
+
+               err = pool->mp_ops->init(pool);
                if (err) {
                        pr_warn("%s() mem-provider init failed %d\n", __func__,
                                err);
@@ -587,8 +594,8 @@ netmem_ref page_pool_alloc_netmems(struct page_pool *pool, gfp_t gfp)
                return netmem;
 
        /* Slow-path: cache empty, do real allocation */
-       if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_priv)
-               netmem = mp_dmabuf_devmem_alloc_netmems(pool, gfp);
+       if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_ops)
+               netmem = pool->mp_ops->alloc_netmems(pool, gfp);
        else
                netmem = __page_pool_alloc_pages_slow(pool, gfp);
        return netmem;
@@ -679,8 +686,8 @@ void page_pool_return_page(struct page_pool *pool, netmem_ref netmem)
        bool put;
 
        put = true;
-       if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_priv)
-               put = mp_dmabuf_devmem_release_page(pool, netmem);
+       if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_ops)
+               put = pool->mp_ops->release_netmem(pool, netmem);
        else
                __page_pool_release_page_dma(pool, netmem);
 
@@ -1048,8 +1055,8 @@ static void __page_pool_destroy(struct page_pool *pool)
        page_pool_unlist(pool);
        page_pool_uninit(pool);
 
-       if (pool->mp_priv) {
-               mp_dmabuf_devmem_destroy(pool);
+       if (pool->mp_ops) {
+               pool->mp_ops->destroy(pool);
                static_branch_dec(&page_pool_mem_providers);
        }