page_pool: rename page_pool_return_page() to page_pool_return_netmem()
author Byungchul Park <byungchul@sk.com>
Wed, 2 Jul 2025 05:32:52 +0000 (14:32 +0900)
committer Jakub Kicinski <kuba@kernel.org>
Tue, 8 Jul 2025 01:40:08 +0000 (18:40 -0700)
Now that page_pool_return_page() is for returning netmem, not struct
page, rename it to page_pool_return_netmem() to reflect what it does.
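
For readers unfamiliar with netmem, here is a minimal, self-contained
sketch of the tagged-reference idea behind netmem_ref, which motivates
the rename: the function now takes a netmem_ref that may refer either
to a struct page or to non-page memory (net_iov). The helper names echo
the kernel's, but every definition below is an illustrative assumption,
not the real include/net/netmem.h.

/*
 * Illustrative sketch only: the helper names mirror the kernel's, but
 * these definitions are assumptions, not the real include/net/netmem.h.
 * netmem_ref is a tagged reference that carries either a plain
 * struct page pointer or a non-page descriptor (net_iov), with the
 * low bit serving as the tag here.
 */
#include <stdbool.h>
#include <stdint.h>

typedef uintptr_t netmem_ref;		/* stand-in for the kernel type */
#define NETMEM_IOV_TAG 0x1UL		/* assumed tag bit */

struct page;				/* opaque in this sketch */

static bool netmem_is_net_iov(netmem_ref netmem)
{
	return netmem & NETMEM_IOV_TAG;
}

static struct page *netmem_to_page(netmem_ref netmem)
{
	/* Only meaningful when the reference is not a net_iov. */
	return (struct page *)(netmem & ~NETMEM_IOV_TAG);
}

A function that accepts a netmem_ref therefore handles both kinds of
memory, which is why a name containing "page" no longer described what
page_pool_return_page() actually did.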

Signed-off-by: Byungchul Park <byungchul@sk.com>
Reviewed-by: Mina Almasry <almasrymina@google.com>
Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Link: https://patch.msgid.link/20250702053256.4594-2-byungchul@sk.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/core/page_pool.c

index ba7cf3e3c32fdc9fe0b5576016dd19cd477a633e..3bf25e554f96a4034ebaf98a17b62525f317403c 100644
@@ -371,7 +371,7 @@ struct page_pool *page_pool_create(const struct page_pool_params *params)
 }
 EXPORT_SYMBOL(page_pool_create);
 
-static void page_pool_return_page(struct page_pool *pool, netmem_ref netmem);
+static void page_pool_return_netmem(struct page_pool *pool, netmem_ref netmem);
 
 static noinline netmem_ref page_pool_refill_alloc_cache(struct page_pool *pool)
 {
@@ -409,7 +409,7 @@ static noinline netmem_ref page_pool_refill_alloc_cache(struct page_pool *pool)
                         * (2) break out and fall through to alloc_pages_node.
                         * This limits stress on the page buddy allocator.
                         */
-                       page_pool_return_page(pool, netmem);
+                       page_pool_return_netmem(pool, netmem);
                        alloc_stat_inc(pool, waive);
                        netmem = 0;
                        break;
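
The waive logic in the hunk above can be sketched in a few lines. The
following is a hedged, self-contained toy (all types and names are
assumptions for illustration, not page_pool internals): ring entries on
the pool's NUMA node are recycled into the alloc cache; the first
remote entry is handed back to the system and refilling stops, which
bounds pressure on the buddy allocator.

#include <stddef.h>

typedef unsigned long netmem_ref;	/* toy stand-in */

#define RING_SZ  8
#define CACHE_SZ 8

struct pool {				/* toy page_pool */
	int        numa_node;
	netmem_ref ring[RING_SZ];	/* recycle ring */
	size_t     ring_count;
	netmem_ref cache[CACHE_SZ];	/* alloc-side cache */
	size_t     cache_count;
};

/* Hypothetical stand-ins for kernel internals. */
static int netmem_node(netmem_ref n)
{
	return (int)(n & 1);		/* fake NUMA node id */
}

static void pool_return_netmem(struct pool *p, netmem_ref n)
{
	(void)p;
	(void)n;			/* the kernel would unmap DMA and
					 * eventually put_page() here */
}

static void refill_alloc_cache(struct pool *pool)
{
	while (pool->ring_count && pool->cache_count < CACHE_SZ) {
		netmem_ref netmem = pool->ring[--pool->ring_count];

		if (netmem_node(netmem) == pool->numa_node) {
			pool->cache[pool->cache_count++] = netmem;
		} else {
			/* NUMA mismatch: waive the memory back to the
			 * system and stop refilling, bounding pressure
			 * on the buddy allocator. */
			pool_return_netmem(pool, netmem);
			break;
		}
	}
}
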
@@ -712,7 +712,7 @@ static __always_inline void __page_pool_release_page_dma(struct page_pool *pool,
  * a regular page (that will eventually be returned to the normal
  * page-allocator via put_page).
  */
-void page_pool_return_page(struct page_pool *pool, netmem_ref netmem)
+static void page_pool_return_netmem(struct page_pool *pool, netmem_ref netmem)
 {
        int count;
        bool put;
@@ -826,7 +826,7 @@ __page_pool_put_page(struct page_pool *pool, netmem_ref netmem,
         * will be invoking put_page.
         */
        recycle_stat_inc(pool, released_refcnt);
-       page_pool_return_page(pool, netmem);
+       page_pool_return_netmem(pool, netmem);
 
        return 0;
 }
@@ -869,7 +869,7 @@ void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
        if (netmem && !page_pool_recycle_in_ring(pool, netmem)) {
                /* Cache full, fallback to free pages */
                recycle_stat_inc(pool, ring_full);
-               page_pool_return_page(pool, netmem);
+               page_pool_return_netmem(pool, netmem);
        }
 }
 EXPORT_SYMBOL(page_pool_put_unrefed_netmem);
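
The fallback in page_pool_put_unrefed_netmem() follows a simple
recycle-or-free shape. Reusing the toy pool from the sketch above (the
names recycle_in_ring and put_unrefed_netmem are likewise assumed for
illustration), it looks roughly like:

/* Try to stash an entry in the recycle ring; 0 means the ring is full. */
static int recycle_in_ring(struct pool *pool, netmem_ref netmem)
{
	if (pool->ring_count == RING_SZ)
		return 0;
	pool->ring[pool->ring_count++] = netmem;
	return 1;
}

static void put_unrefed_netmem(struct pool *pool, netmem_ref netmem)
{
	if (netmem && !recycle_in_ring(pool, netmem)) {
		/* Ring full: free the memory instead of recycling it. */
		pool_return_netmem(pool, netmem);
	}
}
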
@@ -912,7 +912,7 @@ static void page_pool_recycle_ring_bulk(struct page_pool *pool,
         * since put_page() with refcnt == 1 can be an expensive operation.
         */
        for (; i < bulk_len; i++)
-               page_pool_return_page(pool, bulk[i]);
+               page_pool_return_netmem(pool, bulk[i]);
 }
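
The bulk path above applies the same fallback in two phases, so that
the potentially expensive frees happen only after the ring has taken
everything it can. A rough sketch, again on the toy types from the
earlier sketches:

/* Phase 1 fills the ring; phase 2 frees whatever did not fit.  The
 * frees are deferred because releasing memory (put_page() at
 * refcnt == 1 in the kernel) can be expensive. */
static void recycle_ring_bulk(struct pool *pool,
			      netmem_ref *bulk, size_t bulk_len)
{
	size_t i = 0;

	while (i < bulk_len && recycle_in_ring(pool, bulk[i]))
		i++;

	for (; i < bulk_len; i++)
		pool_return_netmem(pool, bulk[i]);
}
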
 
 /**
@@ -995,7 +995,7 @@ static netmem_ref page_pool_drain_frag(struct page_pool *pool,
                return netmem;
        }
 
-       page_pool_return_page(pool, netmem);
+       page_pool_return_netmem(pool, netmem);
        return 0;
 }
 
@@ -1009,7 +1009,7 @@ static void page_pool_free_frag(struct page_pool *pool)
        if (!netmem || page_pool_unref_netmem(netmem, drain_count))
                return;
 
-       page_pool_return_page(pool, netmem);
+       page_pool_return_netmem(pool, netmem);
 }
 
 netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool,
@@ -1076,7 +1076,7 @@ static void page_pool_empty_ring(struct page_pool *pool)
                        pr_crit("%s() page_pool refcnt %d violation\n",
                                __func__, netmem_ref_count(netmem));
 
-               page_pool_return_page(pool, netmem);
+               page_pool_return_netmem(pool, netmem);
        }
 }
 
@@ -1109,7 +1109,7 @@ static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
         */
        while (pool->alloc.count) {
                netmem = pool->alloc.cache[--pool->alloc.count];
-               page_pool_return_page(pool, netmem);
+               page_pool_return_netmem(pool, netmem);
        }
 }
 
@@ -1253,7 +1253,7 @@ void page_pool_update_nid(struct page_pool *pool, int new_nid)
        /* Flush pool alloc cache, as refill will check NUMA node */
        while (pool->alloc.count) {
                netmem = pool->alloc.cache[--pool->alloc.count];
-               page_pool_return_page(pool, netmem);
+               page_pool_return_netmem(pool, netmem);
        }
 }
 EXPORT_SYMBOL(page_pool_update_nid);