net: use NAPI_SKB_CACHE_FREE to keep 32 as default to do bulk free
author    Jason Xing <kernelxing@tencent.com>
          Tue, 18 Nov 2025 07:06:45 +0000 (15:06 +0800)
committer Jakub Kicinski <kuba@kernel.org>
          Thu, 20 Nov 2025 04:29:24 +0000 (20:29 -0800)
- Replace NAPI_SKB_CACHE_HALF with NAPI_SKB_CACHE_FREE
- Only free 32 skbs in napi_skb_cache_put()

Since the first patch bumped NAPI_SKB_CACHE_SIZE to 128, the number of
skbs freed at once in softirq context increased from 32 to 64.
Considering that a subsequent net_rx_action() calling napi_poll() a few
times can easily consume those 64 freed slots, and that we can afford
to keep more sk_buffs in per-cpu storage, set NAPI_SKB_CACHE_FREE to 32
so the bulk-free size goes back to its previous value. The resulting
logic is: 1) keep 96 skbs cached, 2) free 32 skbs at a time.
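
For illustration, the resulting behaviour can be pictured with a small
user-space sketch (not kernel code). cache_put() below is a simplified,
hypothetical stand-in for napi_skb_cache_put() without the KASAN and
local-lock details, and the printf() stands in for the
kmem_cache_free_bulk() call:

#include <stdio.h>

#define NAPI_SKB_CACHE_SIZE 128
#define NAPI_SKB_CACHE_FREE 32

static void *skb_cache[NAPI_SKB_CACHE_SIZE];
static unsigned int skb_count;

/* simplified model of napi_skb_cache_put(): fill the per-cpu cache and,
 * once it holds 128 entries, bulk-free the newest 32 and keep 96 */
static void cache_put(void *skb)
{
	skb_cache[skb_count++] = skb;

	if (skb_count == NAPI_SKB_CACHE_SIZE) {
		unsigned int remaining = NAPI_SKB_CACHE_SIZE - NAPI_SKB_CACHE_FREE;

		/* stand-in for kmem_cache_free_bulk() on the last 32 slots */
		printf("bulk-freeing %d skbs, keeping %u cached\n",
		       NAPI_SKB_CACHE_FREE, remaining);
		skb_count = remaining;
	}
}

int main(void)
{
	char dummy[256];

	/* the cache fills at the 128th put, then flushes again every 32 puts */
	for (int i = 0; i < 256; i++)
		cache_put(&dummy[i]);
	return 0;
}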

Suggested-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Jason Xing <kernelxing@tencent.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Link: https://patch.msgid.link/20251118070646.61344-4-kerneljasonxing@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/core/skbuff.c

index b6fe7ab85c4a9ebd92918f30a89b8003fe943749..d81ac78c32ffcfc167ea6184205bbbeccd1e2b4c 100644
@@ -225,7 +225,7 @@ static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
 
 #define NAPI_SKB_CACHE_SIZE    128
 #define NAPI_SKB_CACHE_BULK    32
-#define NAPI_SKB_CACHE_HALF    (NAPI_SKB_CACHE_SIZE / 2)
+#define NAPI_SKB_CACHE_FREE    32
 
 struct napi_alloc_cache {
        local_lock_t bh_lock;
@@ -1445,7 +1445,6 @@ void __consume_stateless_skb(struct sk_buff *skb)
 static void napi_skb_cache_put(struct sk_buff *skb)
 {
        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
-       u32 i;
 
        if (!kasan_mempool_poison_object(skb))
                return;
@@ -1454,13 +1453,16 @@ static void napi_skb_cache_put(struct sk_buff *skb)
        nc->skb_cache[nc->skb_count++] = skb;
 
        if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
-               for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
+               u32 i, remaining = NAPI_SKB_CACHE_SIZE - NAPI_SKB_CACHE_FREE;
+
+               for (i = remaining; i < NAPI_SKB_CACHE_SIZE; i++)
                        kasan_mempool_unpoison_object(nc->skb_cache[i],
                                                skbuff_cache_size);
 
-               kmem_cache_free_bulk(net_hotdata.skbuff_cache, NAPI_SKB_CACHE_HALF,
-                                    nc->skb_cache + NAPI_SKB_CACHE_HALF);
-               nc->skb_count = NAPI_SKB_CACHE_HALF;
+               kmem_cache_free_bulk(net_hotdata.skbuff_cache,
+                                    NAPI_SKB_CACHE_FREE,
+                                    nc->skb_cache + remaining);
+               nc->skb_count = remaining;
        }
        local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
 }