From: Breno Leitao <leitao@debian.org>
Date: Thu, 14 Nov 2024 11:00:11 +0000 (-0800)
Subject: net: netpoll: Individualize the skb pool
X-Git-Tag: v6.13-rc1~135^2~28^2~1
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=221a9c1df790fa711d65daf5ba05d0addc279153;p=thirdparty%2Flinux.git

net: netpoll: Individualize the skb pool

netpoll currently uses a single global skb pool, which wastes memory
when targets are disabled or no longer in use: a significant amount of
memory can stay allocated and retained unnecessarily, hurting
performance and limiting the resources available to other system
components.

Give each netpoll target its own skb pool instead of sharing a global
one. This allows finer-grained control over memory allocation and
deallocation, ensuring that buffers are only allocated and retained
while the target that owns them is in use.

Signed-off-by: Breno Leitao <leitao@debian.org>
Link: https://patch.msgid.link/20241114-skb_buffers_v2-v3-1-9be9f52a8b69@debian.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index cd4e28db0cbd7..77635b885c18b 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -32,6 +32,7 @@ struct netpoll {
 	bool ipv6;
 	u16 local_port, remote_port;
 	u8 remote_mac[ETH_ALEN];
+	struct sk_buff_head skb_pool;
 };
 
 struct netpoll_info {
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 94b7f07a952ff..719c9aae845fb 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -45,9 +45,6 @@
 
 #define MAX_UDP_CHUNK 1460
 #define MAX_SKBS 32
-
-static struct sk_buff_head skb_pool;
-
 #define USEC_PER_POLL	50
 
 #define MAX_SKB_SIZE							\
@@ -234,20 +231,23 @@ void netpoll_poll_enable(struct net_device *dev)
 	up(&ni->dev_lock);
 }
 
-static void refill_skbs(void)
+static void refill_skbs(struct netpoll *np)
 {
+	struct sk_buff_head *skb_pool;
 	struct sk_buff *skb;
 	unsigned long flags;
 
-	spin_lock_irqsave(&skb_pool.lock, flags);
-	while (skb_pool.qlen < MAX_SKBS) {
+	skb_pool = &np->skb_pool;
+
+	spin_lock_irqsave(&skb_pool->lock, flags);
+	while (skb_pool->qlen < MAX_SKBS) {
 		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
 		if (!skb)
 			break;
 
-		__skb_queue_tail(&skb_pool, skb);
+		__skb_queue_tail(skb_pool, skb);
 	}
-	spin_unlock_irqrestore(&skb_pool.lock, flags);
+	spin_unlock_irqrestore(&skb_pool->lock, flags);
 }
 
 static void zap_completion_queue(void)
@@ -284,12 +284,12 @@ static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
 	struct sk_buff *skb;
 
 	zap_completion_queue();
-	refill_skbs();
+	refill_skbs(np);
 
 repeat:
 	skb = alloc_skb(len, GFP_ATOMIC);
 	if (!skb)
-		skb = skb_dequeue(&skb_pool);
+		skb = skb_dequeue(&np->skb_pool);
 
 	if (!skb) {
 		if (++count < 10) {
@@ -673,6 +673,8 @@ int netpoll_setup(struct netpoll *np)
 	struct in_device *in_dev;
 	int err;
 
+	skb_queue_head_init(&np->skb_pool);
+
 	rtnl_lock();
 	if (np->dev_name[0]) {
 		struct net *net = current->nsproxy->net_ns;
@@ -773,7 +775,7 @@ put_noaddr:
 	}
 
 	/* fill up the skb queue */
-	refill_skbs();
+	refill_skbs(np);
 
 	err = __netpoll_setup(np, ndev);
 	if (err)
@@ -792,13 +794,6 @@ unlock:
 }
 EXPORT_SYMBOL(netpoll_setup);
 
-static int __init netpoll_init(void)
-{
-	skb_queue_head_init(&skb_pool);
-	return 0;
-}
-core_initcall(netpoll_init);
-
 static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
 {
 	struct netpoll_info *npinfo =
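
For readers who want to try the allocation pattern outside the kernel, the
sketch below models what the patch does: each target owns its own pool that
is opportunistically topped up to MAX_SKBS, and the fast path tries a fresh
allocation first, falling back to the pool only on failure (mirroring
refill_skbs() and find_skb() above). This is a minimal userspace C sketch
under stated assumptions, not kernel code: a pthread mutex and a singly
linked list stand in for sk_buff_head and its spinlock, malloc() stands in
for alloc_skb(..., GFP_ATOMIC), and the names struct target, pool_refill()
and pool_get() are hypothetical.

/* Sketch of the per-target buffer-pool pattern from the patch.
 * Stand-ins: struct buf ~ sk_buff, pthread mutex ~ skb_pool.lock,
 * malloc() ~ alloc_skb(..., GFP_ATOMIC). All names are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_SKBS 32			/* same refill watermark as netpoll */

struct buf {
	struct buf *next;
	char data[256];
};

struct target {				/* plays the role of struct netpoll */
	pthread_mutex_t lock;
	int qlen;
	struct buf *head;		/* per-target pool, not a global one */
};

/* Mirrors refill_skbs(): best-effort top-up of this target's pool. */
static void pool_refill(struct target *t)
{
	pthread_mutex_lock(&t->lock);
	while (t->qlen < MAX_SKBS) {
		struct buf *b = malloc(sizeof(*b));

		if (!b)
			break;		/* like an atomic allocation failure */
		b->next = t->head;
		t->head = b;
		t->qlen++;
	}
	pthread_mutex_unlock(&t->lock);
}

/* Mirrors find_skb(): try a fresh allocation first, then fall back
 * to the pre-filled per-target pool. */
static struct buf *pool_get(struct target *t)
{
	struct buf *b = malloc(sizeof(*b));

	if (!b) {
		pthread_mutex_lock(&t->lock);
		b = t->head;
		if (b) {
			t->head = b->next;
			t->qlen--;
		}
		pthread_mutex_unlock(&t->lock);
	}
	return b;
}

int main(void)
{
	struct target t = { .lock = PTHREAD_MUTEX_INITIALIZER };

	pool_refill(&t);		/* netpoll_setup() does this once */
	struct buf *b = pool_get(&t);
	printf("got buffer %p, pool depth now %d\n", (void *)b, t.qlen);
	free(b);			/* pool entries leak in this short demo */
	return 0;
}

Note the ordering the sketch preserves: the pool is touched only when the
allocator fails, so under normal operation each target's pool stays full
and ready for the allocation-hostile contexts netpoll runs in, and tearing
down one target can now reclaim its buffers without affecting any other.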