From c75734b585c61411fb5a12daedcbf4dbb9e7043d Mon Sep 17 00:00:00 2001
From: Eric Dumazet <edumazet@google.com>
Date: Thu, 22 Jan 2026 04:57:16 +0000
Subject: [PATCH] net: always inline skb_frag_unref() and __skb_frag_unref()

clang is not inlining skb_frag_unref() and __skb_frag_unref() in the GRO
fast path.

It also does not inline gro_try_pull_from_frag0().

Using __always_inline fixes this issue, makes the kernel faster _and_
smaller.

Also change __skb_frag_ref(), skb_frag_ref() and skb_page_unref() so
they can be inlined for the last patch in this series.

$ scripts/bloat-o-meter -t vmlinux.0 vmlinux.1
add/remove: 2/6 grow/shrink: 1/2 up/down: 218/-511 (-293)
Function                         old     new   delta
gro_pull_from_frag0                -     188    +188
__pfx_gro_pull_from_frag0          -      16     +16
skb_shift                       1125    1139     +14
__pfx_skb_frag_unref              16       -     -16
__pfx_gro_try_pull_from_frag0     16       -     -16
__pfx___skb_frag_unref            16       -     -16
__skb_frag_unref                  36       -     -36
skb_frag_unref                    59       -     -59
dev_gro_receive                 1608    1514     -94
napi_gro_frags                   892     771    -121
gro_try_pull_from_frag0          153       -    -153
Total: Before=22566192, After=22565899, chg -0.00%

Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20260122045720.1221017-2-edumazet@google.com
Signed-off-by: Jakub Kicinski
---
 include/linux/skbuff_ref.h | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/include/linux/skbuff_ref.h b/include/linux/skbuff_ref.h
index 9e49372ef1a05..05c8486bafac8 100644
--- a/include/linux/skbuff_ref.h
+++ b/include/linux/skbuff_ref.h
@@ -15,7 +15,7 @@
  *
  * Takes an additional reference on the paged fragment @frag.
  */
-static inline void __skb_frag_ref(skb_frag_t *frag)
+static __always_inline void __skb_frag_ref(skb_frag_t *frag)
 {
 	get_netmem(skb_frag_netmem(frag));
 }
@@ -27,14 +27,14 @@ static inline void __skb_frag_ref(skb_frag_t *frag)
  *
  * Takes an additional reference on the @f'th paged fragment of @skb.
  */
-static inline void skb_frag_ref(struct sk_buff *skb, int f)
+static __always_inline void skb_frag_ref(struct sk_buff *skb, int f)
 {
 	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
 }
 
 bool napi_pp_put_page(netmem_ref netmem);
 
-static inline void skb_page_unref(netmem_ref netmem, bool recycle)
+static __always_inline void skb_page_unref(netmem_ref netmem, bool recycle)
 {
 #ifdef CONFIG_PAGE_POOL
 	if (recycle && napi_pp_put_page(netmem))
@@ -51,7 +51,7 @@ static inline void skb_page_unref(netmem_ref netmem, bool recycle)
  * Releases a reference on the paged fragment @frag
  * or recycles the page via the page_pool API.
  */
-static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
+static __always_inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
 {
 	skb_page_unref(skb_frag_netmem(frag), recycle);
 }
@@ -63,7 +63,7 @@ static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
  *
  * Releases a reference on the @f'th paged fragment of @skb.
  */
-static inline void skb_frag_unref(struct sk_buff *skb, int f)
+static __always_inline void skb_frag_unref(struct sk_buff *skb, int f)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 
-- 
2.47.3
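
For readers unfamiliar with the annotation, here is a minimal, self-contained
C sketch (not part of the patch) of the difference between a plain "inline"
hint and forced inlining. The helper names below are hypothetical; only the
attribute itself reflects what the kernel's __always_inline macro expands to
in current trees.

/*
 * Minimal sketch, outside the patch: __always_inline in the kernel expands
 * to the compiler's always_inline function attribute, which folds the body
 * into every caller even when the optimizer's heuristics (as with clang in
 * the GRO fast path here) would otherwise keep an out-of-line copy.
 */
#include <stdio.h>

#define my_always_inline inline __attribute__((__always_inline__))

/* Plain "inline" is only a hint; the compiler may still emit a standalone copy. */
static inline int add_one_hint(int x)
{
	return x + 1;
}

/* Forced inlining: every call site gets the body; for a static function,
 * a separate out-of-line symbol is typically not emitted at all.
 */
static my_always_inline int add_one_forced(int x)
{
	return x + 1;
}

int main(void)
{
	printf("%d %d\n", add_one_hint(1), add_one_forced(2));
	return 0;
}

Verifying the effect follows the same approach as the changelog: compare
symbol sizes of the two builds with scripts/bloat-o-meter, where the
previously out-of-line helpers disappear from the symbol table.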