net: mark deliver_skb() as unlikely and not inlined
author     Eric Dumazet <edumazet@google.com>
           Mon, 3 Nov 2025 16:52:56 +0000 (16:52 +0000)
committer  Jakub Kicinski <kuba@kernel.org>
           Wed, 5 Nov 2025 00:08:25 +0000 (16:08 -0800)
deliver_skb() should not be inlined, as it is not called
in the fast path.

Add unlikely() annotations to give the compiler hints about this fact.
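
For context, unlikely() is the kernel's wrapper around the compiler's
branch-prediction builtin (see include/linux/compiler.h). A minimal,
standalone sketch of the pattern this patch relies on; my_unlikely() and
deliver_demo() are illustrative names, not kernel code:

  #include <stddef.h>

  /* Roughly what the kernel's unlikely() expands to. */
  #define my_unlikely(x)	__builtin_expect(!!(x), 0)

  /*
   * Out-of-line helper: the hint tells the compiler the pt_prev branch
   * is expected to be cold, so the caller's fast path stays compact and
   * the call is kept off the hot path.
   */
  static int deliver_demo(void *pt_prev)
  {
  	if (my_unlikely(pt_prev != NULL))
  		return 1;	/* cold: an extra handler was pending */
  	return 0;		/* hot: common case */
  }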

Before this patch:

size net/core/dev.o
   text    data     bss     dec     hex filename
 121794   13330     176  135300   21084 net/core/dev.o

__netif_receive_skb_core() size on x86_64 : 4080 bytes.

After:

size net/core/dev.o
   text    data     bss     dec     hex filename
 120330   13338     176  133844   20ad4 net/core/dev.o

__netif_receive_skb_core() size on x86_64 : 2781 bytes.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20251103165256.1712169-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
diff --git a/net/core/dev.c b/net/core/dev.c
index 2c1de5fb97d93c8711797168249d7e4f8a2ae0a3..ba39146bbd25f7a5920e3de58df4915853e196de 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2463,9 +2463,9 @@ int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb)
        return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb);
 }
 
-static inline int deliver_skb(struct sk_buff *skb,
-                             struct packet_type *pt_prev,
-                             struct net_device *orig_dev)
+static int deliver_skb(struct sk_buff *skb,
+                      struct packet_type *pt_prev,
+                      struct net_device *orig_dev)
 {
        if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
                return -ENOMEM;
@@ -2484,7 +2484,7 @@ static inline void deliver_ptype_list_skb(struct sk_buff *skb,
        list_for_each_entry_rcu(ptype, ptype_list, list) {
                if (ptype->type != type)
                        continue;
-               if (pt_prev)
+               if (unlikely(pt_prev))
                        deliver_skb(skb, pt_prev, orig_dev);
                pt_prev = ptype;
        }
@@ -2545,7 +2545,7 @@ again:
                if (skb_loop_sk(ptype, skb))
                        continue;
 
-               if (pt_prev) {
+               if (unlikely(pt_prev)) {
                        deliver_skb(skb2, pt_prev, skb->dev);
                        pt_prev = ptype;
                        continue;
@@ -4421,7 +4421,7 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
                return skb;
 
        bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
-       if (*pt_prev) {
+       if (unlikely(*pt_prev)) {
                *ret = deliver_skb(skb, *pt_prev, orig_dev);
                *pt_prev = NULL;
        }
@@ -5883,7 +5883,7 @@ static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
        if (nf_hook_ingress_active(skb)) {
                int ingress_retval;
 
-               if (*pt_prev) {
+               if (unlikely(*pt_prev)) {
                        *ret = deliver_skb(skb, *pt_prev, orig_dev);
                        *pt_prev = NULL;
                }
@@ -5960,13 +5960,13 @@ another_round:
 
        list_for_each_entry_rcu(ptype, &dev_net_rcu(skb->dev)->ptype_all,
                                list) {
-               if (pt_prev)
+               if (unlikely(pt_prev))
                        ret = deliver_skb(skb, pt_prev, orig_dev);
                pt_prev = ptype;
        }
 
        list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
-               if (pt_prev)
+               if (unlikely(pt_prev))
                        ret = deliver_skb(skb, pt_prev, orig_dev);
                pt_prev = ptype;
        }
@@ -5997,7 +5997,7 @@ skip_classify:
        }
 
        if (skb_vlan_tag_present(skb)) {
-               if (pt_prev) {
+               if (unlikely(pt_prev)) {
                        ret = deliver_skb(skb, pt_prev, orig_dev);
                        pt_prev = NULL;
                }
@@ -6009,7 +6009,7 @@ skip_classify:
 
        rx_handler = rcu_dereference(skb->dev->rx_handler);
        if (rx_handler) {
-               if (pt_prev) {
+               if (unlikely(pt_prev)) {
                        ret = deliver_skb(skb, pt_prev, orig_dev);
                        pt_prev = NULL;
                }