git.ipfire.org Git - thirdparty/linux.git/commitdiff
net: add skb_defer_disable_key static key
authorEric Dumazet <edumazet@google.com>
Wed, 11 Mar 2026 19:13:40 +0000 (19:13 +0000)
committerJakub Kicinski <kuba@kernel.org>
Fri, 13 Mar 2026 02:25:33 +0000 (19:25 -0700)
Add a static key to bypass skb_attempt_defer_free() steps
if net.core.skb_defer_max is set to zero.

Main benefit is the atomic_long_inc_return() avoidance.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20260311191340.1996888-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/core/net-sysfs.h
net/core/skbuff.c
net/core/sysctl_net_core.c

index e938f25e8e86f9dfd8f710a08922c4cabf662c2e..38e2e3ffd0bdc4ccea2f6c7636c1bbc7d0f46af5 100644 (file)
@@ -13,4 +13,5 @@ int netdev_change_owner(struct net_device *, const struct net *net_old,
 
 extern struct mutex rps_default_mask_mutex;
 
+DECLARE_STATIC_KEY_FALSE(skb_defer_disable_key);
 #endif
index 513cbfed19bc34bbb6767cdd7a50dad68be430fb..3d6978dd0aa83f63984b994359d0c914c6427a00 100644 (file)
@@ -7256,6 +7256,8 @@ static void kfree_skb_napi_cache(struct sk_buff *skb)
        local_bh_enable();
 }
 
+DEFINE_STATIC_KEY_FALSE(skb_defer_disable_key);
+
 /**
  * skb_attempt_defer_free - queue skb for remote freeing
  * @skb: buffer
@@ -7272,6 +7274,9 @@ void skb_attempt_defer_free(struct sk_buff *skb)
        bool kick;
        int cpu;
 
+       if (static_branch_unlikely(&skb_defer_disable_key))
+               goto nodefer;
+
        /* zero copy notifications should not be delayed. */
        if (skb_zcopy(skb))
                goto nodefer;
index 502705e0464981ecfc32233d22c747e14b3febf7..b508618bfc12393ba926ebf5a2dd4ea73ef03ee8 100644 (file)
@@ -349,6 +349,29 @@ static int proc_do_rss_key(const struct ctl_table *table, int write,
        return proc_dostring(&fake_table, write, buffer, lenp, ppos);
 }
 
+/* Sysctl handler for net.core.skb_defer_max.
+ *
+ * Wraps proc_dointvec_minmax() so that skb_defer_disable_key stays in
+ * sync with the sysctl value: the key is enabled when skb_defer_max is
+ * zero (deferred skb freeing bypassed) and disabled otherwise.  The
+ * local mutex serializes concurrent writers so enable/disable
+ * transitions cannot race and leave the key out of sync with the value.
+ */
+static int proc_do_skb_defer_max(const struct ctl_table *table, int write,
+                void *buffer, size_t *lenp, loff_t *ppos)
+{
+       static DEFINE_MUTEX(skb_defer_max_mutex);
+       int ret, oval, nval;
+
+       mutex_lock(&skb_defer_max_mutex);
+
+       /* Record whether deferral was disabled (value == 0) before and
+        * after the update; on read or failed write the value is
+        * unchanged, so oval == nval and the key is left alone.
+        */
+       oval = !net_hotdata.sysctl_skb_defer_max;
+       ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+       nval = !net_hotdata.sysctl_skb_defer_max;
+
+       /* Flip the static key only when the zero/non-zero state changed. */
+       if (nval != oval) {
+               if (nval)
+                       static_branch_enable(&skb_defer_disable_key);
+               else
+                       static_branch_disable(&skb_defer_disable_key);
+       }
+
+       mutex_unlock(&skb_defer_max_mutex);
+       return ret;
+}
+
 #ifdef CONFIG_BPF_JIT
 static int proc_dointvec_minmax_bpf_enable(const struct ctl_table *table, int write,
                                           void *buffer, size_t *lenp,
@@ -650,7 +673,7 @@ static struct ctl_table net_core_table[] = {
                .data           = &net_hotdata.sysctl_skb_defer_max,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
+               .proc_handler   = proc_do_skb_defer_max,
                .extra1         = SYSCTL_ZERO,
        },
 };