git.ipfire.org Git - thirdparty/linux.git/commitdiff
net: __lock_sock() can be static
authorEric Dumazet <edumazet@google.com>
Mon, 23 Feb 2026 09:27:15 +0000 (09:27 +0000)
committerJakub Kicinski <kuba@kernel.org>
Wed, 25 Feb 2026 00:30:33 +0000 (16:30 -0800)
After commit 6511882cdd82 ("mptcp: allocate fwd memory separately
on the rx and tx path") __lock_sock() can be static again.

Make sure __lock_sock() is not inlined, so that lock_sock_nested()
no longer needs a stack canary.

Add a noinline attribute on lock_sock_nested() so that calls
to lock_sock() from net/core/sock.c are not inlined;
none of them is a fast path that would deserve inlining:

 - sockopt_lock_sock()
 - sock_set_reuseport()
 - sock_set_reuseaddr()
 - sock_set_mark()
 - sock_set_keepalive()
 - sock_no_linger()
 - sock_bindtoindex()
 - sk_wait_data()
 - sock_set_rcvbuf()

$ scripts/bloat-o-meter -t vmlinux.old vmlinux
add/remove: 0/0 grow/shrink: 0/3 up/down: 0/-312 (-312)
Function                                     old     new   delta
__lock_sock                                  192     188      -4
__lock_sock_fast                             239      86    -153
lock_sock_nested                             227      72    -155
Total: Before=24888707, After=24888395, chg -0.00%

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20260223092716.3673939-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
include/net/sock.h
net/core/sock.c

index 66b56288c1d3850439b2a0bed00be801d5770efa..55b61e4b0d8318887d527e919fc1103d78ac6d14 100644 (file)
@@ -1709,7 +1709,6 @@ static inline void lock_sock(struct sock *sk)
        lock_sock_nested(sk, 0);
 }
 
-void __lock_sock(struct sock *sk);
 void __release_sock(struct sock *sk);
 void release_sock(struct sock *sk);
 
index 693e6d80f501ef552aa58928f28b78a578169536..cfb2a6209946089669882cdbd5d1b36c53838989 100644 (file)
@@ -3175,7 +3175,7 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
 }
 EXPORT_SYMBOL(sk_page_frag_refill);
 
-void __lock_sock(struct sock *sk)
+static void __lock_sock(struct sock *sk)
        __releases(&sk->sk_lock.slock)
        __acquires(&sk->sk_lock.slock)
 {
@@ -3774,14 +3774,14 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 }
 EXPORT_SYMBOL(sock_init_data);
 
-void lock_sock_nested(struct sock *sk, int subclass)
+void noinline lock_sock_nested(struct sock *sk, int subclass)
 {
        /* The sk_lock has mutex_lock() semantics here. */
        mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
 
        might_sleep();
        spin_lock_bh(&sk->sk_lock.slock);
-       if (sock_owned_by_user_nocheck(sk))
+       if (unlikely(sock_owned_by_user_nocheck(sk)))
                __lock_sock(sk);
        sk->sk_lock.owned = 1;
        spin_unlock_bh(&sk->sk_lock.slock);
@@ -3810,7 +3810,7 @@ bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)
        might_sleep();
        spin_lock_bh(&sk->sk_lock.slock);
 
-       if (!sock_owned_by_user_nocheck(sk)) {
+       if (likely(!sock_owned_by_user_nocheck(sk))) {
                /*
                 * Fast path return with bottom halves disabled and
                 * sock::sk_lock.slock held.