From: Kuniyuki Iwashima Date: Thu, 5 Sep 2024 19:32:39 +0000 (-0700) Subject: af_unix: Move spin_lock() in manage_oob(). X-Git-Tag: v6.12-rc1~232^2~66^2~1 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=a0264a9f51fe0d196f22efd7538eb749e3448c2d;p=thirdparty%2Fkernel%2Flinux.git af_unix: Move spin_lock() in manage_oob(). When the OOB skb has already been consumed, manage_oob() returns the next skb if it exists. In such a case, we need to fall back to the else branch below. Then, we want to keep holding spin_lock(&sk->sk_receive_queue.lock). Let's move it out of the if-else branch and add a lightweight check before spin_lock() for the major use cases without an OOB skb. Signed-off-by: Kuniyuki Iwashima Link: https://patch.msgid.link/20240905193240.17565-4-kuniyu@amazon.com Signed-off-by: Jakub Kicinski --- diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 91d7877a10794..159d78fc3d14d 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2657,9 +2657,12 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk, struct sk_buff *read_skb = NULL, *unread_skb = NULL; struct unix_sock *u = unix_sk(sk); - if (!unix_skb_len(skb)) { - spin_lock(&sk->sk_receive_queue.lock); + if (likely(unix_skb_len(skb) && skb != READ_ONCE(u->oob_skb))) + return skb; + spin_lock(&sk->sk_receive_queue.lock); + + if (!unix_skb_len(skb)) { if (copied && (!u->oob_skb || skb == u->oob_skb)) { skb = NULL; } else if (flags & MSG_PEEK) { @@ -2670,14 +2673,9 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk, __skb_unlink(read_skb, &sk->sk_receive_queue); } - spin_unlock(&sk->sk_receive_queue.lock); - - consume_skb(read_skb); - return skb; + goto unlock; } - spin_lock(&sk->sk_receive_queue.lock); - if (skb != u->oob_skb) goto unlock; @@ -2698,6 +2696,7 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk, unlock: spin_unlock(&sk->sk_receive_queue.lock); + consume_skb(read_skb); kfree_skb(unread_skb); return skb;