tcp/dccp: complete lockless accesses to sk->sk_max_ack_backlog
author    Jason Xing <kernelxing@tencent.com>
          Sun, 31 Mar 2024 09:05:21 +0000 (17:05 +0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sat, 1 Feb 2025 17:22:19 +0000 (18:22 +0100)
[ Upstream commit 9a79c65f00e2b036e17af3a3a607d7d732b7affb ]

Since commit 099ecf59f05b ("net: annotate lockless accesses to
sk->sk_max_ack_backlog") switched sk_max_ack_backlog to lockless
handling, one more helper used in the TCP/DCCP paths,
inet_csk_reqsk_queue_is_full(), still reads the field without an
annotation. Annotate that read as well to complete the conversion.

Signed-off-by: Jason Xing <kernelxing@tencent.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Link: https://lore.kernel.org/r/20240331090521.71965-1-kerneljasonxing@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Stable-dep-of: 3479c7549fb1 ("tcp/dccp: allow a connection when sk_max_ack_backlog is zero")
Signed-off-by: Sasha Levin <sashal@kernel.org>
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index f5967805c33fd9b889ac5294b77fff68d0ac217b..2a4bf25534767fa385ac0b46b7056b480827676f 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -282,7 +282,7 @@ static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
 
 static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
 {
-       return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
+       return inet_csk_reqsk_queue_len(sk) >= READ_ONCE(sk->sk_max_ack_backlog);
 }
 
 bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
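
For context, the READ_ONCE() added above pairs with the WRITE_ONCE() annotations
that commit 099ecf59f05b placed on the write side, where the backlog limit is
stored when an application calls listen(). The following is a minimal sketch of
that writer/reader pairing, not the exact kernel sources; the precise write call
site varies across kernel versions and is simplified here.

    /* Writer side (sketch, simplified from the listen() path): the backlog
     * limit is stored without holding the socket lock, so the store is
     * annotated to document the concurrent access and avoid store tearing.
     */
    WRITE_ONCE(sk->sk_max_ack_backlog, backlog);

    /* Reader side, as changed by this patch: an incoming SYN on a listener
     * compares the request queue length against the limit without the socket
     * lock, so the load is annotated to match.
     */
    static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
    {
            return inet_csk_reqsk_queue_len(sk) >=
                   READ_ONCE(sk->sk_max_ack_backlog);
    }

With both sides annotated, concurrent readers and the writer each access
sk_max_ack_backlog through a single, non-torn load or store, which is what the
lockless scheme introduced in 099ecf59f05b relies on.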