net: mark racy access on sk->sk_rcvbuf
author     linke li <lilinke99@qq.com>
           Thu, 21 Mar 2024 08:44:10 +0000 (16:44 +0800)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 17 May 2024 10:14:38 +0000 (12:14 +0200)
[ Upstream commit c2deb2e971f5d9aca941ef13ee05566979e337a4 ]

sk->sk_rcvbuf in __sock_queue_rcv_skb() and __sk_receive_skb() can be
changed by other threads. Mark this as benign using READ_ONCE().

This patch is aimed at reducing the number of benign races reported by
KCSAN in order to focus future debugging effort on harmful races.

Signed-off-by: linke li <lilinke99@qq.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
diff --git a/net/core/sock.c b/net/core/sock.c
index 9cf404e8038a49a69197f2854cb069869bc70b47..599f186fe3d3a191ad06524d8d6102c1ee6fcc89 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -482,7 +482,7 @@ int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        unsigned long flags;
        struct sk_buff_head *list = &sk->sk_receive_queue;
 
-       if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
+       if (atomic_read(&sk->sk_rmem_alloc) >= READ_ONCE(sk->sk_rcvbuf)) {
                atomic_inc(&sk->sk_drops);
                trace_sock_rcvqueue_full(sk, skb);
                return -ENOMEM;
@@ -552,7 +552,7 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
 
        skb->dev = NULL;
 
-       if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
+       if (sk_rcvqueues_full(sk, READ_ONCE(sk->sk_rcvbuf))) {
                atomic_inc(&sk->sk_drops);
                goto discard_and_relse;
        }
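
For reference, READ_ONCE() tells both the compiler and KCSAN that a plain load of a field written concurrently by other threads is intentional: the compiler may not tear, fuse, or re-issue the load, and KCSAN stops flagging the access as an unmarked data race. Below is a minimal userspace sketch of the pattern, not kernel code; the struct, field names, and simplified volatile-cast macros are stand-ins for illustration only (the kernel's real READ_ONCE()/WRITE_ONCE() are defined under include/linux/compiler.h / include/asm-generic/rwonce.h).

    #include <pthread.h>
    #include <stdio.h>

    /* Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE();
     * the volatile cast keeps the compiler from tearing, fusing, or
     * re-reading the access. */
    #define READ_ONCE(x)      (*(volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, v)  (*(volatile __typeof__(x) *)&(x) = (v))

    /* Hypothetical stand-in for struct sock's receive-buffer fields. */
    struct fake_sock {
        int sk_rcvbuf;      /* resized by one thread, read locklessly by another */
        int sk_rmem_alloc;
    };

    static struct fake_sock sk = { .sk_rcvbuf = 4096, .sk_rmem_alloc = 0 };

    /* Reader side: mirrors the patched check in __sock_queue_rcv_skb(). */
    static void *receive_path(void *arg)
    {
        (void)arg;
        for (int i = 0; i < 100000; i++) {
            if (sk.sk_rmem_alloc >= READ_ONCE(sk.sk_rcvbuf))
                ; /* would drop the skb and bump sk_drops here */
        }
        return NULL;
    }

    /* Writer side: e.g. setsockopt(SO_RCVBUF) resizing the buffer. */
    static void *resize_path(void *arg)
    {
        (void)arg;
        for (int i = 0; i < 100000; i++)
            WRITE_ONCE(sk.sk_rcvbuf, 4096 + (i & 0xff));
        return NULL;
    }

    int main(void)
    {
        pthread_t r, w;

        pthread_create(&r, NULL, receive_path, NULL);
        pthread_create(&w, NULL, resize_path, NULL);
        pthread_join(r, NULL);
        pthread_join(w, NULL);
        puts("done");
        return 0;
    }

Build with "cc -O2 -pthread sketch.c". The race on sk_rcvbuf is still there by design; the annotations only document it as benign so a sanitizer such as KCSAN (or TSan, for this userspace sketch) can be pointed at the races that actually matter.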